From 356dddbc5508dd65f466098da26a2e47584eafdb Mon Sep 17 00:00:00 2001 From: kobalicek Date: Sat, 24 May 2025 15:53:19 +0200 Subject: [PATCH] [abi] Switched to C++17 --- .github/workflows/build-config.json | 4 +- .github/workflows/build.yml | 1 - CMakeLists.txt | 2 +- src/asmjit/a64.h | 17 +- src/asmjit/arm.h | 11 +- src/asmjit/arm/a64assembler.cpp | 170 +++-- src/asmjit/arm/a64assembler.h | 2 +- src/asmjit/arm/a64builder.cpp | 3 +- src/asmjit/arm/a64builder.h | 2 +- src/asmjit/arm/a64compiler.cpp | 3 +- src/asmjit/arm/a64compiler.h | 2 +- src/asmjit/arm/a64emithelper.cpp | 39 +- src/asmjit/arm/a64func.cpp | 36 +- src/asmjit/arm/a64globals.h | 16 +- src/asmjit/arm/a64instapi.cpp | 12 +- src/asmjit/arm/a64instdb.h | 5 + src/asmjit/arm/a64instdb_p.h | 36 +- src/asmjit/arm/a64operand.h | 180 ++++- src/asmjit/arm/a64rapass.cpp | 117 ++- src/asmjit/arm/a64rapass_p.h | 9 +- src/asmjit/arm/armformatter.cpp | 42 +- src/asmjit/arm/armoperand.h | 131 ++-- src/asmjit/arm/armutils.h | 10 +- src/asmjit/asmjit.h | 2 + src/asmjit/core.h | 40 +- src/asmjit/core/api-config.h | 219 ++---- src/asmjit/core/archcommons.h | 19 +- src/asmjit/core/archtraits.cpp | 39 +- src/asmjit/core/archtraits.h | 30 + src/asmjit/core/assembler.cpp | 103 ++- src/asmjit/core/assembler.h | 12 +- src/asmjit/core/builder.cpp | 232 ++++-- src/asmjit/core/builder.h | 197 ++++- src/asmjit/core/codebuffer.h | 22 + src/asmjit/core/codeholder.cpp | 261 ++++--- src/asmjit/core/codeholder.h | 140 +++- src/asmjit/core/codewriter.cpp | 68 +- src/asmjit/core/codewriter_p.h | 62 +- src/asmjit/core/compiler.cpp | 94 ++- src/asmjit/core/compiler.h | 68 +- src/asmjit/core/compilerdefs.h | 28 + src/asmjit/core/constpool.cpp | 52 +- src/asmjit/core/constpool.h | 15 + src/asmjit/core/cpuinfo.cpp | 228 +++--- src/asmjit/core/cpuinfo.h | 66 +- src/asmjit/core/emithelper.cpp | 75 +- src/asmjit/core/emithelper_p.h | 2 + src/asmjit/core/emitter.cpp | 39 +- src/asmjit/core/emitter.h | 78 +- src/asmjit/core/emitterutils.cpp | 6 +- 
src/asmjit/core/emitterutils_p.h | 5 +- src/asmjit/core/environment.cpp | 3 +- src/asmjit/core/environment.h | 98 ++- src/asmjit/core/formatter.cpp | 108 ++- src/asmjit/core/formatter.h | 11 + src/asmjit/core/formatter_p.h | 2 +- src/asmjit/core/func.cpp | 60 +- src/asmjit/core/func.h | 268 ++++++- src/asmjit/core/funcargscontext_p.h | 40 +- src/asmjit/core/globals.h | 38 +- src/asmjit/core/inst.h | 131 +++- src/asmjit/core/instdb.cpp | 4 +- src/asmjit/core/jitallocator.cpp | 267 ++++--- src/asmjit/core/jitallocator.h | 39 +- src/asmjit/core/jitruntime.cpp | 3 +- src/asmjit/core/jitruntime.h | 1 + src/asmjit/core/logger.cpp | 6 +- src/asmjit/core/logger.h | 24 + src/asmjit/core/operand.h | 1051 +++++++++++++++++---------- src/asmjit/core/osutils.cpp | 3 +- src/asmjit/core/osutils.h | 2 +- src/asmjit/core/raassignment_p.h | 46 +- src/asmjit/core/rabuilders_p.h | 77 +- src/asmjit/core/radefs_p.h | 283 +++++++- src/asmjit/core/ralocal.cpp | 103 ++- src/asmjit/core/ralocal_p.h | 51 +- src/asmjit/core/rapass.cpp | 300 +++++--- src/asmjit/core/rapass_p.h | 326 ++++++++- src/asmjit/core/rastack.cpp | 27 +- src/asmjit/core/rastack_p.h | 35 +- src/asmjit/core/string.cpp | 121 ++- src/asmjit/core/string.h | 58 +- src/asmjit/core/support.h | 748 ++++++++++++------- src/asmjit/core/target.h | 6 + src/asmjit/core/type.cpp | 76 +- src/asmjit/core/type.h | 217 ++++-- src/asmjit/core/virtmem.cpp | 182 +++-- src/asmjit/core/virtmem.h | 19 +- src/asmjit/core/zone.cpp | 40 +- src/asmjit/core/zone.h | 123 +++- src/asmjit/core/zonehash.cpp | 12 +- src/asmjit/core/zonehash.h | 25 +- src/asmjit/core/zonelist.h | 37 +- src/asmjit/core/zonestack.cpp | 6 +- src/asmjit/core/zonestack.h | 48 +- src/asmjit/core/zonestring.h | 11 +- src/asmjit/core/zonetree.cpp | 2 +- src/asmjit/core/zonetree.h | 49 +- src/asmjit/core/zonevector.cpp | 90 ++- src/asmjit/core/zonevector.h | 205 ++++-- src/asmjit/x86/x86archtraits_p.h | 4 +- src/asmjit/x86/x86assembler.cpp | 246 ++++--- 
src/asmjit/x86/x86assembler.h | 12 +- src/asmjit/x86/x86builder.cpp | 3 +- src/asmjit/x86/x86builder.h | 13 +- src/asmjit/x86/x86compiler.cpp | 3 +- src/asmjit/x86/x86compiler.h | 38 +- src/asmjit/x86/x86emithelper.cpp | 141 ++-- src/asmjit/x86/x86emithelper_p.h | 1 + src/asmjit/x86/x86emitter.h | 46 +- src/asmjit/x86/x86formatter.cpp | 245 ++++--- src/asmjit/x86/x86func.cpp | 55 +- src/asmjit/x86/x86globals.h | 22 +- src/asmjit/x86/x86instapi.cpp | 315 +++++--- src/asmjit/x86/x86instdb.h | 159 +++- src/asmjit/x86/x86operand.h | 528 ++++++++++---- src/asmjit/x86/x86rapass.cpp | 209 ++++-- src/asmjit/x86/x86rapass_p.h | 13 +- test/asmjit_test_compiler_a64.cpp | 52 +- test/asmjit_test_compiler_x86.cpp | 203 +++--- test/asmjit_test_emitters.cpp | 2 +- test/asmjit_test_execute.cpp | 2 +- test/asmjit_test_x86_sections.cpp | 2 +- test/asmjitutils.h | 4 +- test/performancetimer.h | 11 +- 125 files changed, 7518 insertions(+), 3345 deletions(-) diff --git a/.github/workflows/build-config.json b/.github/workflows/build-config.json index e72d8e3..1d9f331 100644 --- a/.github/workflows/build-config.json +++ b/.github/workflows/build-config.json @@ -1,8 +1,8 @@ { "diagnostics": { "asan": { "definitions": ["ASMJIT_SANITIZE=address"] }, - "ubsan": { "definitions": ["ASMJIT_SANITIZE=undefined"] }, - "msan": { "definitions": ["ASMJIT_SANITIZE=memory"] } + "msan": { "definitions": ["ASMJIT_SANITIZE=memory"] }, + "ubsan": { "definitions": ["ASMJIT_SANITIZE=undefined"] } }, "valgrind_arguments": [ diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 04eeebe..37ed1b3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -54,7 +54,6 @@ jobs: - { title: "no-x86" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_X86=1" } - { title: "no-aarch64" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_AARCH64=1" } - - { title: "lang-c++17" , 
host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++17" } - { title: "lang-c++20" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++20" } - { title: "lang-c++23" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++23" } diff --git a/CMakeLists.txt b/CMakeLists.txt index aa6df9d..9b2cce9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -195,7 +195,7 @@ function(asmjit_add_target target target_type) DEFINE_SYMBOL "" CXX_VISIBILITY_PRESET hidden) target_compile_options(${target} PRIVATE ${X_CFLAGS} ${ASMJIT_SANITIZE_CFLAGS} $<$:${X_CFLAGS_DBG}> $<$>:${X_CFLAGS_REL}>) - target_compile_features(${target} PUBLIC cxx_std_11) + target_compile_features(${target} PUBLIC cxx_std_17) target_link_options(${target} PRIVATE ${ASMJIT_PRIVATE_LFLAGS}) target_link_libraries(${target} PRIVATE ${X_LIBRARIES}) diff --git a/src/asmjit/a64.h b/src/asmjit/a64.h index e849eb3..12a4af9 100644 --- a/src/asmjit/a64.h +++ b/src/asmjit/a64.h @@ -48,13 +48,16 @@ //! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64. #include "./arm.h" -#include "./arm/a64assembler.h" -#include "./arm/a64builder.h" -#include "./arm/a64compiler.h" -#include "./arm/a64emitter.h" -#include "./arm/a64globals.h" -#include "./arm/a64instdb.h" -#include "./arm/a64operand.h" + +#include "asmjit-scope-begin.h" +#include "arm/a64assembler.h" +#include "arm/a64builder.h" +#include "arm/a64compiler.h" +#include "arm/a64emitter.h" +#include "arm/a64globals.h" +#include "arm/a64instdb.h" +#include "arm/a64operand.h" +#include "asmjit-scope-end.h" #endif // ASMJIT_A64_H_INCLUDED diff --git a/src/asmjit/arm.h b/src/asmjit/arm.h index e4a3e3d..0e4e673 100644 --- a/src/asmjit/arm.h +++ b/src/asmjit/arm.h @@ -76,9 +76,12 @@ //! 
- \ref arm::DataType - Data type that is part of an instruction in AArch32 mode. //! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64. -#include "./core.h" -#include "./arm/armglobals.h" -#include "./arm/armoperand.h" -#include "./arm/armutils.h" +#include "core.h" + +#include "asmjit-scope-begin.h" +#include "arm/armglobals.h" +#include "arm/armoperand.h" +#include "arm/armutils.h" +#include "asmjit-scope-end.h" #endif // ASMJIT_ARM_H_INCLUDED diff --git a/src/asmjit/arm/a64assembler.cpp b/src/asmjit/arm/a64assembler.cpp index 32514b9..e4395ea 100644 --- a/src/asmjit/arm/a64assembler.cpp +++ b/src/asmjit/arm/a64assembler.cpp @@ -24,8 +24,8 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64) // a64::Assembler - Utils // ====================== -static ASMJIT_FORCE_INLINE constexpr uint32_t diff(RegType a, RegType b) noexcept { return uint32_t(a) - uint32_t(b); } -static ASMJIT_FORCE_INLINE constexpr uint32_t diff(VecElementType elementType, VecElementType baseType) noexcept { return uint32_t(elementType) - uint32_t(baseType); } +static ASMJIT_INLINE_CONSTEXPR uint32_t diff(RegType a, RegType b) noexcept { return uint32_t(a) - uint32_t(b); } +static ASMJIT_INLINE_CONSTEXPR uint32_t diff(VecElementType elementType, VecElementType baseType) noexcept { return uint32_t(elementType) - uint32_t(baseType); } // a64::Assembler - Cond // ===================== @@ -74,39 +74,50 @@ static inline RegType extendOptionToRegType(uint32_t option) noexcept { //! Struct that contains Size (2 bits), Q flag, and S (scalar) flag. These values //! are used to encode Q, Size, and Scalar fields in an opcode. struct SizeOp { - enum : uint8_t { - k128BitShift = 0, - kScalarShift = 1, - kSizeShift = 2, + //! \name Constants + //! 
\{ - kQ = uint8_t(1u << k128BitShift), - kS = uint8_t(1u << kScalarShift), + static inline constexpr uint8_t k128BitShift = 0; + static inline constexpr uint8_t kScalarShift = 1; + static inline constexpr uint8_t kSizeShift = 2; - k00 = uint8_t(0 << kSizeShift), - k01 = uint8_t(1 << kSizeShift), - k10 = uint8_t(2 << kSizeShift), - k11 = uint8_t(3 << kSizeShift), + static inline constexpr uint8_t kQ = uint8_t(1u << k128BitShift); + static inline constexpr uint8_t kS = uint8_t(1u << kScalarShift); - k00Q = k00 | kQ, - k01Q = k01 | kQ, - k10Q = k10 | kQ, - k11Q = k11 | kQ, + static inline constexpr uint8_t k00 = uint8_t(0 << kSizeShift); + static inline constexpr uint8_t k01 = uint8_t(1 << kSizeShift); + static inline constexpr uint8_t k10 = uint8_t(2 << kSizeShift); + static inline constexpr uint8_t k11 = uint8_t(3 << kSizeShift); - k00S = k00 | kS, - k01S = k01 | kS, - k10S = k10 | kS, - k11S = k11 | kS, + static inline constexpr uint8_t k00Q = k00 | kQ; + static inline constexpr uint8_t k01Q = k01 | kQ; + static inline constexpr uint8_t k10Q = k10 | kQ; + static inline constexpr uint8_t k11Q = k11 | kQ; - kInvalid = 0xFFu, + static inline constexpr uint8_t k00S = k00 | kS; + static inline constexpr uint8_t k01S = k01 | kS; + static inline constexpr uint8_t k10S = k10 | kS; + static inline constexpr uint8_t k11S = k11 | kS; - // Masks used by SizeOpMap. - kSzQ = (0x3u << kSizeShift) | kQ, - kSzS = (0x3u << kSizeShift) | kS, - kSzQS = (0x3u << kSizeShift) | kQ | kS - }; + static inline constexpr uint8_t kInvalid = 0xFFu; + + // Masks used by SizeOpMap. + static inline constexpr uint8_t kSzQ = (0x3u << kSizeShift) | kQ; + static inline constexpr uint8_t kSzS = (0x3u << kSizeShift) | kS; + static inline constexpr uint8_t kSzQS = (0x3u << kSizeShift) | kQ | kS; + + //! \} + + //! \name Members + //! \{ uint8_t value; + //! \} + + //! \name Accessors + //! 
\{ + inline bool isValid() const noexcept { return value != kInvalid; } inline void makeInvalid() noexcept { value = kInvalid; } @@ -119,6 +130,8 @@ struct SizeOp { ASMJIT_ASSERT(size() > 0); value = uint8_t(value - (1u << kSizeShift)); } + + //! \} }; struct SizeOpTable { @@ -282,8 +295,9 @@ static inline SizeOp armElementTypeToSizeOp(uint32_t vecOpType, RegType regType, SizeOp op = table.array[index]; SizeOp modifiedOp { uint8_t(op.value & map.sizeOpMask) }; - if (!Support::bitTest(map.acceptMask, op.value)) + if (!Support::bitTest(map.acceptMask, op.value)) { modifiedOp.makeInvalid(); + } return modifiedOp; } @@ -363,8 +377,9 @@ static uint32_t encodeMovSequence64(uint32_t out[4], uint64_t imm, uint32_t rd, for (uint32_t hwIndex = 0; hwIndex < 4; hwIndex++, imm >>= 16) { uint32_t hwImm = uint32_t(imm & 0xFFFFu); - if (hwImm == 0) + if (hwImm == 0) { continue; + } out[count++] = op | (hwIndex << 21) | (hwImm << 5) | rd; op = kMovK; @@ -382,8 +397,9 @@ static uint32_t encodeMovSequence64(uint32_t out[4], uint64_t imm, uint32_t rd, for (uint32_t hwIndex = 0; hwIndex < 4; hwIndex++, imm >>= 16) { uint32_t hwImm = uint32_t(imm & 0xFFFFu); - if (hwImm == 0xFFFFu) + if (hwImm == 0xFFFFu) { continue; + } out[count++] = op | (hwIndex << 21) | ((hwImm ^ negMask) << 5) | rd; op = kMovK; @@ -503,18 +519,22 @@ static inline bool armCheckMemBaseIndexRel(const Mem& mem) noexcept { RegType baseType = mem.baseType(); RegType indexType = mem.indexType(); - if (!Support::bitTest(kBaseMask, baseType)) + if (!Support::bitTest(kBaseMask, baseType)) { return false; + } if (baseType > RegType::kLabelTag) { // Index allows either GpW or GpX. - if (!Support::bitTest(kIndexMask, indexType)) + if (!Support::bitTest(kIndexMask, indexType)) { return false; + } - if (indexType == RegType::kNone) + if (indexType == RegType::kNone) { return true; - else + } + else { return !mem.hasOffset(); + } } else { // No index register allowed if this is a PC relative address (literal). 
@@ -542,8 +562,9 @@ static inline bool pickFpOpcode(const Vec& reg, uint32_t sOp, uint32_t sHf, uint if (!reg.hasElementType()) { // Scalar operation [HSD]. uint32_t sz = diff(reg.type(), RegType::kARM_VecH); - if (sz > 2u || !Support::bitTest(szBits[sHf].sizeMask, sz)) + if (sz > 2u || !Support::bitTest(szBits[sHf].sizeMask, sz)) { return false; + } opcode->reset(szBits[sHf].mask[sz] ^ sOp); *szOut = sz; @@ -554,8 +575,9 @@ static inline bool pickFpOpcode(const Vec& reg, uint32_t sOp, uint32_t sHf, uint uint32_t q = diff(reg.type(), RegType::kARM_VecD); uint32_t sz = diff(reg.elementType(), VecElementType::kH); - if (q > 1u || sz > 2u || !Support::bitTest(szBits[vHf].sizeMask, sz)) + if (q > 1u || sz > 2u || !Support::bitTest(szBits[vHf].sizeMask, sz)) { return false; + } opcode->reset(szBits[vHf].mask[sz] ^ (vOp | (q << kQBitIndex))); *szOut = sz; @@ -773,8 +795,9 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co CondCode instCC = BaseInst::extractARMCondCode(instId); instId = instId & uint32_t(InstIdParts::kRealId); - if (instId >= Inst::_kIdCount) + if (instId >= Inst::_kIdCount) { instId = 0; + } const InstDB::InstInfo* instInfo = &InstDB::_instInfoTable[instId]; uint32_t encodingIndex = instInfo->_encodingDataIndex; @@ -794,21 +817,25 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co uint64_t offsetValue; // Offset value (if known). if (ASMJIT_UNLIKELY(Support::test(options, kRequiresSpecialHandling))) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } // Unknown instruction. - if (ASMJIT_UNLIKELY(instId == 0)) + if (ASMJIT_UNLIKELY(instId == 0)) { goto InvalidInstruction; + } // Condition code can only be used with 'B' instruction. 
- if (ASMJIT_UNLIKELY(instCC != CondCode::kAL && instId != Inst::kIdB)) + if (ASMJIT_UNLIKELY(instCC != CondCode::kAL && instId != Inst::kIdB)) { goto InvalidInstruction; + } // Grow request, happens rarely. err = writer.ensureSpace(this, 4); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { goto Failed; + } #ifndef ASMJIT_NO_VALIDATION // Strict validation. @@ -817,8 +844,9 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt); err = _funcs.validate(BaseInst(instId, options, _extraReg), opArray, Globals::kMaxOpCount, ValidationFlags::kNone); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { goto Failed; + } } #endif } @@ -4932,11 +4960,13 @@ EmitOp_Multiple: { ASMJIT_ASSERT(multipleOpCount > 0); err = writer.ensureSpace(this, multipleOpCount * 4u); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { goto Failed; + } - for (uint32_t i = 0; i < multipleOpCount; i++) + for (uint32_t i = 0; i < multipleOpCount; i++) { writer.emit32uLE(multipleOpData[i]); + } goto EmitDone; } @@ -4946,28 +4976,33 @@ EmitOp_Multiple: // -------------------------------------------------------------------------- EmitOp_MemBase_Rn5: - if (!checkMemBase(rmRel->as())) + if (!checkMemBase(rmRel->as())) { goto InvalidAddress; + } opcode.addReg(rmRel->as().baseId(), 5); goto EmitOp; EmitOp_MemBaseNoImm_Rn5: - if (!checkMemBase(rmRel->as()) || rmRel->as().hasIndex()) + if (!checkMemBase(rmRel->as()) || rmRel->as().hasIndex()) { goto InvalidAddress; + } - if (rmRel->as().hasOffset()) + if (rmRel->as().hasOffset()) { goto InvalidDisplacement; + } opcode.addReg(rmRel->as().baseId(), 5); goto EmitOp; EmitOp_MemBaseIndex_Rn5_Rm16: - if (!rmRel->as().hasBaseReg()) + if (!rmRel->as().hasBaseReg()) { goto InvalidAddress; + } - if (rmRel->as().indexId() > 30 && rmRel->as().indexId() != Gp::kIdZr) + if (rmRel->as().indexId() > 30 && rmRel->as().indexId() != Gp::kIdZr) { goto InvalidPhysId; + } 
opcode.addReg(rmRel->as().indexId(), 16); opcode.addReg(rmRel->as().baseId(), 5); @@ -4992,8 +5027,9 @@ EmitOp_Rel: } LabelEntry* label = _code->labelEntry(labelId); - if (ASMJIT_UNLIKELY(!label)) + if (ASMJIT_UNLIKELY(!label)) { goto InvalidLabel; + } if (offsetFormat.type() == OffsetType::kAArch64_ADRP) { // TODO: [ARM] Always create relocation entry. @@ -5009,8 +5045,9 @@ EmitOp_Rel: size_t codeOffset = writer.offsetFrom(_bufferData); LabelLink* link = _code->newLabelLink(label, _section->id(), codeOffset, intptr_t(labelOffset), offsetFormat); - if (ASMJIT_UNLIKELY(!link)) + if (ASMJIT_UNLIKELY(!link)) { goto OutOfMemory; + } goto EmitOp; } @@ -5027,8 +5064,9 @@ EmitOp_Rel: // Create a new RelocEntry as we cannot calculate the offset right now. RelocEntry* re; err = _code->newRelocEntry(&re, RelocType::kAbsToRel); - if (err) + if (err) { goto Failed; + } re->_sourceSectionId = _section->id(); re->_sourceOffset = codeOffset; @@ -5039,8 +5077,9 @@ EmitOp_Rel: else { uint64_t pc = baseAddress + codeOffset; - if (offsetFormat.type() == OffsetType::kAArch64_ADRP) + if (offsetFormat.type() == OffsetType::kAArch64_ADRP) { pc &= ~uint64_t(4096 - 1); + } offsetValue = targetOffset - pc; goto EmitOp_DispImm; @@ -5051,12 +5090,14 @@ EmitOp_Rel: EmitOp_DispImm: { - if ((offsetValue & Support::lsbMask(offsetFormat.immDiscardLsb())) != 0) + if ((offsetValue & Support::lsbMask(offsetFormat.immDiscardLsb())) != 0) { goto InvalidDisplacement; + } int64_t dispImm64 = int64_t(offsetValue) >> offsetFormat.immDiscardLsb(); - if (!Support::isEncodableOffset64(dispImm64, offsetFormat.immBitCount())) + if (!Support::isEncodableOffset64(dispImm64, offsetFormat.immBitCount())) { goto InvalidDisplacement; + } uint32_t dispImm32 = uint32_t(dispImm64 & Support::lsbMask(offsetFormat.immBitCount())); switch (offsetFormat.type()) { @@ -5094,8 +5135,9 @@ EmitOp: EmitDone: if (Support::test(options, InstOptions::kReserved)) { #ifndef ASMJIT_NO_LOGGING - if (_logger) + if (_logger) { 
EmitterUtils::logInstructionEmitted(this, BaseInst::composeARMInstId(instId, instCC), options, o0, o1, o2, opExt, 0, 0, writer.cursor()); + } #endif } @@ -5141,21 +5183,26 @@ Failed: Error Assembler::align(AlignMode alignMode, uint32_t alignment) { constexpr uint32_t kNopA64 = 0xD503201Fu; // [11010101|00000011|00100000|00011111]. - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } - if (ASMJIT_UNLIKELY(uint32_t(alignMode) > uint32_t(AlignMode::kMaxValue))) + if (ASMJIT_UNLIKELY(uint32_t(alignMode) > uint32_t(AlignMode::kMaxValue))) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } - if (alignment <= 1) + if (alignment <= 1) { return kErrorOk; + } - if (ASMJIT_UNLIKELY(alignment > Globals::kMaxAlignment || !Support::isPowerOf2(alignment))) + if (ASMJIT_UNLIKELY(alignment > Globals::kMaxAlignment || !Support::isPowerOf2(alignment))) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } uint32_t i = uint32_t(Support::alignUpDiff(offset(), alignment)); - if (i == 0) + if (i == 0) { return kErrorOk; + } CodeWriter writer(this); ASMJIT_PROPAGATE(writer.ensureSpace(this, i)); @@ -5164,8 +5211,9 @@ Error Assembler::align(AlignMode alignMode, uint32_t alignment) { case AlignMode::kCode: { uint32_t pattern = kNopA64; - if (ASMJIT_UNLIKELY(offset() & 0x3u)) + if (ASMJIT_UNLIKELY(offset() & 0x3u)) { return DebugUtils::errored(kErrorInvalidState); + } while (i >= 4) { writer.emit32uLE(pattern); diff --git a/src/asmjit/arm/a64assembler.h b/src/asmjit/arm/a64assembler.h index 3193215..a660ddd 100644 --- a/src/asmjit/arm/a64assembler.h +++ b/src/asmjit/arm/a64assembler.h @@ -21,7 +21,7 @@ class ASMJIT_VIRTAPI Assembler public EmitterExplicitT { public: - typedef BaseAssembler Base; + using Base = BaseAssembler; //! \name Construction & Destruction //! 
\{ diff --git a/src/asmjit/arm/a64builder.cpp b/src/asmjit/arm/a64builder.cpp index d29b0a4..9396789 100644 --- a/src/asmjit/arm/a64builder.cpp +++ b/src/asmjit/arm/a64builder.cpp @@ -17,8 +17,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64) Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() { _archMask = uint64_t(1) << uint32_t(Arch::kAArch64); - if (code) + if (code) { code->attach(this); + } } Builder::~Builder() noexcept {} diff --git a/src/asmjit/arm/a64builder.h b/src/asmjit/arm/a64builder.h index cab1083..dd15dd1 100644 --- a/src/asmjit/arm/a64builder.h +++ b/src/asmjit/arm/a64builder.h @@ -23,7 +23,7 @@ class ASMJIT_VIRTAPI Builder public EmitterExplicitT { public: ASMJIT_NONCOPYABLE(Builder) - typedef BaseBuilder Base; + using Base = BaseBuilder; //! \name Construction & Destruction //! \{ diff --git a/src/asmjit/arm/a64compiler.cpp b/src/asmjit/arm/a64compiler.cpp index 765fd4b..8d889d8 100644 --- a/src/asmjit/arm/a64compiler.cpp +++ b/src/asmjit/arm/a64compiler.cpp @@ -18,8 +18,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64) Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() { _archMask = uint64_t(1) << uint32_t(Arch::kAArch64); - if (code) + if (code) { code->attach(this); + } } Compiler::~Compiler() noexcept {} diff --git a/src/asmjit/arm/a64compiler.h b/src/asmjit/arm/a64compiler.h index 64f82f6..661e58d 100644 --- a/src/asmjit/arm/a64compiler.h +++ b/src/asmjit/arm/a64compiler.h @@ -24,7 +24,7 @@ class ASMJIT_VIRTAPI Compiler public EmitterExplicitT { public: ASMJIT_NONCOPYABLE(Compiler) - typedef BaseCompiler Base; + using Base = BaseCompiler; //! \name Construction & Destruction //! 
\{ diff --git a/src/asmjit/arm/a64emithelper.cpp b/src/asmjit/arm/a64emithelper.cpp index 0cf0982..2606172 100644 --- a/src/asmjit/arm/a64emithelper.cpp +++ b/src/asmjit/arm/a64emithelper.cpp @@ -54,14 +54,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove( return emitter->ldr(dst.as().x(), src); default: { - if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) + if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) { return emitter->ldr(dst.as().s(), src); + } - if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) + if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) { return emitter->ldr(dst.as().d(), src); + } - if (TypeUtils::isVec128(typeId)) + if (TypeUtils::isVec128(typeId)) { return emitter->ldr(dst.as().q(), src); + } break; } @@ -90,14 +93,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove( return emitter->str(src.as().x(), dst); default: { - if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) + if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) { return emitter->str(src.as().s(), dst); + } - if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) + if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) { return emitter->str(src.as().d(), dst); + } - if (TypeUtils::isVec128(typeId)) + if (TypeUtils::isVec128(typeId)) { return emitter->str(src.as().q(), dst); + } break; } @@ -120,14 +126,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove( return emitter->mov(dst.as().x(), src.as().x()); default: { - if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) + if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) { return emitter->fmov(dst.as().s(), src.as().s()); + } - if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) + if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) { return emitter->mov(dst.as().b8(), src.as().b8()); + } - if (TypeUtils::isVec128(typeId)) + if (TypeUtils::isVec128(typeId)) { return 
emitter->mov(dst.as().b16(), src.as().b16()); + } break; } @@ -340,10 +349,12 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitProlog(const FuncFrame& frame) { mem.makePreIndex(); } - if (pair.ids[1] == BaseReg::kIdBad) + if (pair.ids[1] == BaseReg::kIdBad) { ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem)); - else + } + else { ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem)); + } mem.resetOffsetMode(); @@ -422,10 +433,12 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) { mem.makePostIndex(); } - if (pair.ids[1] == BaseReg::kIdBad) + if (pair.ids[1] == BaseReg::kIdBad) { ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem)); - else + } + else { ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem)); + } mem.resetOffsetMode(); } diff --git a/src/asmjit/arm/a64func.cpp b/src/asmjit/arm/a64func.cpp index a33a2f2..9e36074 100644 --- a/src/asmjit/arm/a64func.cpp +++ b/src/asmjit/arm/a64func.cpp @@ -25,18 +25,24 @@ static inline bool shouldTreatAsCDecl(CallConvId ccId) noexcept { } static RegType regTypeFromFpOrVecTypeId(TypeId typeId) noexcept { - if (typeId == TypeId::kFloat32) + if (typeId == TypeId::kFloat32) { return RegType::kARM_VecS; - else if (typeId == TypeId::kFloat64) + } + else if (typeId == TypeId::kFloat64) { return RegType::kARM_VecD; - else if (TypeUtils::isVec32(typeId)) + } + else if (TypeUtils::isVec32(typeId)) { return RegType::kARM_VecS; - else if (TypeUtils::isVec64(typeId)) + } + else if (TypeUtils::isVec64(typeId)) { return RegType::kARM_VecD; - else if (TypeUtils::isVec128(typeId)) + } + else if (TypeUtils::isVec128(typeId)) { return RegType::kARM_VecV; - else + } + else { return RegType::kNone; + } } ASMJIT_FAVOR_SIZE Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept { @@ -116,8 +122,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si default: { RegType regType = 
regTypeFromFpOrVecTypeId(typeId); - if (regType == RegType::kNone) + if (regType == RegType::kNone) { return DebugUtils::errored(kErrorInvalidRegType); + } func._rets[valueIndex].initReg(regType, valueIndex, typeId); break; @@ -139,8 +146,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si if (TypeUtils::isInt(typeId)) { uint32_t regId = BaseReg::kIdBad; - if (gpzPos < CallConv::kMaxRegArgsPerGroup) + if (gpzPos < CallConv::kMaxRegArgsPerGroup) { regId = cc._passedOrder[RegGroup::kGp].id[gpzPos]; + } if (regId != BaseReg::kIdBad) { RegType regType = typeId <= TypeId::kUInt32 ? RegType::kARM_GpW : RegType::kARM_GpX; @@ -150,8 +158,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si } else { uint32_t size = Support::max(TypeUtils::sizeOf(typeId), minStackArgSize); - if (size >= 8) + if (size >= 8) { stackOffset = Support::alignUp(stackOffset, 8); + } arg.assignStackOffset(int32_t(stackOffset)); stackOffset += size; } @@ -161,13 +170,15 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si if (TypeUtils::isFloat(typeId) || TypeUtils::isVec(typeId)) { uint32_t regId = BaseReg::kIdBad; - if (vecPos < CallConv::kMaxRegArgsPerGroup) + if (vecPos < CallConv::kMaxRegArgsPerGroup) { regId = cc._passedOrder[RegGroup::kVec].id[vecPos]; + } if (regId != BaseReg::kIdBad) { RegType regType = regTypeFromFpOrVecTypeId(typeId); - if (regType == RegType::kNone) + if (regType == RegType::kNone) { return DebugUtils::errored(kErrorInvalidRegType); + } arg.initTypeId(typeId); arg.assignRegData(regType, regId); @@ -176,8 +187,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si } else { uint32_t size = Support::max(TypeUtils::sizeOf(typeId), minStackArgSize); - if (size >= 8) + if (size >= 8) { stackOffset = Support::alignUp(stackOffset, 8); + } arg.assignStackOffset(int32_t(stackOffset)); stackOffset += size; } diff --git a/src/asmjit/arm/a64globals.h 
b/src/asmjit/arm/a64globals.h index 720b6f1..b4dac03 100644 --- a/src/asmjit/arm/a64globals.h +++ b/src/asmjit/arm/a64globals.h @@ -802,7 +802,7 @@ namespace Predicate { //! Address translate options (AT). namespace AT { - static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept { + static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept { return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0); } @@ -860,7 +860,7 @@ namespace DB { //! Data cache maintenance options. namespace DC { - static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept { + static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept { return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0); } @@ -899,7 +899,7 @@ namespace DC { //! Instruction cache maintenance options. namespace IC { - static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept { + static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept { return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0); } @@ -953,7 +953,7 @@ namespace PSB { } namespace TLBI { - static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept { + static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept { return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0); } @@ -1052,7 +1052,7 @@ namespace TSB { //! Processor state access through MSR. namespace PState { //! Encodes a pstate from `op0` and `op1`. 
- static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op0, uint32_t op1) noexcept { + static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op0, uint32_t op1) noexcept { return (op0 << 3) | (op1 << 0); } @@ -1081,17 +1081,17 @@ namespace SysReg { }; //! Encodes a system register from `op0`, `op1`, `cRn`, `cRm`, and `op2` fields. - static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op0, uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept { + static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op0, uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept { return (op0 << 14) | (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0); } //! Encodes a system register from `fields`. - static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(const Fields& fields) noexcept { + static ASMJIT_INLINE_CONSTEXPR uint32_t encode(const Fields& fields) noexcept { return encode(fields.op0, fields.op1, fields.cRn, fields.cRm, fields.op2); } //! Decodes a system register to \ref Fields. 
- static ASMJIT_INLINE_NODEBUG constexpr Fields decode(uint32_t id) noexcept { + static ASMJIT_INLINE_CONSTEXPR Fields decode(uint32_t id) noexcept { return Fields { uint8_t((id >> 14) & 0x3u), uint8_t((id >> 11) & 0x7u), diff --git a/src/asmjit/arm/a64instapi.cpp b/src/asmjit/arm/a64instapi.cpp index 2d95993..d655acf 100644 --- a/src/asmjit/arm/a64instapi.cpp +++ b/src/asmjit/arm/a64instapi.cpp @@ -23,8 +23,9 @@ namespace InstInternal { #ifndef ASMJIT_NO_TEXT Error instIdToString(InstId instId, InstStringifyOptions options, String& output) noexcept { uint32_t realId = instId & uint32_t(InstIdParts::kRealId); - if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) + if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) { return DebugUtils::errored(kErrorInvalidInstruction); + } return InstNameUtils::decode(InstDB::_instNameIndexTable[realId], options, InstDB::_instNameStringTable, output); } @@ -100,8 +101,9 @@ Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount // Get the instruction data. uint32_t realId = inst.id() & uint32_t(InstIdParts::kRealId); - if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) + if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) { return DebugUtils::errored(kErrorInvalidInstruction); + } out->_instFlags = InstRWFlags::kNone; out->_opCount = uint8_t(opCount); @@ -139,10 +141,12 @@ Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount op._consecutiveLeadCount = 0; if (srcOp.isReg()) { - if (i == 0) + if (i == 0) { op._consecutiveLeadCount = uint8_t(opCount - 1); - else + } + else { op.addOpFlags(OpRWFlags::kConsecutive); + } } else { const Mem& memOp = srcOp.as(); diff --git a/src/asmjit/arm/a64instdb.h b/src/asmjit/arm/a64instdb.h index a031254..89773ee 100644 --- a/src/asmjit/arm/a64instdb.h +++ b/src/asmjit/arm/a64instdb.h @@ -47,9 +47,13 @@ struct InstInfo { //! \name Accessors //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t rwInfoIndex() const noexcept { return _rwInfoIndex; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(uint32_t flag) const { return (_flags & flag) != 0; } //! \} @@ -57,6 +61,7 @@ struct InstInfo { ASMJIT_VARAPI const InstInfo _instInfoTable[]; +[[nodiscard]] static inline const InstInfo& infoById(InstId instId) noexcept { instId &= uint32_t(InstIdParts::kRealId); ASMJIT_ASSERT(Inst::isDefinedId(instId)); diff --git a/src/asmjit/arm/a64instdb_p.h b/src/asmjit/arm/a64instdb_p.h index 5c3da7e..322e5a8 100644 --- a/src/asmjit/arm/a64instdb_p.h +++ b/src/asmjit/arm/a64instdb_p.h @@ -264,7 +264,7 @@ namespace EncodingData { #define M_OPCODE(field, bits) \ uint32_t _##field : bits; \ - ASMJIT_INLINE_NODEBUG constexpr uint32_t field() const noexcept { return uint32_t(_##field) << (32 - bits); } + ASMJIT_INLINE_CONSTEXPR uint32_t field() const noexcept { return uint32_t(_##field) << (32 - bits); } struct BaseOp { uint32_t opcode; @@ -477,20 +477,20 @@ struct BaseAtomicCasp { uint32_t xOffset : 5; }; -typedef BaseOp BaseBranchReg; -typedef BaseOp BaseBranchRel; -typedef BaseOp BaseBranchCmp; -typedef BaseOp BaseBranchTst; -typedef BaseOp BaseExtract; -typedef BaseOp BaseBfc; -typedef BaseOp BaseBfi; -typedef BaseOp BaseBfx; -typedef BaseOp BaseCCmp; -typedef BaseOp BaseCInc; -typedef BaseOp BaseCSet; -typedef BaseOp BaseCSel; -typedef BaseOp BaseMovKNZ; -typedef BaseOp BaseMull; +using BaseBranchReg = BaseOp; +using BaseBranchRel = BaseOp; +using BaseBranchCmp = BaseOp; +using BaseBranchTst = BaseOp; +using BaseExtract = BaseOp; +using BaseBfc = BaseOp; +using BaseBfi = BaseOp; +using BaseBfx = BaseOp; +using BaseCCmp = BaseOp; +using BaseCInc = BaseOp; +using BaseCSet = BaseOp; +using BaseCSel = BaseOp; +using BaseMovKNZ = BaseOp; +using BaseMull = BaseOp; struct FSimdGeneric { uint32_t _scalarOp : 28; @@ -504,9 +504,9 @@ struct 
FSimdGeneric { constexpr uint32_t vectorHf() const noexcept { return uint32_t(_vectorHf); } }; -typedef FSimdGeneric FSimdVV; -typedef FSimdGeneric FSimdVVV; -typedef FSimdGeneric FSimdVVVV; +using FSimdVV = FSimdGeneric; +using FSimdVVV = FSimdGeneric; +using FSimdVVVV = FSimdGeneric; struct FSimdSV { uint32_t opcode; diff --git a/src/asmjit/arm/a64operand.h b/src/asmjit/arm/a64operand.h index 9e23306..32a8890 100644 --- a/src/asmjit/arm/a64operand.h +++ b/src/asmjit/arm/a64operand.h @@ -49,17 +49,27 @@ public: }; //! Test whether this register is ZR register. - ASMJIT_INLINE_NODEBUG constexpr bool isZR() const noexcept { return id() == kIdZr; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isZR() const noexcept { return id() == kIdZr; } + //! Test whether this register is SP register. - ASMJIT_INLINE_NODEBUG constexpr bool isSP() const noexcept { return id() == kIdSp; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isSP() const noexcept { return id() == kIdSp; } //! Cast this register to a 32-bit W register (returns a new operand). + [[nodiscard]] ASMJIT_INLINE_NODEBUG GpW w() const noexcept; + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG GpW r32() const noexcept; + //! Cast this register to a 64-bit X register (returns a new operand). + [[nodiscard]] ASMJIT_INLINE_NODEBUG GpX x() const noexcept; + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG GpX r64() const noexcept; }; @@ -118,117 +128,181 @@ public: //! \endcond //! Returns whether the register has element type or element index (or both). - ASMJIT_INLINE_NODEBUG constexpr bool hasElementTypeOrIndex() const noexcept { return _signature.hasField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasElementTypeOrIndex() const noexcept { return _signature.hasField(); } //! Returns whether the vector register has associated a vector element type. 
- ASMJIT_INLINE_NODEBUG constexpr bool hasElementType() const noexcept { return _signature.hasField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasElementType() const noexcept { return _signature.hasField(); } + //! Returns vector element type of the register. - ASMJIT_INLINE_NODEBUG constexpr VecElementType elementType() const noexcept { return VecElementType(_signature.getField()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR VecElementType elementType() const noexcept { return VecElementType(_signature.getField()); } + //! Sets vector element type of the register to `elementType`. ASMJIT_INLINE_NODEBUG void setElementType(VecElementType elementType) noexcept { _signature.setField(uint32_t(elementType)); } + //! Resets vector element type to none. ASMJIT_INLINE_NODEBUG void resetElementType() noexcept { _signature.setField(0); } - ASMJIT_INLINE_NODEBUG constexpr bool isVecB8() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVecB8() const noexcept { return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits::kSignature | kSignatureElementB); } - ASMJIT_INLINE_NODEBUG constexpr bool isVecH4() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVecH4() const noexcept { return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits::kSignature | kSignatureElementH); } - ASMJIT_INLINE_NODEBUG constexpr bool isVecS2() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVecS2() const noexcept { return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits::kSignature | kSignatureElementS); } - ASMJIT_INLINE_NODEBUG constexpr bool isVecD1() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVecD1() const noexcept { return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits::kSignature); } - 
ASMJIT_INLINE_NODEBUG constexpr bool isVecB16() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVecB16() const noexcept { return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits::kSignature | kSignatureElementB); } - ASMJIT_INLINE_NODEBUG constexpr bool isVecH8() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVecH8() const noexcept { return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits::kSignature | kSignatureElementH); } - ASMJIT_INLINE_NODEBUG constexpr bool isVecS4() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVecS4() const noexcept { return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits::kSignature | kSignatureElementS); } - ASMJIT_INLINE_NODEBUG constexpr bool isVecD2() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVecD2() const noexcept { return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits::kSignature | kSignatureElementD); } - ASMJIT_INLINE_NODEBUG constexpr bool isVecB4x4() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVecB4x4() const noexcept { return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits::kSignature | kSignatureElementB4); } - ASMJIT_INLINE_NODEBUG constexpr bool isVecH2x4() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVecH2x4() const noexcept { return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits::kSignature | kSignatureElementH2); } //! Creates a cloned register with element access. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG Vec at(uint32_t elementIndex) const noexcept { return Vec((signature() & ~kSignatureRegElementIndexMask) | (elementIndex << kSignatureRegElementIndexShift) | kSignatureRegElementFlagMask, id()); } //! Cast this register to an 8-bit B register (AArch64 only). + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecB b() const noexcept; + //! Cast this register to a 16-bit H register (AArch64 only). + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecH h() const noexcept; + //! Cast this register to a 32-bit S register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecS s() const noexcept; + //! Cast this register to a 64-bit D register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecD d() const noexcept; + //! Cast this register to a 128-bit Q register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV q() const noexcept; + //! Cast this register to a 128-bit V register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV v() const noexcept; //! Casts this register to b (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Vec v8() const noexcept; + //! Casts this register to h (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Vec v16() const noexcept; + //! Casts this register to s (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Vec v32() const noexcept; + //! Casts this register to d (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Vec v64() const noexcept; + //! Casts this register to q (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Vec v128() const noexcept; //! Cast this register to a 128-bit V.B[elementIndex] register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV b(uint32_t elementIndex) const noexcept; + //! Cast this register to a 128-bit V.H[elementIndex] register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV h(uint32_t elementIndex) const noexcept; + //! Cast this register to a 128-bit V.S[elementIndex] register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV s(uint32_t elementIndex) const noexcept; + //! Cast this register to a 128-bit V.D[elementIndex] register. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV d(uint32_t elementIndex) const noexcept; + //! Cast this register to a 128-bit V.H2[elementIndex] register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV h2(uint32_t elementIndex) const noexcept; + //! Cast this register to a 128-bit V.B4[elementIndex] register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV b4(uint32_t elementIndex) const noexcept; //! Cast this register to V.8B. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecD b8() const noexcept; + //! Cast this register to V.16B. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV b16() const noexcept; + //! Cast this register to V.2H. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecS h2() const noexcept; + //! Cast this register to V.4H. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecD h4() const noexcept; + //! Cast this register to V.8H. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV h8() const noexcept; + //! Cast this register to V.2S. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecD s2() const noexcept; + //! Cast this register to V.4S. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV s4() const noexcept; + //! Cast this register to V.2D. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VecV d2() const noexcept; - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature _makeElementAccessSignature(VecElementType elementType, uint32_t elementIndex) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature _makeElementAccessSignature(VecElementType elementType, uint32_t elementIndex) noexcept { return OperandSignature{ uint32_t(RegTraits::kSignature) | uint32_t(kSignatureRegElementFlagMask) | @@ -301,16 +375,24 @@ namespace regs { #endif //! Creates a 32-bit W register operand. -static ASMJIT_INLINE_NODEBUG constexpr GpW w(uint32_t id) noexcept { return GpW(id); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR GpW w(uint32_t id) noexcept { return GpW(id); } + //! Creates a 64-bit X register operand. 
-static ASMJIT_INLINE_NODEBUG constexpr GpX x(uint32_t id) noexcept { return GpX(id); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR GpX x(uint32_t id) noexcept { return GpX(id); } //! Creates a 32-bit S register operand. -static ASMJIT_INLINE_NODEBUG constexpr VecS s(uint32_t id) noexcept { return VecS(id); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR VecS s(uint32_t id) noexcept { return VecS(id); } + //! Creates a 64-bit D register operand. -static ASMJIT_INLINE_NODEBUG constexpr VecD d(uint32_t id) noexcept { return VecD(id); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR VecD d(uint32_t id) noexcept { return VecD(id); } + //! Creates a 1282-bit V register operand. -static ASMJIT_INLINE_NODEBUG constexpr VecV v(uint32_t id) noexcept { return VecV(id); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR VecV v(uint32_t id) noexcept { return VecV(id); } static constexpr GpW w0 = GpW(0); static constexpr GpW w1 = GpW(1); @@ -589,22 +671,36 @@ using namespace regs; //! \{ //! Constructs a `UXTB #value` extend and shift (unsigned byte extend) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Shift uxtb(uint32_t value) noexcept { return Shift(ShiftOp::kUXTB, value); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Shift uxtb(uint32_t value) noexcept { return Shift(ShiftOp::kUXTB, value); } + //! Constructs a `UXTH #value` extend and shift (unsigned hword extend) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Shift uxth(uint32_t value) noexcept { return Shift(ShiftOp::kUXTH, value); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Shift uxth(uint32_t value) noexcept { return Shift(ShiftOp::kUXTH, value); } + //! Constructs a `UXTW #value` extend and shift (unsigned word extend) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Shift uxtw(uint32_t value) noexcept { return Shift(ShiftOp::kUXTW, value); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Shift uxtw(uint32_t value) noexcept { return Shift(ShiftOp::kUXTW, value); } + //! 
Constructs a `UXTX #value` extend and shift (unsigned dword extend) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Shift uxtx(uint32_t value) noexcept { return Shift(ShiftOp::kUXTX, value); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Shift uxtx(uint32_t value) noexcept { return Shift(ShiftOp::kUXTX, value); } //! Constructs a `SXTB #value` extend and shift (signed byte extend) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Shift sxtb(uint32_t value) noexcept { return Shift(ShiftOp::kSXTB, value); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Shift sxtb(uint32_t value) noexcept { return Shift(ShiftOp::kSXTB, value); } + //! Constructs a `SXTH #value` extend and shift (signed hword extend) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Shift sxth(uint32_t value) noexcept { return Shift(ShiftOp::kSXTH, value); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Shift sxth(uint32_t value) noexcept { return Shift(ShiftOp::kSXTH, value); } + //! Constructs a `SXTW #value` extend and shift (signed word extend) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Shift sxtw(uint32_t value) noexcept { return Shift(ShiftOp::kSXTW, value); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Shift sxtw(uint32_t value) noexcept { return Shift(ShiftOp::kSXTW, value); } + //! Constructs a `SXTX #value` extend and shift (signed dword extend) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Shift sxtx(uint32_t value) noexcept { return Shift(ShiftOp::kSXTX, value); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Shift sxtx(uint32_t value) noexcept { return Shift(ShiftOp::kSXTX, value); } //! \} @@ -612,49 +708,57 @@ static ASMJIT_INLINE_NODEBUG constexpr Shift sxtx(uint32_t value) noexcept { ret //! \{ //! Creates `[base, offset]` memory operand (offset mode) (AArch64). 
-static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, int32_t offset = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, int32_t offset = 0) noexcept { return Mem(base, offset); } //! Creates `[base, offset]!` memory operand (pre-index mode) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_pre(const Gp& base, int32_t offset = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr_pre(const Gp& base, int32_t offset = 0) noexcept { return Mem(base, offset, OperandSignature::fromValue(OffsetMode::kPreIndex)); } //! Creates `[base], offset` memory operand (post-index mode) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_post(const Gp& base, int32_t offset = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr_post(const Gp& base, int32_t offset = 0) noexcept { return Mem(base, offset, OperandSignature::fromValue(OffsetMode::kPostIndex)); } //! Creates `[base, index]` memory operand (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, const Gp& index) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, const Gp& index) noexcept { return Mem(base, index); } //! Creates `[base, index]!` memory operand (pre-index mode) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_pre(const Gp& base, const Gp& index) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr_pre(const Gp& base, const Gp& index) noexcept { return Mem(base, index, OperandSignature::fromValue(OffsetMode::kPreIndex)); } //! Creates `[base], index` memory operand (post-index mode) (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_post(const Gp& base, const Gp& index) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr_post(const Gp& base, const Gp& index) noexcept { return Mem(base, index, OperandSignature::fromValue(OffsetMode::kPostIndex)); } //! Creates `[base, index, SHIFT_OP #shift]` memory operand (AArch64). 
-static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, const Gp& index, const Shift& shift) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, const Gp& index, const Shift& shift) noexcept { return Mem(base, index, shift); } //! Creates `[base, offset]` memory operand (AArch64). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Label& base, int32_t offset = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Label& base, int32_t offset = 0) noexcept { return Mem(base, offset); } // TODO: [ARM] PC + offset address. #if 0 //! Creates `[PC + offset]` (relative) memory operand. -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const PC& pc, int32_t offset = 0) noexcept { +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const PC& pc, int32_t offset = 0) noexcept { return Mem(pc, offset); } #endif diff --git a/src/asmjit/arm/a64rapass.cpp b/src/asmjit/arm/a64rapass.cpp index b97f259..67b9442 100644 --- a/src/asmjit/arm/a64rapass.cpp +++ b/src/asmjit/arm/a64rapass.cpp @@ -22,7 +22,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64) // ======================== // TODO: [ARM] These should be shared with all backends. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline uint64_t raImmMaskFromSize(uint32_t size) noexcept { ASMJIT_ASSERT(size > 0 && size < 256); static const uint64_t masks[] = { @@ -47,6 +47,7 @@ static const RegMask raConsecutiveLeadCountToRegMaskFilter[5] = { 0x1FFFFFFFu // [4] 4 consecutive registers. 
}; +[[nodiscard]] static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept { static constexpr RATiedFlags map[] = { RATiedFlags::kNone, @@ -58,15 +59,18 @@ static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept { return map[uint32_t(rwFlags & OpRWFlags::kRW)]; } +[[nodiscard]] static inline RATiedFlags raRegRwFlags(OpRWFlags flags) noexcept { return raUseOutFlagsFromRWFlags(flags); } +[[nodiscard]] static inline RATiedFlags raMemBaseRwFlags(OpRWFlags flags) noexcept { constexpr uint32_t shift = Support::ConstCTZ::value; return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW); } +[[nodiscard]] static inline RATiedFlags raMemIndexRwFlags(OpRWFlags flags) noexcept { constexpr uint32_t shift = Support::ConstCTZ::value; return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW); @@ -82,18 +86,31 @@ public: : RACFGBuilderT(pass), _arch(pass->cc()->arch()) {} + [[nodiscard]] inline Compiler* cc() const noexcept { return static_cast(_cc); } + [[nodiscard]] Error onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept; + [[nodiscard]] Error onBeforeInvoke(InvokeNode* invokeNode) noexcept; + + [[nodiscard]] Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept; + [[nodiscard]] Error moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept; + + [[nodiscard]] Error moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept; + + [[nodiscard]] Error moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept; + [[nodiscard]] Error onBeforeRet(FuncRetNode* funcRet) noexcept; + + [[nodiscard]] Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept; }; @@ -105,20 +122,26 @@ static InstControlFlow getControlFlowType(InstId instId) noexcept { switch (BaseInst::extractRealId(instId)) { case Inst::kIdB: case Inst::kIdBr: - if 
(BaseInst::extractARMCondCode(instId) == CondCode::kAL) + if (BaseInst::extractARMCondCode(instId) == CondCode::kAL) { return InstControlFlow::kJump; - else + } + else { return InstControlFlow::kBranch; + } + case Inst::kIdBl: case Inst::kIdBlr: return InstControlFlow::kCall; + case Inst::kIdCbz: case Inst::kIdCbnz: case Inst::kIdTbz: case Inst::kIdTbnz: return InstControlFlow::kBranch; + case Inst::kIdRet: return InstControlFlow::kReturn; + default: return InstControlFlow::kRegular; } @@ -180,12 +203,14 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB if (opRwInfo.consecutiveLeadCount()) { // There must be a single consecutive register lead, otherwise the RW data is invalid. - if (consecutiveOffset != 0xFFFFFFFFu) + if (consecutiveOffset != 0xFFFFFFFFu) { return DebugUtils::errored(kErrorInvalidState); + } // A consecutive lead register cannot be used as a consecutive +1/+2/+3 register, the registers must be distinct. - if (RATiedReg::consecutiveDataFromFlags(flags) != 0) + if (RATiedReg::consecutiveDataFromFlags(flags) != 0) { return DebugUtils::errored(kErrorNotConsecutiveRegs); + } flags |= RATiedFlags::kLeadConsecutive | RATiedReg::consecutiveDataToFlags(opRwInfo.consecutiveLeadCount() - 1); consecutiveOffset = 0; @@ -208,8 +233,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB flags |= RATiedFlags::kUseFixed; } else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) { - if (consecutiveOffset == 0xFFFFFFFFu) + if (consecutiveOffset == 0xFFFFFFFFu) { return DebugUtils::errored(kErrorInvalidState); + } flags |= RATiedFlags::kUseConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset); } } @@ -220,8 +246,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB flags |= RATiedFlags::kOutFixed; } else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) { - if (consecutiveOffset == 0xFFFFFFFFu) + if (consecutiveOffset == 0xFFFFFFFFu) { return 
DebugUtils::errored(kErrorInvalidState); + } flags |= RATiedFlags::kOutConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset); } } @@ -231,19 +258,23 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB // Only the first 0..15 registers can be used if the register uses // element accessor that accesses half-words (h[0..7] elements). if (instInfo.hasFlag(InstDB::kInstFlagVH0_15) && reg.as().elementType() == VecElementType::kH) { - if (Support::test(flags, RATiedFlags::kUse)) + if (Support::test(flags, RATiedFlags::kUse)) { useId &= 0x0000FFFFu; - else + } + else { outId &= 0x0000FFFFu; + } } } ASMJIT_PROPAGATE(ib.add(workReg, flags, useRegs, useId, useRewriteMask, outRegs, outId, outRewriteMask, opRwInfo.rmSize(), consecutiveParent)); - if (singleRegOps == i) + if (singleRegOps == i) { singleRegOps++; + } - if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive)) + if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive)) { consecutiveParent = workReg->workId(); + } } } else if (op.isMem()) { @@ -254,7 +285,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB if (mem.isRegHome()) { RAWorkReg* workReg; ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg)); - _pass->getOrCreateStackSlot(workReg); + if (ASMJIT_UNLIKELY(!_pass->getOrCreateStackSlot(workReg))) { + return DebugUtils::errored(kErrorOutOfMemory); + } } else if (mem.hasBaseReg()) { uint32_t vIndex = Operand::virtIdToIndex(mem.baseId()); @@ -273,10 +306,12 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB uint32_t useRewriteMask = 0; uint32_t outRewriteMask = 0; - if (Support::test(flags, RATiedFlags::kUse)) + if (Support::test(flags, RATiedFlags::kUse)) { useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId)); - else + } + 
else { outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId)); + } ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask)); } @@ -299,10 +334,12 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB uint32_t useRewriteMask = 0; uint32_t outRewriteMask = 0; - if (Support::test(flags, RATiedFlags::kUse)) + if (Support::test(flags, RATiedFlags::kUse)) { useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId])); - else + } + else { outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId])); + } ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask)); } @@ -374,8 +411,9 @@ Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept { if (fd.hasRet()) { for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { const FuncValue& ret = fd.ret(valueIndex); - if (!ret) + if (!ret) { break; + } const Operand& op = invokeNode->ret(valueIndex); if (op.isReg()) { @@ -411,14 +449,16 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) { const FuncValuePack& argPack = fd.argPack(argIndex); for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { - if (!argPack[valueIndex]) + if (!argPack[valueIndex]) { continue; + } const FuncValue& arg = argPack[valueIndex]; const Operand& op = invokeNode->arg(argIndex, valueIndex); - if (op.isNone()) + if (op.isNone()) { continue; + } if (op.isReg()) { const Reg& reg = op.as(); @@ -427,8 +467,9 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept if (arg.isIndirect()) { RegGroup regGroup = workReg->group(); - if (regGroup != RegGroup::kGp) + if (regGroup != RegGroup::kGp) { return 
DebugUtils::errored(kErrorInvalidState); + } ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId())); } else if (arg.isReg()) { @@ -445,8 +486,9 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept for (uint32_t retIndex = 0; retIndex < Globals::kMaxValuePack; retIndex++) { const FuncValue& ret = fd.ret(retIndex); - if (!ret) + if (!ret) { break; + } const Operand& op = invokeNode->ret(retIndex); if (op.isReg()) { @@ -525,11 +567,13 @@ Error RACFGBuilder::moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& a DebugUtils::unused(invokeNode); Mem stackPtr = ptr(_pass->_sp.as(), arg.stackOffset()); - if (reg.isGp()) + if (reg.isGp()) { return cc()->str(reg.as(), stackPtr); + } - if (reg.isVec()) + if (reg.isVec()) { return cc()->str(reg.as(), stackPtr); + } return DebugUtils::errored(kErrorInvalidState); } @@ -549,11 +593,14 @@ Error RACFGBuilder::onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept { for (uint32_t i = 0; i < opCount; i++) { const Operand& op = opArray[i]; - if (op.isNone()) continue; + if (op.isNone()) { + continue; + } const FuncValue& ret = funcDetail.ret(i); - if (ASMJIT_UNLIKELY(!ret.isReg())) + if (ASMJIT_UNLIKELY(!ret.isReg())) { return DebugUtils::errored(kErrorInvalidAssignment); + } if (op.isReg()) { // Register return value. @@ -614,8 +661,9 @@ void ARMRAPass::onInit() noexcept { // Apple ABI requires that the frame-pointer register is not changed by leaf functions and properly updated // by non-leaf functions. So, let's make this register unavailable as it's just not safe to update it. - if (hasFP || cc()->environment().isDarwin()) + if (hasFP || cc()->environment().isDarwin()) { makeUnavailable(RegGroup::kGp, Gp::kIdFp); + } makeUnavailable(RegGroup::kGp, Gp::kIdSp); makeUnavailable(RegGroup::kGp, Gp::kIdOs); // OS-specific use, usually TLS. 
@@ -663,13 +711,17 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no Support::BitWordIterator useIt(tiedReg->useRewriteMask()); uint32_t useId = tiedReg->useId(); - while (useIt.hasNext()) + + while (useIt.hasNext()) { inst->rewriteIdAtIndex(useIt.next(), useId); + } Support::BitWordIterator outIt(tiedReg->outRewriteMask()); uint32_t outId = tiedReg->outId(); - while (outIt.hasNext()) + + while (outIt.hasNext()) { inst->rewriteIdAtIndex(outIt.next(), outId); + } } // This data is allocated by Zone passed to `runOnFunction()`, which @@ -703,8 +755,9 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no BaseMem& mem = op.as(); if (mem.isRegHome()) { uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId()); - if (ASMJIT_UNLIKELY(virtIndex >= virtCount)) + if (ASMJIT_UNLIKELY(virtIndex >= virtCount)) { return DebugUtils::errored(kErrorInvalidVirtId); + } VirtReg* virtReg = cc()->virtRegByIndex(virtIndex); RAWorkReg* workReg = virtReg->workReg(); @@ -730,8 +783,9 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no inst->setOp(1, Imm(offset)); } else { - if (mem.hasIndex()) + if (mem.hasIndex()) { return DebugUtils::errored(kErrorInvalidAddressIndex); + } GpX dst(inst->op(0).as().id()); GpX base(mem.baseId()); @@ -775,8 +829,9 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no // ================================ Error ARMRAPass::updateStackFrame() noexcept { - if (_func->frame().hasFuncCalls()) + if (_func->frame().hasFuncCalls()) { _func->frame().addDirtyRegs(RegGroup::kGp, Support::bitMask(Gp::kIdLr)); + } return BaseRAPass::updateStackFrame(); } diff --git a/src/asmjit/arm/a64rapass_p.h b/src/asmjit/arm/a64rapass_p.h index 7313087..f4ccdd1 100644 --- a/src/asmjit/arm/a64rapass_p.h +++ b/src/asmjit/arm/a64rapass_p.h @@ -29,10 +29,15 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64) class ARMRAPass : public BaseRAPass { public: ASMJIT_NONCOPYABLE(ARMRAPass) 
- typedef BaseRAPass Base; + using Base = BaseRAPass; + + //! \name Members + //! \{ EmitHelper _emitHelper; + //! \} + //! \name Construction & Destruction //! \{ @@ -45,9 +50,11 @@ public: //! \{ //! Returns the compiler casted to `arm::Compiler`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Compiler* cc() const noexcept { return static_cast(_cb); } //! Returns emit helper. + [[nodiscard]] ASMJIT_INLINE_NODEBUG EmitHelper* emitHelper() noexcept { return &_emitHelper; } //! \} diff --git a/src/asmjit/arm/armformatter.cpp b/src/asmjit/arm/armformatter.cpp index 3fe2c6b..79e1d62 100644 --- a/src/asmjit/arm/armformatter.cpp +++ b/src/asmjit/arm/armformatter.cpp @@ -334,10 +334,12 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister( ASMJIT_ASSERT(vReg != nullptr); const char* name = vReg->name(); - if (name && name[0] != '\0') + if (name && name[0] != '\0') { ASMJIT_PROPAGATE(sb.append(name)); - else + } + else { ASMJIT_PROPAGATE(sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(rId)))); + } virtRegFormatted = true; } @@ -356,19 +358,22 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister( case RegType::kARM_VecD: case RegType::kARM_VecV: letter = bhsdq[uint32_t(regType) - uint32_t(RegType::kARM_VecB)]; - if (elementType) + if (elementType) { letter = 'v'; + } break; case RegType::kARM_GpW: if (Environment::is64Bit(arch)) { letter = 'w'; - if (rId == a64::Gp::kIdZr) + if (rId == a64::Gp::kIdZr) { return sb.append("wzr", 3); + } - if (rId == a64::Gp::kIdSp) + if (rId == a64::Gp::kIdSp) { return sb.append("wsp", 3); + } } else { letter = 'r'; @@ -377,17 +382,20 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister( case RegType::kARM_GpX: if (Environment::is64Bit(arch)) { - if (rId == a64::Gp::kIdZr) + if (rId == a64::Gp::kIdZr) { return sb.append("xzr", 3); - if (rId == a64::Gp::kIdSp) + } + + if (rId == a64::Gp::kIdSp) { return sb.append("sp", 2); + } letter = 'x'; break; } // X registers are undefined in 32-bit mode. 
- ASMJIT_FALLTHROUGH; + [[fallthrough]]; default: ASMJIT_PROPAGATE(sb.appendFormat("?%u", uint32_t(regType), rId)); @@ -445,8 +453,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegisterList( count++; } while (rMask & mask); - if (!first) + if (!first) { ASMJIT_PROPAGATE(sb.append(", ")); + } ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, regType, start, 0, 0xFFFFFFFFu)); if (count >= 2u) { @@ -477,8 +486,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand( uint32_t elementType = op._signature.getField(); uint32_t elementIndex = op.as().elementIndex(); - if (!op.as().hasElementIndex()) + if (!op.as().hasElementIndex()) { elementIndex = 0xFFFFFFFFu; + } return formatRegister(sb, flags, emitter, arch, reg.type(), reg.id(), elementType, elementIndex); } @@ -524,8 +534,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand( int64_t off = int64_t(m.offset()); uint32_t base = 10; - if (Support::test(flags, FormatFlags::kHexOffsets) && uint64_t(off) > 9) + if (Support::test(flags, FormatFlags::kHexOffsets) && uint64_t(off) > 9) { base = 16; + } if (base == 10) { ASMJIT_PROPAGATE(sb.appendInt(off, base)); @@ -538,16 +549,19 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand( if (m.hasShift()) { ASMJIT_PROPAGATE(sb.append(' ')); - if (!m.isPreOrPost()) + if (!m.isPreOrPost()) { ASMJIT_PROPAGATE(formatShiftOp(sb, m.shiftOp())); + } ASMJIT_PROPAGATE(sb.appendFormat(" %u", m.shift())); } - if (!m.isPostIndex()) + if (!m.isPostIndex()) { ASMJIT_PROPAGATE(sb.append(']')); + } - if (m.isPreIndex()) + if (m.isPreIndex()) { ASMJIT_PROPAGATE(sb.append('!')); + } return kErrorOk; } diff --git a/src/asmjit/arm/armoperand.h b/src/asmjit/arm/armoperand.h index 583a3d8..d18ac00 100644 --- a/src/asmjit/arm/armoperand.h +++ b/src/asmjit/arm/armoperand.h @@ -47,40 +47,40 @@ public: ASMJIT_DEFINE_ABSTRACT_REG(Reg, BaseReg) //! Gets whether the register is either `R` or `W` register (32-bit). 
- ASMJIT_INLINE_NODEBUG constexpr bool isGpR() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isGpR() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is either `R` or `W` register (32-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isGpW() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isGpW() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is an `X` register (64-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isGpX() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isGpX() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is a VEC-B register (8-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isVecB() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVecB() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is a VEC-H register (16-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isVecH() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVecH() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is a VEC-S register (32-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isVecS() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVecS() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is a VEC-D register (64-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isVecD() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVecD() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is a VEC-Q register (128-bit). 
- ASMJIT_INLINE_NODEBUG constexpr bool isVecQ() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVecQ() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is either VEC-D (64-bit) or VEC-Q (128-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isVecDOrQ() const noexcept { return uint32_t(type()) - uint32_t(RegType::kARM_VecD) <= 1u; } + ASMJIT_INLINE_CONSTEXPR bool isVecDOrQ() const noexcept { return uint32_t(type()) - uint32_t(RegType::kARM_VecD) <= 1u; } //! Gets whether the register is a VEC-V register (128-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isVecV() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVecV() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is an 8-bit vector register or view, alias if \ref isVecB(). - ASMJIT_INLINE_NODEBUG constexpr bool isVec8() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVec8() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is a 16-bit vector register or view, alias if \ref isVecH(). - ASMJIT_INLINE_NODEBUG constexpr bool isVec16() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVec16() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is a 32-bit vector register or view, alias if \ref isVecS(). - ASMJIT_INLINE_NODEBUG constexpr bool isVec32() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVec32() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is a 64-bit vector register or view, alias if \ref isVecD(). 
- ASMJIT_INLINE_NODEBUG constexpr bool isVec64() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVec64() const noexcept { return baseSignature() == RegTraits::kSignature; } //! Gets whether the register is a 128-bit vector register or view, alias if \ref isVecQ(). - ASMJIT_INLINE_NODEBUG constexpr bool isVec128() const noexcept { return baseSignature() == RegTraits::kSignature; } + ASMJIT_INLINE_CONSTEXPR bool isVec128() const noexcept { return baseSignature() == RegTraits::kSignature; } template - ASMJIT_INLINE_NODEBUG void setRegT(uint32_t id) noexcept { + ASMJIT_INLINE_CONSTEXPR void setRegT(uint32_t id) noexcept { setSignature(RegTraits::kSignature); setId(id); } @@ -95,13 +95,13 @@ public: static ASMJIT_INLINE_NODEBUG OperandSignature signatureOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToSignature(type); } template - static ASMJIT_INLINE_NODEBUG RegGroup groupOfT() noexcept { return RegTraits::kGroup; } + static ASMJIT_INLINE_CONSTEXPR RegGroup groupOfT() noexcept { return RegTraits::kGroup; } template - static ASMJIT_INLINE_NODEBUG TypeId typeIdOfT() noexcept { return RegTraits::kTypeId; } + static ASMJIT_INLINE_CONSTEXPR TypeId typeIdOfT() noexcept { return RegTraits::kTypeId; } template - static ASMJIT_INLINE_NODEBUG OperandSignature signatureOfT() noexcept { return OperandSignature{RegTraits::kSignature}; } + static ASMJIT_INLINE_CONSTEXPR OperandSignature signatureOfT() noexcept { return OperandSignature{RegTraits::kSignature}; } static ASMJIT_INLINE_NODEBUG bool isGpW(const Operand_& op) noexcept { return op.as().isGpW(); } static ASMJIT_INLINE_NODEBUG bool isGpX(const Operand_& op) noexcept { return op.as().isGpX(); } @@ -146,16 +146,16 @@ public: }; //! Returns whether the register has element index (it's an element index access). 
- ASMJIT_INLINE_NODEBUG constexpr bool hasElementIndex() const noexcept { return _signature.hasField(); } + ASMJIT_INLINE_CONSTEXPR bool hasElementIndex() const noexcept { return _signature.hasField(); } //! Returns element index of the register. - ASMJIT_INLINE_NODEBUG constexpr uint32_t elementIndex() const noexcept { return _signature.getField(); } + ASMJIT_INLINE_CONSTEXPR uint32_t elementIndex() const noexcept { return _signature.getField(); } //! Sets element index of the register to `elementType`. - ASMJIT_INLINE_NODEBUG void setElementIndex(uint32_t elementIndex) noexcept { + ASMJIT_INLINE_CONSTEXPR void setElementIndex(uint32_t elementIndex) noexcept { _signature |= kSignatureRegElementFlagMask; _signature.setField(elementIndex); } //! Resets element index of the register. - ASMJIT_INLINE_NODEBUG void resetElementIndex() noexcept { + ASMJIT_INLINE_CONSTEXPR void resetElementIndex() noexcept { _signature &= ~(kSignatureRegElementFlagMask | kSignatureRegElementIndexMask); } }; @@ -187,35 +187,35 @@ public: //! \{ //! Construct a default `Mem` operand, that points to [0]. 
- ASMJIT_INLINE_NODEBUG constexpr Mem() noexcept + ASMJIT_INLINE_CONSTEXPR Mem() noexcept : BaseMem() {} - ASMJIT_INLINE_NODEBUG constexpr Mem(const Mem& other) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(const Mem& other) noexcept : BaseMem(other) {} ASMJIT_INLINE_NODEBUG explicit Mem(Globals::NoInit_) noexcept : BaseMem(Globals::NoInit) {} - ASMJIT_INLINE_NODEBUG constexpr Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept : BaseMem(signature, baseId, indexId, offset) {} - ASMJIT_INLINE_NODEBUG constexpr explicit Mem(const Label& base, int32_t off = 0, Signature signature = Signature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR explicit Mem(const Label& base, int32_t off = 0, Signature signature = Signature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | Signature::fromMemBaseType(RegType::kLabelTag) | signature, base.id(), 0, off) {} - ASMJIT_INLINE_NODEBUG constexpr explicit Mem(const BaseReg& base, int32_t off = 0, Signature signature = Signature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR explicit Mem(const BaseReg& base, int32_t off = 0, Signature signature = Signature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | Signature::fromMemBaseType(base.type()) | signature, base.id(), 0, off) {} - ASMJIT_INLINE_NODEBUG constexpr Mem(const BaseReg& base, const BaseReg& index, Signature signature = Signature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(const BaseReg& base, const BaseReg& index, Signature signature = Signature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | Signature::fromMemBaseType(base.type()) | Signature::fromMemIndexType(index.type()) | signature, base.id(), index.id(), 0) {} - ASMJIT_INLINE_NODEBUG constexpr Mem(const BaseReg& base, const BaseReg& index, const Shift& shift, Signature signature = Signature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(const 
BaseReg& base, const BaseReg& index, const Shift& shift, Signature signature = Signature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | Signature::fromMemBaseType(base.type()) | Signature::fromMemIndexType(index.type()) | @@ -223,7 +223,7 @@ public: Signature::fromValue(shift.value()) | signature, base.id(), index.id(), 0) {} - ASMJIT_INLINE_NODEBUG constexpr explicit Mem(uint64_t base, Signature signature = Signature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR explicit Mem(uint64_t base, Signature signature = Signature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | signature, uint32_t(base >> 32), 0, int32_t(uint32_t(base & 0xFFFFFFFFu))) {} @@ -232,7 +232,10 @@ public: //! \name Overloaded Operators //! \{ - ASMJIT_INLINE_NODEBUG Mem& operator=(const Mem& other) noexcept = default; + ASMJIT_INLINE_CONSTEXPR Mem& operator=(const Mem& other) noexcept { + copyFrom(other); + return *this; + } //! \} @@ -240,24 +243,24 @@ public: //! \{ //! Clones the memory operand. - ASMJIT_INLINE_NODEBUG constexpr Mem clone() const noexcept { return Mem(*this); } + ASMJIT_INLINE_CONSTEXPR Mem clone() const noexcept { return Mem(*this); } //! Gets new memory operand adjusted by `off`. - ASMJIT_INLINE_NODEBUG Mem cloneAdjusted(int64_t off) const noexcept { + ASMJIT_INLINE_CONSTEXPR Mem cloneAdjusted(int64_t off) const noexcept { Mem result(*this); result.addOffset(off); return result; } //! Clones the memory operand and makes it pre-index. - ASMJIT_INLINE_NODEBUG Mem pre() const noexcept { + ASMJIT_INLINE_CONSTEXPR Mem pre() const noexcept { Mem result(*this); result.setOffsetMode(OffsetMode::kPreIndex); return result; } //! Clones the memory operand, applies a given offset `off` and makes it pre-index. - ASMJIT_INLINE_NODEBUG Mem pre(int64_t off) const noexcept { + ASMJIT_INLINE_CONSTEXPR Mem pre(int64_t off) const noexcept { Mem result(*this); result.setOffsetMode(OffsetMode::kPreIndex); result.addOffset(off); @@ -265,14 +268,14 @@ public: } //! 
Clones the memory operand and makes it post-index. - ASMJIT_INLINE_NODEBUG Mem post() const noexcept { + ASMJIT_INLINE_CONSTEXPR Mem post() const noexcept { Mem result(*this); result.setOffsetMode(OffsetMode::kPostIndex); return result; } //! Clones the memory operand, applies a given offset `off` and makes it post-index. - ASMJIT_INLINE_NODEBUG Mem post(int64_t off) const noexcept { + ASMJIT_INLINE_CONSTEXPR Mem post(int64_t off) const noexcept { Mem result(*this); result.setOffsetMode(OffsetMode::kPostIndex); result.addOffset(off); @@ -296,12 +299,12 @@ public: using BaseMem::setIndex; - ASMJIT_INLINE_NODEBUG void setIndex(const BaseReg& index, uint32_t shift) noexcept { + ASMJIT_INLINE_CONSTEXPR void setIndex(const BaseReg& index, uint32_t shift) noexcept { setIndex(index); setShift(shift); } - ASMJIT_INLINE_NODEBUG void setIndex(const BaseReg& index, Shift shift) noexcept { + ASMJIT_INLINE_CONSTEXPR void setIndex(const BaseReg& index, Shift shift) noexcept { setIndex(index); setShift(shift); } @@ -312,48 +315,48 @@ public: //! \{ //! Gets offset mode. - ASMJIT_INLINE_NODEBUG constexpr OffsetMode offsetMode() const noexcept { return OffsetMode(_signature.getField()); } + ASMJIT_INLINE_CONSTEXPR OffsetMode offsetMode() const noexcept { return OffsetMode(_signature.getField()); } //! Sets offset mode to `mode`. - ASMJIT_INLINE_NODEBUG void setOffsetMode(OffsetMode mode) noexcept { _signature.setField(uint32_t(mode)); } + ASMJIT_INLINE_CONSTEXPR void setOffsetMode(OffsetMode mode) noexcept { _signature.setField(uint32_t(mode)); } //! Resets offset mode to default (fixed offset, without write-back). - ASMJIT_INLINE_NODEBUG void resetOffsetMode() noexcept { _signature.setField(uint32_t(OffsetMode::kFixed)); } + ASMJIT_INLINE_CONSTEXPR void resetOffsetMode() noexcept { _signature.setField(uint32_t(OffsetMode::kFixed)); } //! Tests whether the current memory offset mode is fixed (see \ref OffsetMode::kFixed). 
- ASMJIT_INLINE_NODEBUG constexpr bool isFixedOffset() const noexcept { return offsetMode() == OffsetMode::kFixed; } + ASMJIT_INLINE_CONSTEXPR bool isFixedOffset() const noexcept { return offsetMode() == OffsetMode::kFixed; } //! Tests whether the current memory offset mode is either pre-index or post-index (write-back is used). - ASMJIT_INLINE_NODEBUG constexpr bool isPreOrPost() const noexcept { return offsetMode() != OffsetMode::kFixed; } + ASMJIT_INLINE_CONSTEXPR bool isPreOrPost() const noexcept { return offsetMode() != OffsetMode::kFixed; } //! Tests whether the current memory offset mode is pre-index (write-back is used). - ASMJIT_INLINE_NODEBUG constexpr bool isPreIndex() const noexcept { return offsetMode() == OffsetMode::kPreIndex; } + ASMJIT_INLINE_CONSTEXPR bool isPreIndex() const noexcept { return offsetMode() == OffsetMode::kPreIndex; } //! Tests whether the current memory offset mode is post-index (write-back is used). - ASMJIT_INLINE_NODEBUG constexpr bool isPostIndex() const noexcept { return offsetMode() == OffsetMode::kPostIndex; } + ASMJIT_INLINE_CONSTEXPR bool isPostIndex() const noexcept { return offsetMode() == OffsetMode::kPostIndex; } //! Sets offset mode of this memory operand to pre-index (write-back is used). - ASMJIT_INLINE_NODEBUG void makePreIndex() noexcept { setOffsetMode(OffsetMode::kPreIndex); } + ASMJIT_INLINE_CONSTEXPR void makePreIndex() noexcept { setOffsetMode(OffsetMode::kPreIndex); } //! Sets offset mode of this memory operand to post-index (write-back is used). - ASMJIT_INLINE_NODEBUG void makePostIndex() noexcept { setOffsetMode(OffsetMode::kPostIndex); } + ASMJIT_INLINE_CONSTEXPR void makePostIndex() noexcept { setOffsetMode(OffsetMode::kPostIndex); } //! Gets shift operation that is used by index register. - ASMJIT_INLINE_NODEBUG constexpr ShiftOp shiftOp() const noexcept { return ShiftOp(_signature.getField()); } + ASMJIT_INLINE_CONSTEXPR ShiftOp shiftOp() const noexcept { return ShiftOp(_signature.getField()); } //! 
Sets shift operation that is used by index register. - ASMJIT_INLINE_NODEBUG void setShiftOp(ShiftOp sop) noexcept { _signature.setField(uint32_t(sop)); } + ASMJIT_INLINE_CONSTEXPR void setShiftOp(ShiftOp sop) noexcept { _signature.setField(uint32_t(sop)); } //! Resets shift operation that is used by index register to LSL (default value). - ASMJIT_INLINE_NODEBUG void resetShiftOp() noexcept { _signature.setField(uint32_t(ShiftOp::kLSL)); } + ASMJIT_INLINE_CONSTEXPR void resetShiftOp() noexcept { _signature.setField(uint32_t(ShiftOp::kLSL)); } //! Gets whether the memory operand has shift (aka scale) constant. - ASMJIT_INLINE_NODEBUG constexpr bool hasShift() const noexcept { return _signature.hasField(); } + ASMJIT_INLINE_CONSTEXPR bool hasShift() const noexcept { return _signature.hasField(); } //! Gets the memory operand's shift (aka scale) constant. - ASMJIT_INLINE_NODEBUG constexpr uint32_t shift() const noexcept { return _signature.getField(); } + ASMJIT_INLINE_CONSTEXPR uint32_t shift() const noexcept { return _signature.getField(); } //! Sets the memory operand's shift (aka scale) constant. - ASMJIT_INLINE_NODEBUG void setShift(uint32_t shift) noexcept { _signature.setField(shift); } + ASMJIT_INLINE_CONSTEXPR void setShift(uint32_t shift) noexcept { _signature.setField(shift); } //! Sets the memory operand's shift and shift operation. - ASMJIT_INLINE_NODEBUG void setShift(Shift shift) noexcept { + ASMJIT_INLINE_CONSTEXPR void setShift(Shift shift) noexcept { _signature.setField(uint32_t(shift.op())); _signature.setField(shift.value()); } //! Resets the memory operand's shift (aka scale) constant to zero. - ASMJIT_INLINE_NODEBUG void resetShift() noexcept { _signature.setField(0); } + ASMJIT_INLINE_CONSTEXPR void resetShift() noexcept { _signature.setField(0); } //! \} }; @@ -362,17 +365,17 @@ public: //! \{ //! Constructs a `LSL #value` shift (logical shift left). 
-static ASMJIT_INLINE_NODEBUG constexpr Shift lsl(uint32_t value) noexcept { return Shift(ShiftOp::kLSL, value); } +static ASMJIT_INLINE_CONSTEXPR Shift lsl(uint32_t value) noexcept { return Shift(ShiftOp::kLSL, value); } //! Constructs a `LSR #value` shift (logical shift right). -static ASMJIT_INLINE_NODEBUG constexpr Shift lsr(uint32_t value) noexcept { return Shift(ShiftOp::kLSR, value); } +static ASMJIT_INLINE_CONSTEXPR Shift lsr(uint32_t value) noexcept { return Shift(ShiftOp::kLSR, value); } //! Constructs a `ASR #value` shift (arithmetic shift right). -static ASMJIT_INLINE_NODEBUG constexpr Shift asr(uint32_t value) noexcept { return Shift(ShiftOp::kASR, value); } +static ASMJIT_INLINE_CONSTEXPR Shift asr(uint32_t value) noexcept { return Shift(ShiftOp::kASR, value); } //! Constructs a `ROR #value` shift (rotate right). -static ASMJIT_INLINE_NODEBUG constexpr Shift ror(uint32_t value) noexcept { return Shift(ShiftOp::kROR, value); } +static ASMJIT_INLINE_CONSTEXPR Shift ror(uint32_t value) noexcept { return Shift(ShiftOp::kROR, value); } //! Constructs a `RRX` shift (rotate with carry by 1). -static ASMJIT_INLINE_NODEBUG constexpr Shift rrx() noexcept { return Shift(ShiftOp::kRRX, 0); } +static ASMJIT_INLINE_CONSTEXPR Shift rrx() noexcept { return Shift(ShiftOp::kRRX, 0); } //! Constructs a `MSL #value` shift (logical shift left filling ones). -static ASMJIT_INLINE_NODEBUG constexpr Shift msl(uint32_t value) noexcept { return Shift(ShiftOp::kMSL, value); } +static ASMJIT_INLINE_CONSTEXPR Shift msl(uint32_t value) noexcept { return Shift(ShiftOp::kMSL, value); } //! \} @@ -385,7 +388,7 @@ static ASMJIT_INLINE_NODEBUG constexpr Shift msl(uint32_t value) noexcept { retu //! Absolute memory operands can only be used if it's known that the PC relative offset is encodable and that it //! would be within the limits. Absolute address is also often output from disassemblers, so AsmJit supports it to //! make it possible to assemble such output back. 
-static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(uint64_t base) noexcept { return Mem(base); } +static ASMJIT_INLINE_CONSTEXPR Mem ptr(uint64_t base) noexcept { return Mem(base); } //! \} diff --git a/src/asmjit/arm/armutils.h b/src/asmjit/arm/armutils.h index 8241eda..ac3441e 100644 --- a/src/asmjit/arm/armutils.h +++ b/src/asmjit/arm/armutils.h @@ -18,7 +18,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(arm) namespace Utils { //! Encodes a 12-bit immediate part of opcode that ise used by a standard 32-bit ARM encoding. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline bool encodeAArch32Imm(uint64_t imm, uint32_t* encodedImmOut) noexcept { if (imm & 0xFFFFFFFF00000000u) return false; @@ -73,7 +73,7 @@ struct LogicalImm { //! | 0 | 11110s | .....r | 2 | //! +---+--------+--------+------+ //! ``` -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noexcept { // Determine the element width, which must be 2, 4, 8, 16, 32, or 64 bits. do { @@ -121,7 +121,7 @@ static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noex //! Returns true if the given `imm` value is encodable as a logical immediate. The `width` argument describes the //! width of the operation, and must be either 32 or 64. This function can be used to test whether an immediate //! value can be used with AND, ANDS, BIC, BICS, EON, EOR, ORN, and ORR instruction. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_INLINE_NODEBUG bool isLogicalImm(uint64_t imm, uint32_t width) noexcept { LogicalImm dummy; return encodeLogicalImm(imm, width, &dummy); @@ -129,7 +129,7 @@ static ASMJIT_INLINE_NODEBUG bool isLogicalImm(uint64_t imm, uint32_t width) noe //! Returns true if the given `imm` value is encodable as an immediate with `add` and `sub` instructions on AArch64. //! These two instructions can encode 12-bit immediate value optionally shifted left by 12 bits. 
-ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_INLINE_NODEBUG bool isAddSubImm(uint64_t imm) noexcept { return imm <= 0xFFFu || (imm & ~uint64_t(0xFFFu << 12)) == 0; } @@ -153,7 +153,7 @@ static ASMJIT_INLINE_NODEBUG uint32_t encodeImm64ByteMaskToImm8(uint64_t imm) no //! \cond //! A generic implementation that checjs whether a floating point value can be converted to ARM Imm8. template -static ASMJIT_FORCE_INLINE bool isFPImm8Generic(T val) noexcept { +static ASMJIT_INLINE bool isFPImm8Generic(T val) noexcept { constexpr uint32_t kAllBsMask = Support::lsbMask(kNumBBits); constexpr uint32_t kB0Pattern = Support::bitMask(kNumBBits - 1); constexpr uint32_t kB1Pattern = kAllBsMask ^ kB0Pattern; diff --git a/src/asmjit/asmjit.h b/src/asmjit/asmjit.h index f5184eb..5d22d67 100644 --- a/src/asmjit/asmjit.h +++ b/src/asmjit/asmjit.h @@ -24,6 +24,8 @@ #ifndef ASMJIT_ASMJIT_H_INCLUDED #define ASMJIT_ASMJIT_H_INCLUDED +#pragma message("asmjit/asmjit.h is deprecated! Please use asmjit/core.h, asmjit/x86.h, or asmjit/a64.h") + #include "./core.h" #ifndef ASMJIT_NO_X86 diff --git a/src/asmjit/core.h b/src/asmjit/core.h index 758f26f..d643104 100644 --- a/src/asmjit/core.h +++ b/src/asmjit/core.h @@ -102,21 +102,21 @@ namespace asmjit { //! //! - Requirements: //! -//! - AsmJit won't build without C++11 enabled. If you use older GCC or Clang you would have to enable at least -//! C++11 standard through compiler flags. +//! - AsmJit won't build without C++17 enabled. If you use older GCC or Clang you would have to enable at least +//! C++17 standard through compiler flags. //! //! - Tested: //! //! - **Clang** - Tested by GitHub Actions - Clang 10+ is officially supported and tested by CI, older Clang versions -//! having C++11 should work, but are not tested anymore due to upgraded CI images. +//! having C++17 should work, but these versions are not tested anymore due to upgraded CI images. //! -//! 
- **GNU** - Tested by GitHub Actions - GCC 7+ is officially supported, older GCC versions from 4.8+ having C++11 -//! enabled should also work, but are not tested anymore due to upgraded CI images. +//! - **GNU** - Tested by GitHub Actions - GCC 9+ is officially supported and tested by CI, older GCC versions such +//! as GCC 7 should work, but these versions are not tested anymore due to upgraded CI images. //! -//! - **MINGW** - Reported to work, but not tested in our CI environment (help welcome). +//! - **MINGW** - Reported to work, but not tested in our CI environment (help welcome!). //! -//! - **MSVC** - Tested by GitHub Actions - VS2019+ is officially supported, VS2015 and VS2017 is reported to work, -//! but not tested by CI anymore. +//! - **MSVC** - Tested by GitHub Actions - VS2019 and onwards are officially supported and tested by CI, VS2015 and +//! VS2017 are not tested anymore due to upgraded CI images. //! //! ### Supported Operating Systems and Platforms //! @@ -173,7 +173,7 @@ namespace asmjit { //! cmake_minimum_required(VERSION 3.30) //! //! project(asmjit_consumer C CXX) # Both C and CXX are required. -//! set(CMAKE_CXX_STANDARD 17) # C++11 and never is supported. +//! set(CMAKE_CXX_STANDARD 17) # C++17 and newer is supported. //! //! set(ASMJIT_DIR "3rdparty/asmjit") # Location of AsmJit. //! set(ASMJIT_STATIC TRUE) # Force static build. @@ -201,12 +201,12 @@ namespace asmjit { //! //! \section build_backends AsmJit Backends //! -//! AsmJit currently supports only X86/X64 backend, but the plan is to add more backends in the future. By default -//! AsmJit builds only the host backend, which is auto-detected at compile-time, but this can be overridden. +//! All backends AsmJit supports are included by default. To exclude a backend use the following build-type macros: //! //! - \ref ASMJIT_NO_X86 - Disables both X86 and X86_64 backends. //! - \ref ASMJIT_NO_AARCH64 - Disables AArch64 backend. //!
- \ref ASMJIT_NO_FOREIGN - Disables the support for foreign architecture backends, only keeps a native backend. +//! For example if your target is X86, `ASMJIT_NO_FOREIGN` would disable every backend but X86. //! //! \section build_options Build Options //! @@ -268,14 +268,24 @@ namespace asmjit { //! - Visit our [Public Gitter Chat](https://app.gitter.im/#/room/#asmjit:gitter.im) if you need a quick help. //! //! - Build AsmJit with `ASMJIT_NO_DEPRECATED` macro defined to make sure that you are not using deprecated -//! functionality at all. Deprecated functions are decorated with `ASMJIT_DEPRECATED()` macro, but sometimes +//! functionality at all. Deprecated functions are decorated with `[[deprecated]]` attribute, but sometimes //! it's not possible to decorate everything like classes, which are used by deprecated functions as well, //! because some compilers would warn about that. If your project compiles fine with `ASMJIT_NO_DEPRECATED` //! it's not using anything, which was deprecated. //! //! \section api_changes API Changes //! -//! ### Changes committed at XXXX-XX-XX +//! ### Changes committed at 2025-05-24 +//! +//! Core changes: +//! +//! - AsmJit now requires C++17 to compile. +//! +//! - Deprecated asmjit/asmjit.h header. Use asmjit/core.h to include everything except backend specific stuff, +//! and asmjit/x86.h or asmjit/a64.h to include tools of a specific architecture. At this time the asmjit.h +//! header is just deprecated, so it will still work as it used to for some time. +//! +//! ### Changes committed at 2025-05-10 //! //! Core changes: //! @@ -565,7 +575,7 @@ namespace asmjit { //! using namespace asmjit; //! //! // Signature of the generated function. -//! typedef int (*Func)(void); +//! using Func = int (*)(void); //! //! int main() { //! JitRuntime rt; // Runtime specialized for JIT code execution. @@ -712,7 +722,7 @@ namespace asmjit { //! //! using namespace asmjit; //! -//! 
typedef void (*SumIntsFunc)(int* dst, const int* a, const int* b); +//! using SumIntsFunc = void (*)(int* dst, const int* a, const int* b); //! //! int main() { //! // Create a custom environment that matches the current host environment. diff --git a/src/asmjit/core/api-config.h b/src/asmjit/core/api-config.h index 0b603fd..762692e 100644 --- a/src/asmjit/core/api-config.h +++ b/src/asmjit/core/api-config.h @@ -16,7 +16,7 @@ #define ASMJIT_LIBRARY_MAKE_VERSION(major, minor, patch) ((major << 16) | (minor << 8) | (patch)) //! AsmJit library version, see \ref ASMJIT_LIBRARY_MAKE_VERSION for a version format reference. -#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 15, 0) +#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 16, 0) //! \def ASMJIT_ABI_NAMESPACE //! @@ -27,7 +27,7 @@ //! AsmJit default, which makes it possible to use multiple AsmJit libraries within a single project, totally //! controlled by users. This is useful especially in cases in which some of such library comes from third party. #if !defined(ASMJIT_ABI_NAMESPACE) - #define ASMJIT_ABI_NAMESPACE v1_15 + #define ASMJIT_ABI_NAMESPACE v1_16 #endif // !ASMJIT_ABI_NAMESPACE //! \} @@ -287,27 +287,26 @@ namespace asmjit { //! is exported. However, GCC has some strange behavior that even if one or more symbol is exported it doesn't export //! typeinfo unless the class itself is decorated with "visibility(default)" (i.e. ASMJIT_API). -//! \def ASMJIT_FORCE_INLINE +//! \def ASMJIT_INLINE //! //! Decorator to force inlining of functions, uses either `__attribute__((__always_inline__))` or __forceinline, //! depending on C++ compiler. //! \def ASMJIT_INLINE_NODEBUG //! -//! Like \ref ASMJIT_FORCE_INLINE, but uses additionally `__nodebug__` or `__artificial__` attribute to make the +//! Like \ref ASMJIT_INLINE, but uses additionally `__nodebug__` or `__artificial__` attribute to make the //! 
debugging of some AsmJit functions easier, especially getters and one-line abstractions where usually you don't //! want to step in. +//! \def ASMJIT_INLINE_CONSTEXPR +//! +//! Like \ref ASMJIT_INLINE_NODEBUG, but having an additional `constexpr` attribute. + //! \def ASMJIT_NOINLINE //! //! Decorator to avoid inlining of functions, uses either `__attribute__((__noinline__))` or `__declspec(noinline)` //! depending on C++ compiler. -//! \def ASMJIT_NORETURN -//! -//! Decorator that marks functions that should never return. Typically used to implement assertion handlers that -//! terminate, so the function never returns. - //! \def ASMJIT_CDECL //! //! CDECL function attribute - either `__attribute__((__cdecl__))` or `__cdecl`. @@ -371,11 +370,11 @@ namespace asmjit { // Function attributes. #if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) - #define ASMJIT_FORCE_INLINE inline __attribute__((__always_inline__)) + #define ASMJIT_INLINE inline __attribute__((__always_inline__)) #elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER) - #define ASMJIT_FORCE_INLINE __forceinline + #define ASMJIT_INLINE __forceinline #else - #define ASMJIT_FORCE_INLINE inline + #define ASMJIT_INLINE inline #endif @@ -387,15 +386,14 @@ namespace asmjit { #define ASMJIT_INLINE_NODEBUG inline #endif +#define ASMJIT_INLINE_CONSTEXPR constexpr ASMJIT_INLINE_NODEBUG + #if defined(__GNUC__) #define ASMJIT_NOINLINE __attribute__((__noinline__)) - #define ASMJIT_NORETURN __attribute__((__noreturn__)) #elif defined(_MSC_VER) #define ASMJIT_NOINLINE __declspec(noinline) - #define ASMJIT_NORETURN __declspec(noreturn) #else #define ASMJIT_NOINLINE - #define ASMJIT_NORETURN #endif // Calling conventions. @@ -424,7 +422,7 @@ namespace asmjit { #define ASMJIT_VECTORCALL #endif -// Type alignment (not allowed by C++11 'alignas' keyword). +// Type alignment (not allowed by C++17 'alignas' keyword). 
#if defined(__GNUC__) #define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE #elif defined(_MSC_VER) @@ -442,35 +440,15 @@ namespace asmjit { #define ASMJIT_MAY_ALIAS #endif -//! \def ASMJIT_MAYBE_UNUSED -//! -//! Expands to `[[maybe_unused]]` if supported or a compiler attribute instead. -#if __cplusplus >= 201703L - #define ASMJIT_MAYBE_UNUSED [[maybe_unused]] -#elif defined(__GNUC__) - #define ASMJIT_MAYBE_UNUSED __attribute__((unused)) -#else - #define ASMJIT_MAYBE_UNUSED -#endif - -#if defined(__clang_major__) && __clang_major__ >= 4 && !defined(_DOXYGEN) - // NOTE: Clang allows to apply this attribute to function arguments, which is what we want. Once GCC decides to - // support this use, we will enable it for GCC as well. However, until that, it will be clang only, which is - // what we need for static analysis. +#if defined(__clang__) && !defined(_DOXYGEN) + // NOTE: Clang allows to apply this attribute to function arguments, which is what we want. Once GCC decides + // to support this use, we will enable it for GCC as well. However, until that, it will be clang only, which + // is what we need for static analysis. #define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT __attribute__((__nonnull__)) #else #define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT #endif -//! \def ASMJIT_NOEXCEPT_TYPE -//! -//! Defined to `noexcept` in C++17 mode or nothing otherwise. Used by function typedefs. -#if __cplusplus >= 201703L - #define ASMJIT_NOEXCEPT_TYPE noexcept -#else - #define ASMJIT_NOEXCEPT_TYPE -#endif - //! \def ASMJIT_ASSUME(...) //! //! Macro that tells the C/C++ compiler that the expression `...` evaluates to true. @@ -504,35 +482,13 @@ namespace asmjit { #define ASMJIT_UNLIKELY(...) (__VA_ARGS__) #endif -//! \def ASMJIT_FALLTHROUGH -//! -//! Portable [[fallthrough]] attribute. 
-#if defined(__clang__) && __cplusplus >= 201103L - #define ASMJIT_FALLTHROUGH [[clang::fallthrough]] -#elif defined(__GNUC__) && __GNUC__ >= 7 - #define ASMJIT_FALLTHROUGH __attribute__((__fallthrough__)) -#else - #define ASMJIT_FALLTHROUGH ((void)0) /* fallthrough */ -#endif - -//! \def ASMJIT_DEPRECATED -//! -//! Marks function, class, struct, enum, or anything else as deprecated. -#if defined(__GNUC__) - #define ASMJIT_DEPRECATED(MESSAGE) __attribute__((__deprecated__(MESSAGE))) -#elif defined(_MSC_VER) - #define ASMJIT_DEPRECATED(MESSAGE) __declspec(deprecated(MESSAGE)) -#else - #define ASMJIT_DEPRECATED(MESSAGE) -#endif - // Utilities. #define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100) #define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0])) #if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0) #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined"))) -#elif defined(__GNUC__) && __GNUC__ >= 5 +#elif defined(__GNUC__) #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__)) #else #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF @@ -541,25 +497,14 @@ namespace asmjit { // Diagnostic Macros // ====================================== -#if !defined(__clang__) && !defined(__INTEL_COMPILER) && !defined(_DOXYGEN) - #if defined(__GNUC__) && __GNUC__ == 4 - // There is a bug in GCC 4.X that has been fixed in GCC 5+, so just silence the warning. 
- #define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \ - _Pragma("GCC diagnostic push") \ - _Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"") - #define ASMJIT_END_DIAGNOSTIC_SCOPE \ - _Pragma("GCC diagnostic pop") - #elif defined(_MSC_VER) - #define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \ - __pragma(warning(push)) \ - __pragma(warning(disable: 4127)) /* conditional expression is const */ \ - __pragma(warning(disable: 4201)) /* nameless struct/union */ - #define ASMJIT_END_DIAGNOSTIC_SCOPE \ - __pragma(warning(pop)) - #endif -#endif - -#if !defined(ASMJIT_BEGIN_DIAGNOSTIC_SCOPE) && !defined(ASMJIT_END_DIAGNOSTIC_SCOPE) +#if defined(_MSC_VER) && !defined(__clang__) && !defined(_DOXYGEN) + #define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \ + __pragma(warning(push)) \ + __pragma(warning(disable: 4127)) /* conditional expression is const */ \ + __pragma(warning(disable: 4201)) /* nameless struct/union */ + #define ASMJIT_END_DIAGNOSTIC_SCOPE \ + __pragma(warning(pop)) +#else #define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE #define ASMJIT_END_DIAGNOSTIC_SCOPE #endif @@ -568,19 +513,19 @@ namespace asmjit { // ====================================== #if !defined(ASMJIT_NO_ABI_NAMESPACE) && !defined(_DOXYGEN) - #define ASMJIT_BEGIN_NAMESPACE \ - ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \ - namespace asmjit { \ + #define ASMJIT_BEGIN_NAMESPACE \ + ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \ + namespace asmjit { \ inline namespace ASMJIT_ABI_NAMESPACE { - #define ASMJIT_END_NAMESPACE \ - }} \ + #define ASMJIT_END_NAMESPACE \ + }} \ ASMJIT_END_DIAGNOSTIC_SCOPE #else - #define ASMJIT_BEGIN_NAMESPACE \ - ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \ + #define ASMJIT_BEGIN_NAMESPACE \ + ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \ namespace asmjit { - #define ASMJIT_END_NAMESPACE \ - } \ + #define ASMJIT_END_NAMESPACE \ + } \ ASMJIT_END_DIAGNOSTIC_SCOPE #endif @@ -590,13 +535,13 @@ namespace asmjit { // C++ Utilities // ============= -#define ASMJIT_NONCOPYABLE(Type) \ - Type(const Type& other) = delete; \ +#define ASMJIT_NONCOPYABLE(Type) \ + Type(const 
Type& other) = delete; \ Type& operator=(const Type& other) = delete; -#define ASMJIT_NONCONSTRUCTIBLE(Type) \ - Type() = delete; \ - Type(const Type& other) = delete; \ +#define ASMJIT_NONCONSTRUCTIBLE(Type) \ + Type() = delete; \ + Type(const Type& other) = delete; \ Type& operator=(const Type& other) = delete; //! \def ASMJIT_DEFINE_ENUM_FLAGS(T) @@ -605,38 +550,32 @@ namespace asmjit { #ifdef _DOXYGEN #define ASMJIT_DEFINE_ENUM_FLAGS(T) #else - #define ASMJIT_DEFINE_ENUM_FLAGS(T) \ - static ASMJIT_INLINE_NODEBUG constexpr T operator~(T a) noexcept { \ - return T(~(std::underlying_type::type)(a)); \ - } \ - \ - static ASMJIT_INLINE_NODEBUG constexpr T operator|(T a, T b) noexcept { \ - return T((std::underlying_type::type)(a) | \ - (std::underlying_type::type)(b)); \ - } \ - static ASMJIT_INLINE_NODEBUG constexpr T operator&(T a, T b) noexcept { \ - return T((std::underlying_type::type)(a) & \ - (std::underlying_type::type)(b)); \ - } \ - static ASMJIT_INLINE_NODEBUG constexpr T operator^(T a, T b) noexcept { \ - return T((std::underlying_type::type)(a) ^ \ - (std::underlying_type::type)(b)); \ - } \ - \ - static ASMJIT_INLINE_NODEBUG T& operator|=(T& a, T b) noexcept { \ - a = T((std::underlying_type::type)(a) | \ - (std::underlying_type::type)(b)); \ - return a; \ - } \ - static ASMJIT_INLINE_NODEBUG T& operator&=(T& a, T b) noexcept { \ - a = T((std::underlying_type::type)(a) & \ - (std::underlying_type::type)(b)); \ - return a; \ - } \ - static ASMJIT_INLINE_NODEBUG T& operator^=(T& a, T b) noexcept { \ - a = T((std::underlying_type::type)(a) ^ \ - (std::underlying_type::type)(b)); \ - return a; \ + #define ASMJIT_DEFINE_ENUM_FLAGS(T) \ + static ASMJIT_INLINE_CONSTEXPR T operator~(T a) noexcept { \ + return T(~std::underlying_type_t(a)); \ + } \ + \ + static ASMJIT_INLINE_CONSTEXPR T operator|(T a, T b) noexcept { \ + return T(std::underlying_type_t(a) | std::underlying_type_t(b)); \ + } \ + static ASMJIT_INLINE_CONSTEXPR T operator&(T a, T b) noexcept { \ + 
return T(std::underlying_type_t<T>(a) & std::underlying_type_t<T>(b)); \
+ } \
+ static ASMJIT_INLINE_CONSTEXPR T operator^(T a, T b) noexcept { \
+ return T(std::underlying_type_t<T>(a) ^ std::underlying_type_t<T>(b)); \
+ } \
+ \
+ static ASMJIT_INLINE_CONSTEXPR T& operator|=(T& a, T b) noexcept { \
+ a = T(std::underlying_type_t<T>(a) | std::underlying_type_t<T>(b)); \
+ return a; \
+ } \
+ static ASMJIT_INLINE_CONSTEXPR T& operator&=(T& a, T b) noexcept { \
+ a = T(std::underlying_type_t<T>(a) & std::underlying_type_t<T>(b)); \
+ return a; \
+ } \
+ static ASMJIT_INLINE_CONSTEXPR T& operator^=(T& a, T b) noexcept { \
+ a = T(std::underlying_type_t<T>(a) ^ std::underlying_type_t<T>(b)); \
+ return a; \
}
#endif
@@ -646,18 +585,18 @@
#if defined(_DOXYGEN) || (defined(_MSC_VER) && _MSC_VER <= 1900)
#define ASMJIT_DEFINE_ENUM_COMPARE(T)
#else
- #define ASMJIT_DEFINE_ENUM_COMPARE(T) \
- static ASMJIT_INLINE_NODEBUG bool operator<(T a, T b) noexcept { \
- return (std::underlying_type<T>::type)(a) < (std::underlying_type<T>::type)(b); \
- } \
- static ASMJIT_INLINE_NODEBUG bool operator<=(T a, T b) noexcept { \
- return (std::underlying_type<T>::type)(a) <= (std::underlying_type<T>::type)(b); \
- } \
- static ASMJIT_INLINE_NODEBUG bool operator>(T a, T b) noexcept { \
- return (std::underlying_type<T>::type)(a) > (std::underlying_type<T>::type)(b); \
- } \
- static ASMJIT_INLINE_NODEBUG bool operator>=(T a, T b) noexcept { \
- return (std::underlying_type<T>::type)(a) >= (std::underlying_type<T>::type)(b); \
+ #define ASMJIT_DEFINE_ENUM_COMPARE(T) \
+ static ASMJIT_INLINE_CONSTEXPR bool operator<(T a, T b) noexcept { \
+ return (std::underlying_type_t<T>)(a) < (std::underlying_type_t<T>)(b); \
+ } \
+ static ASMJIT_INLINE_CONSTEXPR bool operator<=(T a, T b) noexcept { \
+ return (std::underlying_type_t<T>)(a) <= (std::underlying_type_t<T>)(b); \
+ } \
+ static ASMJIT_INLINE_CONSTEXPR bool operator>(T a, T b) noexcept { \
+ return (std::underlying_type_t<T>)(a) > (std::underlying_type_t<T>)(b); \
+ } \
+ static ASMJIT_INLINE_CONSTEXPR bool operator>=(T a, T b) noexcept { \
+ return (std::underlying_type_t<T>)(a) >= (std::underlying_type_t<T>)(b); \
}
#endif
diff --git a/src/asmjit/core/archcommons.h b/src/asmjit/core/archcommons.h
index 2b47d17..41c96fc 100644
--- a/src/asmjit/core/archcommons.h
+++ b/src/asmjit/core/archcommons.h
@@ -101,9 +101,12 @@ static constexpr CondCode _reverseCondTable[] = {
//! \endcond
//! Reverses a condition code (reverses the corresponding operands of a comparison).
-static ASMJIT_INLINE_NODEBUG constexpr CondCode reverseCond(CondCode cond) noexcept { return _reverseCondTable[uint8_t(cond)]; }
+[[nodiscard]]
+static ASMJIT_INLINE_CONSTEXPR CondCode reverseCond(CondCode cond) noexcept { return _reverseCondTable[uint8_t(cond)]; }
+
//! Negates a condition code.
-static ASMJIT_INLINE_NODEBUG constexpr CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); }
+[[nodiscard]]
+static ASMJIT_INLINE_CONSTEXPR CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); }
//! Memory offset mode.
//!
@@ -180,20 +183,24 @@ public:
ASMJIT_INLINE_NODEBUG Shift() noexcept = default;
//! Copy constructor (default)
- ASMJIT_INLINE_NODEBUG constexpr Shift(const Shift& other) noexcept = default;
+ ASMJIT_INLINE_CONSTEXPR Shift(const Shift& other) noexcept = default;
//! Constructs Shift from operation `op` and shift `value`.
- ASMJIT_INLINE_NODEBUG constexpr Shift(ShiftOp op, uint32_t value) noexcept
+ ASMJIT_INLINE_CONSTEXPR Shift(ShiftOp op, uint32_t value) noexcept
: _op(op), _value(value) {}
//! Returns the shift operation.
- ASMJIT_INLINE_NODEBUG constexpr ShiftOp op() const noexcept { return _op; }
+ [[nodiscard]]
+ ASMJIT_INLINE_CONSTEXPR ShiftOp op() const noexcept { return _op; }
+
//! Sets shift operation to `op`.
ASMJIT_INLINE_NODEBUG void setOp(ShiftOp op) noexcept { _op = op; }
//! Returns the shift amount.
- ASMJIT_INLINE_NODEBUG constexpr uint32_t value() const noexcept { return _value; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t value() const noexcept { return _value; } + //! Sets shift amount to `value`. ASMJIT_INLINE_NODEBUG void setValue(uint32_t value) noexcept { _value = value; } }; diff --git a/src/asmjit/core/archtraits.cpp b/src/asmjit/core/archtraits.cpp index a15a00c..095c165 100644 --- a/src/asmjit/core/archtraits.cpp +++ b/src/asmjit/core/archtraits.cpp @@ -106,51 +106,64 @@ ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegSignature(Arch arch, TypeId typeId // TODO: Remove this, should never be used like this. // Passed RegType instead of TypeId? - if (uint32_t(typeId) <= uint32_t(RegType::kMaxValue)) + if (uint32_t(typeId) <= uint32_t(RegType::kMaxValue)) { typeId = archTraits.regTypeToTypeId(RegType(uint32_t(typeId))); + } - if (ASMJIT_UNLIKELY(!TypeUtils::isValid(typeId))) + if (ASMJIT_UNLIKELY(!TypeUtils::isValid(typeId))) { return DebugUtils::errored(kErrorInvalidTypeId); + } // First normalize architecture dependent types. if (TypeUtils::isAbstract(typeId)) { bool is32Bit = Environment::is32Bit(arch); - if (typeId == TypeId::kIntPtr) + if (typeId == TypeId::kIntPtr) { typeId = is32Bit ? TypeId::kInt32 : TypeId::kInt64; - else + } + else { typeId = is32Bit ? TypeId::kUInt32 : TypeId::kUInt64; + } } // Type size helps to construct all groups of registers. // TypeId is invalid if the size is zero. 
uint32_t size = TypeUtils::sizeOf(typeId); - if (ASMJIT_UNLIKELY(!size)) + if (ASMJIT_UNLIKELY(!size)) { return DebugUtils::errored(kErrorInvalidTypeId); + } - if (ASMJIT_UNLIKELY(typeId == TypeId::kFloat80)) + if (ASMJIT_UNLIKELY(typeId == TypeId::kFloat80)) { return DebugUtils::errored(kErrorInvalidUseOfF80); + } RegType regType = RegType::kNone; if (TypeUtils::isBetween(typeId, TypeId::_kBaseStart, TypeId::_kVec32Start)) { regType = archTraits._typeIdToRegType[uint32_t(typeId) - uint32_t(TypeId::_kBaseStart)]; if (regType == RegType::kNone) { - if (typeId == TypeId::kInt64 || typeId == TypeId::kUInt64) + if (typeId == TypeId::kInt64 || typeId == TypeId::kUInt64) { return DebugUtils::errored(kErrorInvalidUseOfGpq); - else + } + else { return DebugUtils::errored(kErrorInvalidTypeId); + } } } else { - if (size <= 8 && archTraits._regSignature[RegType::kVec64].isValid()) + if (size <= 8 && archTraits._regSignature[RegType::kVec64].isValid()) { regType = RegType::kVec64; - else if (size <= 16 && archTraits._regSignature[RegType::kVec128].isValid()) + } + else if (size <= 16 && archTraits._regSignature[RegType::kVec128].isValid()) { regType = RegType::kVec128; - else if (size == 32 && archTraits._regSignature[RegType::kVec256].isValid()) + } + else if (size == 32 && archTraits._regSignature[RegType::kVec256].isValid()) { regType = RegType::kVec256; - else if (archTraits._regSignature[RegType::kVec512].isValid()) + } + else if (archTraits._regSignature[RegType::kVec512].isValid()) { regType = RegType::kVec512; - else + } + else { return DebugUtils::errored(kErrorInvalidTypeId); + } } *typeIdOut = typeId; diff --git a/src/asmjit/core/archtraits.h b/src/asmjit/core/archtraits.h index a10a41e..2ec29ba 100644 --- a/src/asmjit/core/archtraits.h +++ b/src/asmjit/core/archtraits.h @@ -205,49 +205,76 @@ struct ArchTraits { //! \{ //! Returns stack pointer register id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t spRegId() const noexcept { return _spRegId; } + //! 
Returns stack frame register id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t fpRegId() const noexcept { return _fpRegId; } + //! Returns link register id, if the architecture provides it. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t linkRegId() const noexcept { return _linkRegId; } + //! Returns instruction pointer register id, if the architecture provides it. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t ipRegId() const noexcept { return _ipRegId; } //! Returns a hardware stack alignment requirement. //! //! \note This is a hardware constraint. Architectures that don't constrain it would return the lowest alignment //! (1), however, some architectures may constrain the alignment, for example AArch64 requires 16-byte alignment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; } //! Tests whether the architecture provides link register, which is used across function calls. If the link //! register is not provided then a function call pushes the return address on stack (X86/X64). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasLinkReg() const noexcept { return _linkRegId != BaseReg::kIdBad; } //! Returns minimum addressable offset on stack guaranteed for all instructions. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t minStackOffset() const noexcept { return _minStackOffset; } + //! Returns maximum addressable offset on stack depending on specific instruction. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t maxStackOffset() const noexcept { return _maxStackOffset; } //! Returns ISA flags of the given register `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstHints instFeatureHints(RegGroup group) const noexcept { return _instHints[group]; } + //! Tests whether the given register `group` has the given `flag` set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasInstHint(RegGroup group, InstHints feature) const noexcept { return Support::test(_instHints[group], feature); } + //! 
Tests whether the ISA provides register swap instruction for the given register `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasInstRegSwap(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kRegSwap); } + //! Tests whether the ISA provides push/pop instructions for the given register `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasInstPushPop(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kPushPop); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRegType(RegType type) const noexcept { return type <= RegType::kMaxValue && _regSignature[type].isValid(); } //! Returns an operand signature from the given register `type` of this architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG OperandSignature regTypeToSignature(RegType type) const noexcept { return _regSignature[type]; } + //! Returns a register from the given register `type` of this architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegGroup regTypeToGroup(RegType type) const noexcept { return _regSignature[type].regGroup(); } + //! Returns a register size the given register `type` of this architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t regTypeToSize(RegType type) const noexcept { return _regSignature[type].size(); } + //! Returns a corresponding `TypeId` from the given register `type` of this architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG TypeId regTypeToTypeId(RegType type) const noexcept { return _regTypeToTypeId[type]; } //! Returns a table of ISA word names that appear in formatted text. Word names are ISA dependent. @@ -257,9 +284,11 @@ struct ArchTraits { //! - [1] 16-bits //! - [2] 32-bits //! - [3] 64-bits + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ArchTypeNameId* typeNameIdTable() const noexcept { return _typeNameIdTable; } //! Returns an ISA word name identifier of the given `index`, see \ref typeNameIdTable() for more details. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG ArchTypeNameId typeNameIdByIndex(uint32_t index) const noexcept { return _typeNameIdTable[index]; } //! \} @@ -268,6 +297,7 @@ struct ArchTraits { //! \{ //! Returns a const reference to `ArchTraits` for the given architecture `arch`. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG const ArchTraits& byArch(Arch arch) noexcept; //! \} diff --git a/src/asmjit/core/assembler.cpp b/src/asmjit/core/assembler.cpp index d6c8762..a2a2806 100644 --- a/src/asmjit/core/assembler.cpp +++ b/src/asmjit/core/assembler.cpp @@ -26,12 +26,14 @@ BaseAssembler::~BaseAssembler() noexcept {} // ================================= Error BaseAssembler::setOffset(size_t offset) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } size_t size = Support::max(_section->bufferSize(), this->offset()); - if (ASMJIT_UNLIKELY(offset > size)) + if (ASMJIT_UNLIKELY(offset > size)) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } _bufferPtr = _bufferData + offset; return kErrorOk; @@ -50,15 +52,18 @@ static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noe } Error BaseAssembler::section(Section* section) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } - if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section) + if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section) { return reportError(DebugUtils::errored(kErrorInvalidSection)); + } #ifndef ASMJIT_NO_LOGGING - if (_logger) + if (_logger) { _logger->logf(".section %s {#%u}\n", section->name(), section->id()); + } #endif BaseAssembler_initSection(this, section); @@ -73,10 +78,12 @@ Label BaseAssembler::newLabel() { if (ASMJIT_LIKELY(_code)) { LabelEntry* le; Error err = _code->newLabelEntry(&le); - if (ASMJIT_UNLIKELY(err)) + if 
(ASMJIT_UNLIKELY(err)) { reportError(err); - else + } + else { labelId = le->id(); + } } return Label(labelId); } @@ -86,28 +93,33 @@ Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, LabelType if (ASMJIT_LIKELY(_code)) { LabelEntry* le; Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { reportError(err); - else + } + else { labelId = le->id(); + } } return Label(labelId); } Error BaseAssembler::bind(const Label& label) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } Error err = _code->bindLabel(label, _section->id(), offset()); #ifndef ASMJIT_NO_LOGGING - if (_logger) + if (_logger) { EmitterUtils::logLabelBound(this, label); + } #endif resetInlineComment(); - if (err) + if (err) { return reportError(err); + } return kErrorOk; } @@ -116,11 +128,13 @@ Error BaseAssembler::bind(const Label& label) { // ===================== Error BaseAssembler::embed(const void* data, size_t dataSize) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } - if (dataSize == 0) + if (dataSize == 0) { return kErrorOk; + } CodeWriter writer(this); ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize)); @@ -144,11 +158,13 @@ Error BaseAssembler::embedDataArray(TypeId typeId, const void* data, size_t item uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize()); TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta); - if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) + if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } - if (itemCount == 0 || repeatCount == 0) + if (itemCount == 0 || repeatCount == 0) { return kErrorOk; + } uint32_t typeSize = TypeUtils::sizeOf(finalTypeId); Support::FastUInt8 of = 0; @@ -156,15 +172,16 
@@ Error BaseAssembler::embedDataArray(TypeId typeId, const void* data, size_t item size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of); size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of); - if (ASMJIT_UNLIKELY(of)) + if (ASMJIT_UNLIKELY(of)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } CodeWriter writer(this); ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize)); - for (size_t i = 0; i < repeatCount; i++) + for (size_t i = 0; i < repeatCount; i++) { writer.emitData(data, dataSize); - + } writer.done(this); #ifndef ASMJIT_NO_LOGGING @@ -194,18 +211,21 @@ static const TypeId dataTypeIdBySize[9] = { #endif Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } - if (ASMJIT_UNLIKELY(!isLabelValid(label))) + if (ASMJIT_UNLIKELY(!isLabelValid(label))) { return reportError(DebugUtils::errored(kErrorInvalidLabel)); + } ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment()))); ASMJIT_PROPAGATE(bind(label)); size_t size = pool.size(); - if (!size) + if (!size) { return kErrorOk; + } CodeWriter writer(this); ASMJIT_PROPAGATE(writer.ensureSpace(this, size)); @@ -234,21 +254,25 @@ Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) { } Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } ASMJIT_ASSERT(_code != nullptr); RelocEntry* re; LabelEntry* le = _code->labelEntry(label); - if (ASMJIT_UNLIKELY(!le)) + if (ASMJIT_UNLIKELY(!le)) { return reportError(DebugUtils::errored(kErrorInvalidLabel)); + } - if (dataSize == 0) + if (dataSize == 0) { dataSize = registerSize(); + } - if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) + if 
(ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) { return reportError(DebugUtils::errored(kErrorInvalidOperandSize)); + } CodeWriter writer(this); ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize)); @@ -266,8 +290,9 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) { #endif Error err = _code->newRelocEntry(&re, RelocType::kRelToAbs); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { return reportError(err); + } re->_sourceSectionId = _section->id(); re->_sourceOffset = offset(); @@ -282,8 +307,9 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) { of.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize); LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0, of); - if (ASMJIT_UNLIKELY(!link)) + if (ASMJIT_UNLIKELY(!link)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } link->relocId = re->id(); } @@ -296,20 +322,24 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) { } Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } LabelEntry* labelEntry = _code->labelEntry(label); LabelEntry* baseEntry = _code->labelEntry(base); - if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry)) + if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry)) { return reportError(DebugUtils::errored(kErrorInvalidLabel)); + } - if (dataSize == 0) + if (dataSize == 0) { dataSize = registerSize(); + } - if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) + if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) { return reportError(DebugUtils::errored(kErrorInvalidOperandSize)); + } CodeWriter writer(this); ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize)); @@ -336,12 +366,14 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size else { RelocEntry* re; 
Error err = _code->newRelocEntry(&re, RelocType::kExpression); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { return reportError(err); + } Expression* exp = _code->_zone.newT(); - if (ASMJIT_UNLIKELY(!exp)) + if (ASMJIT_UNLIKELY(!exp)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } exp->reset(); exp->opType = ExpressionOpType::kSub; @@ -365,8 +397,9 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size Error BaseAssembler::comment(const char* data, size_t size) { if (!hasEmitterFlag(EmitterFlags::kLogComments)) { - if (!hasEmitterFlag(EmitterFlags::kAttached)) + if (!hasEmitterFlag(EmitterFlags::kAttached)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } return kErrorOk; } diff --git a/src/asmjit/core/assembler.h b/src/asmjit/core/assembler.h index 68f3d5a..2efc95e 100644 --- a/src/asmjit/core/assembler.h +++ b/src/asmjit/core/assembler.h @@ -28,7 +28,7 @@ ASMJIT_BEGIN_NAMESPACE class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter { public: ASMJIT_NONCOPYABLE(BaseAssembler) - typedef BaseEmitter Base; + using Base = BaseEmitter; //! Current section where the assembling happens. Section* _section = nullptr; @@ -53,11 +53,15 @@ public: //! \{ //! Returns the capacity of the current CodeBuffer. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); } + //! Returns the number of remaining bytes in the current CodeBuffer. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); } //! Returns the current position in the CodeBuffer. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); } //! Sets the current position in the CodeBuffer to `offset`. @@ -66,10 +70,15 @@ public: ASMJIT_API Error setOffset(size_t offset); //! Returns the start of the CodeBuffer in the current section. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG uint8_t* bufferData() const noexcept { return _bufferData; } + //! Returns the end (first invalid byte) in the current section. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint8_t* bufferEnd() const noexcept { return _bufferEnd; } + //! Returns the current pointer in the CodeBuffer in the current section. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint8_t* bufferPtr() const noexcept { return _bufferPtr; } //! \} @@ -78,6 +87,7 @@ public: //! \{ //! Returns the current section. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Section* currentSection() const noexcept { return _section; } ASMJIT_API Error section(Section* section) override; diff --git a/src/asmjit/core/builder.cpp b/src/asmjit/core/builder.cpp index 1fa4420..ba771be 100644 --- a/src/asmjit/core/builder.cpp +++ b/src/asmjit/core/builder.cpp @@ -35,8 +35,9 @@ public: // ======================= static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept { - for (Pass* pass : self->_passes) + for (Pass* pass : self->_passes) { pass->~Pass(); + } self->_passes.reset(); } @@ -62,8 +63,9 @@ Error BaseBuilder::newInstNode(InstNode** out, InstId instId, InstOptions instOp ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity); InstNode* node = _allocator.allocT(InstNode::nodeSizeOfOpCapacity(opCapacity)); - if (ASMJIT_UNLIKELY(!node)) + if (ASMJIT_UNLIKELY(!node)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } *out = new(Support::PlacementNew{node}) InstNode(this, instId, instOptions, opCount, opCapacity); return kErrorOk; @@ -88,15 +90,17 @@ Error BaseBuilder::newEmbedDataNode(EmbedDataNode** out, TypeId typeId, const vo uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize()); TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta); - if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) + if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } uint32_t typeSize = 
TypeUtils::sizeOf(finalTypeId); Support::FastUInt8 of = 0; size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of); - if (ASMJIT_UNLIKELY(of)) + if (ASMJIT_UNLIKELY(of)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } EmbedDataNode* node; ASMJIT_PROPAGATE(_newNodeT(&node)); @@ -109,13 +113,15 @@ Error BaseBuilder::newEmbedDataNode(EmbedDataNode** out, TypeId typeId, const vo uint8_t* dstData = node->_inlineData; if (dataSize > EmbedDataNode::kInlineBufferSize) { dstData = static_cast(_dataZone.alloc(dataSize, 8)); - if (ASMJIT_UNLIKELY(!dstData)) + if (ASMJIT_UNLIKELY(!dstData)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } node->_externalData = dstData; } - if (data) + if (data) { memcpy(dstData, data, dataSize); + } *out = node; return kErrorOk; @@ -132,13 +138,15 @@ Error BaseBuilder::newCommentNode(CommentNode** out, const char* data, size_t si *out = nullptr; if (data) { - if (size == SIZE_MAX) + if (size == SIZE_MAX) { size = strlen(data); + } if (size > 0) { data = static_cast(_dataZone.dup(data, size, true)); - if (ASMJIT_UNLIKELY(!data)) + if (ASMJIT_UNLIKELY(!data)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } } } @@ -168,15 +176,18 @@ BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept { node->_next = next; prev->_next = node; - if (next) + if (next) { next->_prev = node; - else + } + else { _nodeList._last = node; + } } node->addFlags(NodeFlags::kIsActive); - if (node->isSection()) + if (node->isSection()) { _dirtySectionLinks = true; + } _cursor = node; return node; @@ -193,14 +204,17 @@ BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept { node->_next = next; node->addFlags(NodeFlags::kIsActive); - if (node->isSection()) + if (node->isSection()) { _dirtySectionLinks = true; + } prev->_next = node; - if (next) + if (next) { next->_prev = node; - else + } + else { _nodeList._last = node; + } return node; } @@ -218,43 +232,54 @@ BaseNode* 
BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept { node->_next = next; node->addFlags(NodeFlags::kIsActive); - if (node->isSection()) + if (node->isSection()) { _dirtySectionLinks = true; + } next->_prev = node; - if (prev) + if (prev) { prev->_next = node; - else + } + else { _nodeList._first = node; + } return node; } BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept { - if (!node->isActive()) + if (!node->isActive()) { return node; + } BaseNode* prev = node->prev(); BaseNode* next = node->next(); - if (_nodeList._first == node) + if (_nodeList._first == node) { _nodeList._first = next; - else + } + else { prev->_next = next; + } - if (_nodeList._last == node) + if (_nodeList._last == node) { _nodeList._last = prev; - else + } + else { next->_prev = prev; + } node->_prev = nullptr; node->_next = nullptr; node->clearFlags(NodeFlags::kIsActive); - if (node->isSection()) - _dirtySectionLinks = true; - if (_cursor == node) + if (node->isSection()) { + _dirtySectionLinks = true; + } + + if (_cursor == node) { _cursor = prev; + } return node; } @@ -265,21 +290,26 @@ void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept { return; } - if (!first->isActive()) + if (!first->isActive()) { return; + } BaseNode* prev = first->prev(); BaseNode* next = last->next(); - if (_nodeList._first == first) + if (_nodeList._first == first) { _nodeList._first = next; - else + } + else { prev->_next = next; + } - if (_nodeList._last == last) + if (_nodeList._last == last) { _nodeList._last = prev; - else + } + else { next->_prev = prev; + } BaseNode* node = first; uint32_t didRemoveSection = false; @@ -293,16 +323,19 @@ void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept { node->clearFlags(NodeFlags::kIsActive); didRemoveSection |= uint32_t(node->isSection()); - if (_cursor == node) + if (_cursor == node) { _cursor = prev; + } - if (node == last) + if (node == last) { break; + } node = next; } - if (didRemoveSection) + if 
(didRemoveSection) { _dirtySectionLinks = true; + } } BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept { @@ -317,28 +350,34 @@ BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept { Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) { *out = nullptr; - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } - if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId))) + if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId))) { return reportError(DebugUtils::errored(kErrorInvalidSection)); + } if (sectionId >= _sectionNodes.size()) { Error err = _sectionNodes.reserve(&_allocator, sectionId + 1); - if (ASMJIT_UNLIKELY(err != kErrorOk)) + if (ASMJIT_UNLIKELY(err != kErrorOk)) { return reportError(err); + } } SectionNode* node = nullptr; - if (sectionId < _sectionNodes.size()) + if (sectionId < _sectionNodes.size()) { node = _sectionNodes[sectionId]; + } if (!node) { ASMJIT_PROPAGATE(_newNodeT(&node, sectionId)); // We have already reserved enough space, this cannot fail now. - if (sectionId >= _sectionNodes.size()) - _sectionNodes.resize(&_allocator, sectionId + 1); + if (sectionId >= _sectionNodes.size()) { + // SAFETY: No need to check for error condition as we have already reserved enough space. + (void)_sectionNodes.resize(&_allocator, sectionId + 1); + } _sectionNodes[sectionId] = node; } @@ -361,36 +400,42 @@ Error BaseBuilder::section(Section* section) { // This is a bit tricky. We cache section links to make sure that // switching sections doesn't involve traversal in linked-list unless // the position of the section has changed. 
- if (hasDirtySectionLinks()) + if (hasDirtySectionLinks()) { updateSectionLinks(); + } - if (node->_nextSection) + if (node->_nextSection) { _cursor = node->_nextSection->_prev; - else + } + else { _cursor = _nodeList.last(); + } } return kErrorOk; } void BaseBuilder::updateSectionLinks() noexcept { - if (!_dirtySectionLinks) + if (!_dirtySectionLinks) { return; + } BaseNode* node_ = _nodeList.first(); SectionNode* currentSection = nullptr; while (node_) { if (node_->isSection()) { - if (currentSection) + if (currentSection) { currentSection->_nextSection = node_->as(); + } currentSection = node_->as(); } node_ = node_->next(); } - if (currentSection) + if (currentSection) { currentSection->_nextSection = nullptr; + } _dirtySectionLinks = false; } @@ -401,15 +446,18 @@ void BaseBuilder::updateSectionLinks() noexcept { Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) { *out = nullptr; - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } uint32_t index = labelId; - if (ASMJIT_UNLIKELY(index >= _code->labelCount())) + if (ASMJIT_UNLIKELY(index >= _code->labelCount())) { return DebugUtils::errored(kErrorInvalidLabel); + } - if (index >= _labelNodes.size()) + if (index >= _labelNodes.size()) { ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1)); + } LabelNode* node = _labelNodes[index]; if (!node) { @@ -422,8 +470,9 @@ Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) { } Error BaseBuilder::registerLabelNode(LabelNode* node) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } LabelEntry* le; ASMJIT_PROPAGATE(_code->newLabelEntry(&le)); @@ -445,13 +494,15 @@ static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) { uint32_t growBy = labelId - self->_labelNodes.size(); Error err = self->_labelNodes.willGrow(&self->_allocator, growBy); - if (ASMJIT_UNLIKELY(err)) + if 
(ASMJIT_UNLIKELY(err)) { return self->reportError(err); + } LabelNode* node; ASMJIT_PROPAGATE(self->_newNodeT(&node, labelId)); - self->_labelNodes.resize(&self->_allocator, labelId + 1); + // SAFETY: No need to check for error condition as we have already reserved enough space. + (void)self->_labelNodes.resize(&self->_allocator, labelId + 1); self->_labelNodes[labelId] = node; node->_labelId = labelId; return kErrorOk; @@ -495,15 +546,18 @@ Error BaseBuilder::bind(const Label& label) { // ==================== ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept { - for (Pass* pass : _passes) - if (strcmp(pass->name(), name) == 0) + for (Pass* pass : _passes) { + if (strcmp(pass->name(), name) == 0) { return pass; + } + } return nullptr; } ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } if (ASMJIT_UNLIKELY(pass == nullptr)) { // Since this is directly called by `addPassT()` we treat `null` argument @@ -512,8 +566,9 @@ ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept { } else if (ASMJIT_UNLIKELY(pass->_cb)) { // Kinda weird, but okay... 
- if (pass->_cb == this) + if (pass->_cb == this) { return kErrorOk; + } return DebugUtils::errored(kErrorInvalidState); } @@ -523,15 +578,18 @@ ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept { } ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } - if (ASMJIT_UNLIKELY(pass == nullptr)) + if (ASMJIT_UNLIKELY(pass == nullptr)) { return DebugUtils::errored(kErrorInvalidArgument); + } if (pass->_cb != nullptr) { - if (pass->_cb != this) + if (pass->_cb != this) { return DebugUtils::errored(kErrorInvalidState); + } uint32_t index = _passes.indexOf(pass); ASMJIT_ASSERT(index != Globals::kNotFound); @@ -545,11 +603,13 @@ ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept { } Error BaseBuilder::runPasses() { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } - if (_passes.empty()) + if (_passes.empty()) { return kErrorOk; + } ErrorHandler* prev = errorHandler(); PostponedErrorHandler postponed; @@ -560,14 +620,16 @@ Error BaseBuilder::runPasses() { for (Pass* pass : _passes) { _passZone.reset(); err = pass->run(&_passZone, _logger); - if (err) + if (err) { break; + } } _passZone.reset(); setErrorHandler(prev); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr); + } return kErrorOk; } @@ -580,8 +642,9 @@ Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1, InstOptions options = instOptions() | forcedInstOptions(); if (Support::test(options, InstOptions::kReserved)) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } #ifndef ASMJIT_NO_VALIDATION // Strict validation. 
@@ -626,12 +689,14 @@ Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1, node->setOp(0, o0); node->setOp(1, o1); node->setOp(2, o2); - for (uint32_t i = 3; i < opCount; i++) + for (uint32_t i = 3; i < opCount; i++) { node->setOp(i, opExt[i - 3]); + } node->resetOpRange(opCount, opCapacity); - if (comment) + if (comment) { node->setInlineComment(static_cast(_dataZone.dup(comment, strlen(comment), true))); + } addNode(node); resetExtraReg(); @@ -642,8 +707,9 @@ Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1, // =================== Error BaseBuilder::align(AlignMode alignMode, uint32_t alignment) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } AlignNode* node; ASMJIT_PROPAGATE(newAlignNode(&node, alignMode, alignment)); @@ -657,8 +723,9 @@ Error BaseBuilder::align(AlignMode alignMode, uint32_t alignment) { // =================== Error BaseBuilder::embed(const void* data, size_t dataSize) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } EmbedDataNode* node; ASMJIT_PROPAGATE(newEmbedDataNode(&node, TypeId::kUInt8, data, dataSize)); @@ -669,8 +736,9 @@ Error BaseBuilder::embed(const void* data, size_t dataSize) { } Error BaseBuilder::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t itemRepeat) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } EmbedDataNode* node; ASMJIT_PROPAGATE(newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat)); @@ -681,11 +749,13 @@ Error BaseBuilder::embedDataArray(TypeId typeId, const void* data, size_t itemCo } Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } - if (!isLabelValid(label)) + if 
(!isLabelValid(label)) { return reportError(DebugUtils::errored(kErrorInvalidLabel)); + } ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment()))); ASMJIT_PROPAGATE(bind(label)); @@ -710,11 +780,13 @@ static inline bool BaseBuilder_checkDataSize(size_t dataSize) noexcept { } Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } - if (!BaseBuilder_checkDataSize(dataSize)) + if (!BaseBuilder_checkDataSize(dataSize)) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } EmbedLabelNode* node; ASMJIT_PROPAGATE(_newNodeT(&node, label.id(), uint32_t(dataSize))); @@ -724,11 +796,13 @@ Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) { } Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } - if (!BaseBuilder_checkDataSize(dataSize)) + if (!BaseBuilder_checkDataSize(dataSize)) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } EmbedLabelDeltaNode* node; ASMJIT_PROPAGATE(_newNodeT(&node, label.id(), base.id(), uint32_t(dataSize))); @@ -741,8 +815,9 @@ Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t // ===================== Error BaseBuilder::comment(const char* data, size_t size) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return DebugUtils::errored(kErrorNotInitialized); + } CommentNode* node; ASMJIT_PROPAGATE(newCommentNode(&node, data, size)); @@ -827,7 +902,9 @@ Error BaseBuilder::serializeTo(BaseEmitter* dst) { err = dst->comment(node->inlineComment()); } - if (err) break; + if (err) { + break; + } node_ = node_->next(); } while (node_); @@ -843,8 +920,9 @@ Error BaseBuilder::onAttach(CodeHolder* code) noexcept { SectionNode* initialSection; Error err = 
sectionNodeOf(&initialSection, 0); - if (!err) + if (!err) { err = _passes.willGrow(&_allocator, 8); + } if (ASMJIT_UNLIKELY(err)) { onDetach(code); diff --git a/src/asmjit/core/builder.h b/src/asmjit/core/builder.h index 6c10ecb..5e2f3e9 100644 --- a/src/asmjit/core/builder.h +++ b/src/asmjit/core/builder.h @@ -163,9 +163,13 @@ public: //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _first == nullptr; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* first() const noexcept { return _first; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* last() const noexcept { return _last; } //! \} @@ -185,7 +189,7 @@ public: class ASMJIT_VIRTAPI BaseBuilder : public BaseEmitter { public: ASMJIT_NONCOPYABLE(BaseBuilder) - typedef BaseEmitter Base; + using Base = BaseEmitter; //! \name Members //! \{ @@ -231,11 +235,15 @@ public: //! \name Node Management //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeList nodeList() const noexcept { return _nodeList; } //! Returns the first node. + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* firstNode() const noexcept { return _nodeList.first(); } + //! Returns the last node. + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* lastNode() const noexcept { return _nodeList.last(); } //! Allocates and instantiates a new node of type `T` and returns its instance. If the allocation fails `nullptr` @@ -282,6 +290,7 @@ public: //! When the Builder/Compiler is created it automatically creates a '.text' \ref SectionNode, which will be the //! initial one. When instructions are added they are always added after the cursor and the cursor is changed //! to be that newly added node. Use `setCursor()` to change where new nodes are inserted. + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* cursor() const noexcept { return _cursor; } //! Sets the current node to `node` and return the previous one. @@ -302,11 +311,13 @@ public: //! //! 
\note If a section of some id is not associated with the Builder/Compiler it would be null, so always check //! for nulls if you iterate over the vector. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& sectionNodes() const noexcept { return _sectionNodes; } //! Tests whether the `SectionNode` of the given `sectionId` was registered. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRegisteredSectionNode(uint32_t sectionId) const noexcept { return sectionId < _sectionNodes.size() && _sectionNodes[sectionId] != nullptr; } @@ -321,6 +332,7 @@ public: //! Returns whether the section links of active section nodes are dirty. You can update these links by calling //! `updateSectionLinks()` in such case. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasDirtySectionLinks() const noexcept { return _dirtySectionLinks; } //! Updates links of all active section nodes. @@ -335,14 +347,17 @@ public: //! //! \note If a label of some id is not associated with the Builder/Compiler it would be null, so always check for //! nulls if you iterate over the vector. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& labelNodes() const noexcept { return _labelNodes; } //! Tests whether the `LabelNode` of the given `labelId` was registered. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRegisteredLabelNode(uint32_t labelId) const noexcept { return labelId < _labelNodes.size() && _labelNodes[labelId] != nullptr; } //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRegisteredLabelNode(const Label& label) const noexcept { return hasRegisteredLabelNode(label.id()); } @@ -364,8 +379,12 @@ public: //! Use \ref labelNodeOf() functions to get back \ref LabelNode from a label or its identifier. 
ASMJIT_API Error registerLabelNode(LabelNode* ASMJIT_NONNULL(node)); + [[nodiscard]] ASMJIT_API Label newLabel() override; + + [[nodiscard]] ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId) override; + ASMJIT_API Error bind(const Label& label) override; //! \} @@ -374,6 +393,7 @@ public: //! \{ //! Returns a vector of `Pass` instances that will be executed by `runPasses()`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& passes() const noexcept { return _passes; } //! Allocates and instantiates a new pass of type `T` and returns its instance. If the allocation fails `nullptr` is @@ -384,10 +404,12 @@ public: //! \remarks The pointer returned (if non-null) is owned by the Builder or Compiler. When the Builder/Compiler is //! destroyed it destroys all passes it created so no manual memory management is required. template + [[nodiscard]] inline T* newPassT() noexcept { return _codeZone.newT(); } //! \overload template + [[nodiscard]] inline T* newPassT(Args&&... args) noexcept { return _codeZone.newT(std::forward(args)...); } template @@ -399,9 +421,12 @@ public: //! Returns `Pass` by name. //! //! If the pass having the given `name` doesn't exist `nullptr` is returned. + [[nodiscard]] ASMJIT_API Pass* passByName(const char* name) const noexcept; + //! Adds `pass` to the list of passes. ASMJIT_API Error addPass(Pass* pass) noexcept; + //! Removes `pass` from the list of passes and delete it. ASMJIT_API Error deletePass(Pass* pass) noexcept; @@ -604,17 +629,24 @@ public: //! Casts this node to `T*`. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* as() noexcept { return static_cast(this); } + //! Casts this node to `const T*`. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG const T* as() const noexcept { return static_cast(this); } //! Returns previous node or `nullptr` if this node is either first or not part of Builder/Compiler node-list. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* prev() const noexcept { return _prev; } + //! Returns next node or `nullptr` if this node is either last or not part of Builder/Compiler node-list. + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* next() const noexcept { return _next; } //! Returns the type of the node, see \ref NodeType. + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeType type() const noexcept { return _any._nodeType; } //! Sets the type of the node, see `NodeType` (internal). @@ -624,37 +656,65 @@ public: ASMJIT_INLINE_NODEBUG void setType(NodeType type) noexcept { _any._nodeType = type; } //! Tests whether this node is either `InstNode` or extends it. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isInst() const noexcept { return hasFlag(NodeFlags::kActsAsInst); } + //! Tests whether this node is `SectionNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isSection() const noexcept { return type() == NodeType::kSection; } + //! Tests whether this node is either `LabelNode` or extends it. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLabel() const noexcept { return hasFlag(NodeFlags::kActsAsLabel); } + //! Tests whether this node is `AlignNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAlign() const noexcept { return type() == NodeType::kAlign; } + //! Tests whether this node is `EmbedDataNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEmbedData() const noexcept { return type() == NodeType::kEmbedData; } + //! Tests whether this node is `EmbedLabelNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEmbedLabel() const noexcept { return type() == NodeType::kEmbedLabel; } + //! Tests whether this node is `EmbedLabelDeltaNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEmbedLabelDelta() const noexcept { return type() == NodeType::kEmbedLabelDelta; } + //! Tests whether this node is `ConstPoolNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isConstPool() const noexcept { return type() == NodeType::kConstPool; } + //! 
Tests whether this node is `CommentNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isComment() const noexcept { return type() == NodeType::kComment; } + //! Tests whether this node is `SentinelNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isSentinel() const noexcept { return type() == NodeType::kSentinel; } //! Tests whether this node is `FuncNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFunc() const noexcept { return type() == NodeType::kFunc; } + //! Tests whether this node is `FuncRetNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFuncRet() const noexcept { return type() == NodeType::kFuncRet; } + //! Tests whether this node is `InvokeNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isInvoke() const noexcept { return type() == NodeType::kInvoke; } //! Returns the node flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeFlags flags() const noexcept { return _any._nodeFlags; } + //! Tests whether the node has the given `flag` set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(NodeFlags flag) const noexcept { return Support::test(_any._nodeFlags, flag); } + //! Replaces node flags with `flags`. ASMJIT_INLINE_NODEBUG void setFlags(NodeFlags flags) noexcept { _any._nodeFlags = flags; } //! Adds the given `flags` to node flags. @@ -663,24 +723,39 @@ public: ASMJIT_INLINE_NODEBUG void clearFlags(NodeFlags flags) noexcept { _any._nodeFlags &= ~flags; } //! Tests whether the node is code that can be executed. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isCode() const noexcept { return hasFlag(NodeFlags::kIsCode); } + //! Tests whether the node is data that cannot be executed. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isData() const noexcept { return hasFlag(NodeFlags::kIsData); } + //! Tests whether the node is informative only (is never encoded like comment, etc...). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isInformative() const noexcept { return hasFlag(NodeFlags::kIsInformative); } + //! 
Tests whether the node is removable if it's in an unreachable code block. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isRemovable() const noexcept { return hasFlag(NodeFlags::kIsRemovable); } + //! Tests whether the node has no effect when executed (label, .align, nop, ...). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasNoEffect() const noexcept { return hasFlag(NodeFlags::kHasNoEffect); } + //! Tests whether the node is part of the code. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isActive() const noexcept { return hasFlag(NodeFlags::kIsActive); } //! Tests whether the node has a position assigned. //! //! \remarks Returns `true` if node position is non-zero. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasPosition() const noexcept { return _position != 0; } + //! Returns node position. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t position() const noexcept { return _position; } + //! Sets node position. //! //! Node position is a 32-bit unsigned integer that is used by Compiler to track where the node is relatively to @@ -697,10 +772,15 @@ public: //! size so you can either store a pointer or `int64_t` value through `setUserDataAsPtr()`, `setUserDataAsInt64()` //! and `setUserDataAsUInt64()`. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* userDataAsPtr() const noexcept { return static_cast(_userDataPtr); } + //! Returns user data casted to `int64_t`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG int64_t userDataAsInt64() const noexcept { return int64_t(_userDataU64); } + //! Returns user data casted to `uint64_t`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t userDataAsUInt64() const noexcept { return _userDataU64; } //! Sets user data to `data`. @@ -715,10 +795,14 @@ public: ASMJIT_INLINE_NODEBUG void resetUserData() noexcept { _userDataU64 = 0; } //! Tests whether the node has an associated pass data. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasPassData() const noexcept { return _passData != nullptr; } + //! 
Returns the node pass data - data used during processing & transformations. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* passData() const noexcept { return (T*)_passData; } + //! Sets the node pass data to `data`. template ASMJIT_INLINE_NODEBUG void setPassData(T* data) noexcept { _passData = (void*)data; } @@ -726,9 +810,13 @@ public: ASMJIT_INLINE_NODEBUG void resetPassData() noexcept { _passData = nullptr; } //! Tests whether the node has an inline comment/annotation. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasInlineComment() const noexcept { return _inlineComment != nullptr; } + //! Returns an inline comment/annotation string. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; } + //! Sets an inline comment/annotation string to `s`. ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; } //! Resets an inline comment/annotation string to nullptr. @@ -752,10 +840,10 @@ public: //! embed 5. The rest (up to 6 operands) is considered extended. //! //! The number of operands InstNode holds is decided when \ref InstNode is created. - static constexpr uint32_t kBaseOpCapacity = uint32_t((128 - sizeof(BaseNode) - sizeof(BaseInst)) / sizeof(Operand_)); + static inline constexpr uint32_t kBaseOpCapacity = uint32_t((128 - sizeof(BaseNode) - sizeof(BaseInst)) / sizeof(Operand_)); //! Count of maximum number of operands \ref InstNode can hold. - static constexpr uint32_t kFullOpCapacity = Globals::kMaxOpCount; + static inline constexpr uint32_t kFullOpCapacity = Globals::kMaxOpCount; //! \} @@ -791,7 +879,10 @@ public: //! \name Instruction Object //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseInst& baseInst() noexcept { return _baseInst; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const BaseInst& baseInst() const noexcept { return _baseInst; } //! \} @@ -800,8 +891,11 @@ public: //! \{ //! Returns the instruction id, see `BaseInst::Id`. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _baseInst.id(); } + //! Returns the instruction real id, see `BaseInst::Id`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _baseInst.realId(); } //! Sets the instruction id to `id`, see `BaseInst::Id`. @@ -813,9 +907,13 @@ public: //! \{ //! Returns instruction options, see \ref InstOptions for more details. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstOptions options() const noexcept { return _baseInst.options(); } + //! Tests whether instruction has the given \option` set/enabled. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return _baseInst.hasOption(option); } + //! Sets instruction `options` to the provided value, resetting all others. ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _baseInst.setOptions(options); } //! Adds instruction `options` to the instruction. @@ -831,11 +929,17 @@ public: //! \{ //! Tests whether the node has an extra register operand. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _baseInst.hasExtraReg(); } + //! Returns extra register operand. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _baseInst.extraReg(); } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _baseInst.extraReg(); } + //! Sets extra register operand to `reg`. ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _baseInst.setExtraReg(reg); } //! Sets extra register operand to `reg`. @@ -849,24 +953,30 @@ public: //! \{ //! Returns operand count. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _inst._opCount; } + //! Returns operand capacity. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t opCapacity() const noexcept { return _inst._opCapacity; } //! Sets operand count. 
ASMJIT_INLINE_NODEBUG void setOpCount(uint32_t opCount) noexcept { _inst._opCount = uint8_t(opCount); } //! Returns operands array. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Operand* operands() noexcept { return reinterpret_cast(reinterpret_cast(this) + sizeof(InstNode)); } //! Returns operands array (const). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const Operand* operands() const noexcept { return reinterpret_cast(reinterpret_cast(this) + sizeof(InstNode)); } //! Returns operand at the given `index`. + [[nodiscard]] inline Operand& op(uint32_t index) noexcept { ASMJIT_ASSERT(index < opCapacity()); @@ -875,6 +985,7 @@ public: } //! Returns operand at the given `index` (const). + [[nodiscard]] inline const Operand& op(uint32_t index) const noexcept { ASMJIT_ASSERT(index < opCapacity()); @@ -911,6 +1022,7 @@ public: //! \{ //! Tests whether the given operand type `opType` is used by the instruction. + [[nodiscard]] inline bool hasOpType(OperandType opType) const noexcept { const Operand* ops = operands(); for (uint32_t i = 0, count = opCount(); i < count; i++) @@ -920,18 +1032,26 @@ public: } //! Tests whether the instruction uses at least one register operand. + [[nodiscard]] inline bool hasRegOp() const noexcept { return hasOpType(OperandType::kReg); } + //! Tests whether the instruction uses at least one memory operand. + [[nodiscard]] inline bool hasMemOp() const noexcept { return hasOpType(OperandType::kMem); } + //! Tests whether the instruction uses at least one immediate operand. + [[nodiscard]] inline bool hasImmOp() const noexcept { return hasOpType(OperandType::kImm); } + //! Tests whether the instruction uses at least one label operand. + [[nodiscard]] inline bool hasLabelOp() const noexcept { return hasOpType(OperandType::kLabel); } //! Returns the index of the given operand type `opType`. //! //! \note If the operand type wa found, the value returned represents its index in \ref operands() //! 
array, otherwise \ref Globals::kNotFound is returned to signalize that the operand was not found. + [[nodiscard]] inline uint32_t indexOfOpType(OperandType opType) const noexcept { uint32_t i = 0; uint32_t count = opCount(); @@ -947,10 +1067,15 @@ public: } //! A shortcut that calls `indexOfOpType(OperandType::kMem)`. + [[nodiscard]] inline uint32_t indexOfMemOp() const noexcept { return indexOfOpType(OperandType::kMem); } + //! A shortcut that calls `indexOfOpType(OperandType::kImm)`. + [[nodiscard]] inline uint32_t indexOfImmOp() const noexcept { return indexOfOpType(OperandType::kImm); } + //! A shortcut that calls `indexOfOpType(OperandType::kLabel)`. + [[nodiscard]] inline uint32_t indexOfLabelOp() const noexcept { return indexOfOpType(OperandType::kLabel); } //! \} @@ -961,18 +1086,22 @@ public: //! \cond INTERNAL //! Returns uint32_t[] view that represents BaseInst::RegOnly and instruction operands. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t* _getRewriteArray() noexcept { return &_baseInst._extraReg._id; } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const uint32_t* _getRewriteArray() const noexcept { return &_baseInst._extraReg._id; } //! Maximum value of rewrite id - 6 operands each having 4 slots is 24, one RegOnly having 2 slots => 26. - static constexpr uint32_t kMaxRewriteId = 26 - 1; + static inline constexpr uint32_t kMaxRewriteId = 26 - 1; //! Returns a rewrite index of the given pointer to `id`. //! //! This function returns a value that can be then passed to `\ref rewriteIdAtIndex() function. It can address //! any id from any operand that is used by the instruction in addition to \ref BaseInst::regOnly field, which //! can also be used by the register allocator. + [[nodiscard]] inline uint32_t getRewriteIndex(const uint32_t* id) const noexcept { const uint32_t* array = _getRewriteArray(); ASMJIT_ASSERT(array <= id); @@ -1010,14 +1139,16 @@ public: //! 
There are only two capacities used - \ref kBaseOpCapacity and \ref kFullOpCapacity, so this function //! is used to decide between these two. The general rule is that instructions that can be represented with //! \ref kBaseOpCapacity would use this value, and all others would take \ref kFullOpCapacity. - static ASMJIT_INLINE_NODEBUG constexpr uint32_t capacityOfOpCount(uint32_t opCount) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR uint32_t capacityOfOpCount(uint32_t opCount) noexcept { return opCount <= kBaseOpCapacity ? kBaseOpCapacity : kFullOpCapacity; } //! Calculates the size of \ref InstNode required to hold at most `opCapacity` operands. //! //! This function is used internally to allocate \ref InstNode. - static ASMJIT_INLINE_NODEBUG constexpr size_t nodeSizeOfOpCapacity(uint32_t opCapacity) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR size_t nodeSizeOfOpCapacity(uint32_t opCapacity) noexcept { return sizeof(InstNode) + opCapacity * sizeof(Operand); } //! \endcond @@ -1107,8 +1238,11 @@ public: //! \{ //! Returns \ref Label representation of the \ref LabelNode. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); } + //! Returns the id of the label. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; } //! \} @@ -1146,12 +1280,16 @@ public: //! \{ //! Returns align mode. + [[nodiscard]] ASMJIT_INLINE_NODEBUG AlignMode alignMode() const noexcept { return _alignData._alignMode; } + //! Sets align mode to `alignMode`. ASMJIT_INLINE_NODEBUG void setAlignMode(AlignMode alignMode) noexcept { _alignData._alignMode = alignMode; } //! Returns align offset in bytes. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; } + //! Sets align offset in bytes to `offset`. 
ASMJIT_INLINE_NODEBUG void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; } @@ -1167,9 +1305,7 @@ public: ASMJIT_NONCOPYABLE(EmbedDataNode) //! \cond INTERNAL - enum : uint32_t { - kInlineBufferSize = 128 - (sizeof(BaseNode) + sizeof(size_t) * 2) - }; + static inline constexpr uint32_t kInlineBufferSize = 128 - (sizeof(BaseNode) + sizeof(size_t) * 2); //! \endcond //! \name Members @@ -1204,30 +1340,38 @@ public: //! \{ //! Returns data type as \ref TypeId. + [[nodiscard]] ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _embed._typeId; } + //! Returns the size of a single data element. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t typeSize() const noexcept { return _embed._typeSize; } //! Returns a pointer to the data casted to `uint8_t`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint8_t* data() const noexcept { return dataSize() <= kInlineBufferSize ? const_cast(_inlineData) : _externalData; } //! Returns a pointer to the data casted to `T`. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* dataAs() const noexcept { return reinterpret_cast(data()); } //! Returns the number of (typed) items in the array. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t itemCount() const noexcept { return _itemCount; } //! Returns how many times the data is repeated (default 1). //! //! Repeated data is useful when defining constants for SIMD, for example. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t repeatCount() const noexcept { return _repeatCount; } //! Returns the size of the data, not considering the number of times it repeats. //! //! \note The returned value is the same as `typeSize() * itemCount()`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t dataSize() const noexcept { return typeSize() * _itemCount; } //! \} @@ -1261,17 +1405,23 @@ public: //! \{ //! Returns the label to embed as \ref Label operand. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); } - //! Returns the id of the label. 
- ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; } //! Sets the label id from `label` operand. ASMJIT_INLINE_NODEBUG void setLabel(const Label& label) noexcept { setLabelId(label.id()); } + + //! Returns the id of the label. + [[nodiscard]] + ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; } + //! Sets the label id (use with caution, improper use can break a lot of things). ASMJIT_INLINE_NODEBUG void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; } //! Returns the data size. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t dataSize() const noexcept { return _dataSize; } + //! Sets the data size. ASMJIT_INLINE_NODEBUG void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; } @@ -1308,18 +1458,25 @@ public: //! \{ //! Returns the label as `Label` operand. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); } - //! Returns the id of the label. - ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; } //! Sets the label id from `label` operand. ASMJIT_INLINE_NODEBUG void setLabel(const Label& label) noexcept { setLabelId(label.id()); } + + //! Returns the id of the label. + [[nodiscard]] + ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; } + //! Sets the label id. ASMJIT_INLINE_NODEBUG void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; } //! Returns the base label as `Label` operand. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Label baseLabel() const noexcept { return Label(_baseLabelId); } + //! Returns the id of the base label. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t baseLabelId() const noexcept { return _baseLabelId; } //! Sets the base label id from `label` operand. @@ -1328,7 +1485,9 @@ public: ASMJIT_INLINE_NODEBUG void setBaseLabelId(uint32_t baseLabelId) noexcept { _baseLabelId = baseLabelId; } //! Returns the size of the embedded label address. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t dataSize() const noexcept { return _dataSize; } + //! Sets the size of the embedded label address. ASMJIT_INLINE_NODEBUG void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; } @@ -1366,15 +1525,23 @@ public: //! \{ //! Tests whether the constant-pool is empty. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _constPool.empty(); } + //! Returns the size of the constant-pool in bytes. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _constPool.size(); } + //! Returns minimum alignment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t alignment() const noexcept { return _constPool.alignment(); } //! Returns the wrapped `ConstPool` instance. + [[nodiscard]] ASMJIT_INLINE_NODEBUG ConstPool& constPool() noexcept { return _constPool; } + //! Returns the wrapped `ConstPool` instance (const). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ConstPool& constPool() const noexcept { return _constPool; } //! \} @@ -1431,6 +1598,7 @@ public: //! \{ //! Returns the type of the sentinel. + [[nodiscard]] ASMJIT_INLINE_NODEBUG SentinelType sentinelType() const noexcept { return _sentinel._sentinelType; } @@ -1471,8 +1639,11 @@ public: //! \{ //! Returns \ref BaseBuilder associated with the pass. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const BaseBuilder* cb() const noexcept { return _cb; } + //! Returns the name of the pass. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name; } //! \} diff --git a/src/asmjit/core/codebuffer.h b/src/asmjit/core/codebuffer.h index d4b7ceb..32bb03c 100644 --- a/src/asmjit/core/codebuffer.h +++ b/src/asmjit/core/codebuffer.h @@ -45,11 +45,14 @@ struct CodeBuffer { //! \{ //! Returns a reference to the byte at the given `index`. + [[nodiscard]] inline uint8_t& operator[](size_t index) noexcept { ASMJIT_ASSERT(index < _size); return _data[index]; } + //! 
\overload + [[nodiscard]] inline const uint8_t& operator[](size_t index) const noexcept { ASMJIT_ASSERT(index < _size); return _data[index]; @@ -61,34 +64,47 @@ struct CodeBuffer { //! \{ //! Returns code buffer flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG CodeBufferFlags flags() const noexcept { return _flags; } + //! Tests whether the code buffer has the given `flag` set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(CodeBufferFlags flag) const noexcept { return Support::test(_flags, flag); } //! Tests whether this code buffer has a fixed size. //! //! Fixed size means that the code buffer is fixed and cannot grow. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return hasFlag(CodeBufferFlags::kIsFixed); } //! Tests whether the data in this code buffer is external. //! //! External data can only be provided by users, it's never used by AsmJit. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return hasFlag(CodeBufferFlags::kIsExternal); } //! Tests whether the data in this code buffer is allocated (non-null). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return _data != nullptr; } //! Tests whether the code buffer is empty. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return !_size; } //! Returns the size of the data. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; } + //! Returns the capacity of the data. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return _capacity; } //! Returns the pointer to the data the buffer references. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint8_t* data() noexcept { return _data; } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const uint8_t* data() const noexcept { return _data; } //! \} @@ -96,10 +112,16 @@ struct CodeBuffer { //! \name Iterators //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint8_t* begin() noexcept { return _data; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const uint8_t* begin() const noexcept { return _data; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint8_t* end() noexcept { return _data + _size; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const uint8_t* end() const noexcept { return _data + _size; } //! \} diff --git a/src/asmjit/core/codeholder.cpp b/src/asmjit/core/codeholder.cpp index e3d164d..9f18081 100644 --- a/src/asmjit/core/codeholder.cpp +++ b/src/asmjit/core/codeholder.cpp @@ -83,8 +83,9 @@ static void CodeHolder_resetInternal(CodeHolder* self, ResetPolicy resetPolicy) uint32_t numSections = self->_sections.size(); for (i = 0; i < numSections; i++) { Section* section = self->_sections[i]; - if (section->_buffer.data() && !section->_buffer.isExternal()) + if (section->_buffer.data() && !section->_buffer.isExternal()) { ::free(section->_buffer._data); + } section->_buffer._data = nullptr; section->_buffer._capacity = 0; } @@ -150,8 +151,9 @@ Error CodeHolder::init(const Environment& environment, uint64_t baseAddress) noe Error CodeHolder::init(const Environment& environment, const CpuFeatures& cpuFeatures, uint64_t baseAddress) noexcept { // Cannot reinitialize if it's locked or there is one or more emitter attached. - if (isInitialized()) + if (isInitialized()) { return DebugUtils::errored(kErrorAlreadyInitialized); + } // If we are just initializing there should be no emitters attached. ASMJIT_ASSERT(_emitters.empty()); @@ -193,23 +195,27 @@ void CodeHolder::reset(ResetPolicy resetPolicy) noexcept { Error CodeHolder::attach(BaseEmitter* emitter) noexcept { // Catch a possible misuse of the API. - if (ASMJIT_UNLIKELY(!emitter)) + if (ASMJIT_UNLIKELY(!emitter)) { return DebugUtils::errored(kErrorInvalidArgument); + } // Invalid emitter, this should not be possible. 
EmitterType type = emitter->emitterType(); - if (ASMJIT_UNLIKELY(type == EmitterType::kNone || uint32_t(type) > uint32_t(EmitterType::kMaxValue))) + if (ASMJIT_UNLIKELY(type == EmitterType::kNone || uint32_t(type) > uint32_t(EmitterType::kMaxValue))) { return DebugUtils::errored(kErrorInvalidState); + } uint64_t archMask = emitter->_archMask; - if (ASMJIT_UNLIKELY(!(archMask & (uint64_t(1) << uint32_t(arch()))))) + if (ASMJIT_UNLIKELY(!(archMask & (uint64_t(1) << uint32_t(arch()))))) { return DebugUtils::errored(kErrorInvalidArch); + } // This is suspicious, but don't fail if `emitter` is already attached // to this code holder. This is not error, but it's not recommended. if (emitter->_code != nullptr) { - if (emitter->_code == this) + if (emitter->_code == this) { return kErrorOk; + } return DebugUtils::errored(kErrorInvalidState); } @@ -225,18 +231,21 @@ Error CodeHolder::attach(BaseEmitter* emitter) noexcept { } Error CodeHolder::detach(BaseEmitter* emitter) noexcept { - if (ASMJIT_UNLIKELY(!emitter)) + if (ASMJIT_UNLIKELY(!emitter)) { return DebugUtils::errored(kErrorInvalidArgument); + } - if (ASMJIT_UNLIKELY(emitter->_code != this)) + if (ASMJIT_UNLIKELY(emitter->_code != this)) { return DebugUtils::errored(kErrorInvalidState); + } // NOTE: We always detach if we were asked to, if error happens during // `emitter->onDetach()` we just propagate it, but the BaseEmitter will // be detached. Error err = kErrorOk; - if (!emitter->isDestroyed()) + if (!emitter->isDestroyed()) { err = emitter->onDetach(this); + } // Disconnect CodeHolder <-> BaseEmitter. 
uint32_t index = _emitters.indexOf(emitter); @@ -275,13 +284,16 @@ static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t uint8_t* oldData = cb->_data; uint8_t* newData; - if (oldData && !cb->isExternal()) + if (oldData && !cb->isExternal()) { newData = static_cast(::realloc(oldData, n)); - else + } + else { newData = static_cast(::malloc(n)); + } - if (ASMJIT_UNLIKELY(!newData)) + if (ASMJIT_UNLIKELY(!newData)) { return DebugUtils::errored(kErrorOutOfMemory); + } cb->_data = newData; cb->_capacity = n; @@ -306,35 +318,44 @@ static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept { // The size of the section must be valid. size_t size = cb->size(); - if (ASMJIT_UNLIKELY(n > std::numeric_limits::max() - size)) + if (ASMJIT_UNLIKELY(n > std::numeric_limits::max() - size)) { return DebugUtils::errored(kErrorOutOfMemory); + } // We can now check if growing the buffer is really necessary. It's unlikely // that this function is called while there is still room for `n` bytes. size_t capacity = cb->capacity(); size_t required = cb->size() + n; - if (ASMJIT_UNLIKELY(required <= capacity)) - return kErrorOk; - if (cb->isFixed()) + if (ASMJIT_UNLIKELY(required <= capacity)) { + return kErrorOk; + } + + if (cb->isFixed()) { return DebugUtils::errored(kErrorTooLarge); + } size_t kInitialCapacity = 8096; - if (capacity < kInitialCapacity) + if (capacity < kInitialCapacity) { capacity = kInitialCapacity; - else + } + else { capacity += Globals::kAllocOverhead; + } do { size_t old = capacity; - if (capacity < Globals::kGrowThreshold) + if (capacity < Globals::kGrowThreshold) { capacity *= 2; - else + } + else { capacity += Globals::kGrowThreshold; + } // Overflow. 
- if (ASMJIT_UNLIKELY(old > capacity)) + if (ASMJIT_UNLIKELY(old > capacity)) { return DebugUtils::errored(kErrorOutOfMemory); + } } while (capacity - Globals::kAllocOverhead < required); return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead); @@ -343,11 +364,13 @@ Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept { Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept { size_t capacity = cb->capacity(); - if (n <= capacity) + if (n <= capacity) { return kErrorOk; + } - if (cb->isFixed()) + if (cb->isFixed()) { return DebugUtils::errored(kErrorTooLarge); + } return CodeHolder_reserveInternal(this, cb, n); } @@ -358,28 +381,34 @@ Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept { Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t nameSize, SectionFlags flags, uint32_t alignment, int32_t order) noexcept { *sectionOut = nullptr; - if (nameSize == SIZE_MAX) + if (nameSize == SIZE_MAX) { nameSize = strlen(name); + } - if (alignment == 0) + if (alignment == 0) { alignment = 1; + } - if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment))) + if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment))) { return DebugUtils::errored(kErrorInvalidArgument); + } - if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxSectionNameSize)) + if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxSectionNameSize)) { return DebugUtils::errored(kErrorInvalidSectionName); + } uint32_t sectionId = _sections.size(); - if (ASMJIT_UNLIKELY(sectionId == Globals::kInvalidId)) + if (ASMJIT_UNLIKELY(sectionId == Globals::kInvalidId)) { return DebugUtils::errored(kErrorTooManySections); + } ASMJIT_PROPAGATE(_sections.willGrow(&_allocator)); ASMJIT_PROPAGATE(_sectionsByOrder.willGrow(&_allocator)); Section* section = _allocator.allocZeroedT
(); - if (ASMJIT_UNLIKELY(!section)) + if (ASMJIT_UNLIKELY(!section)) { return DebugUtils::errored(kErrorOutOfMemory); + } section->_id = sectionId; section->_flags = flags; @@ -399,24 +428,28 @@ Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t name } Section* CodeHolder::sectionByName(const char* name, size_t nameSize) const noexcept { - if (nameSize == SIZE_MAX) + if (nameSize == SIZE_MAX) { nameSize = strlen(name); + } // This could be also put in a hash-table similarly like we do with labels, // however it's questionable as the number of sections should be pretty low // in general. Create an issue if this becomes a problem. if (nameSize <= Globals::kMaxSectionNameSize) { - for (Section* section : _sections) - if (memcmp(section->_name.str, name, nameSize) == 0 && section->_name.str[nameSize] == '\0') + for (Section* section : _sections) { + if (memcmp(section->_name.str, name, nameSize) == 0 && section->_name.str[nameSize] == '\0') { return section; + } + } } return nullptr; } Section* CodeHolder::ensureAddressTableSection() noexcept { - if (_addressTableSection) + if (_addressTableSection) { return _addressTableSection; + } newSection(&_addressTableSection, CodeHolder_addrTabName, @@ -429,16 +462,19 @@ Section* CodeHolder::ensureAddressTableSection() noexcept { Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept { AddressTableEntry* entry = _addressTableEntries.get(address); - if (entry) + if (entry) { return kErrorOk; + } Section* section = ensureAddressTableSection(); - if (ASMJIT_UNLIKELY(!section)) + if (ASMJIT_UNLIKELY(!section)) { return DebugUtils::errored(kErrorOutOfMemory); + } entry = _zone.newT(address); - if (ASMJIT_UNLIKELY(!entry)) + if (ASMJIT_UNLIKELY(!entry)) { return DebugUtils::errored(kErrorOutOfMemory); + } _addressTableEntries.insert(entry); section->_virtualSize += _environment.registerSize(); @@ -452,24 +488,26 @@ Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept { //! 
Only used to lookup a label from `_namedLabels`. class LabelByName { public: + const char* _key {}; + uint32_t _keySize {}; + uint32_t _hashCode {}; + uint32_t _parentId {}; + inline LabelByName(const char* key, size_t keySize, uint32_t hashCode, uint32_t parentId) noexcept : _key(key), _keySize(uint32_t(keySize)), _hashCode(hashCode), _parentId(parentId) {} + [[nodiscard]] inline uint32_t hashCode() const noexcept { return _hashCode; } + [[nodiscard]] inline bool matches(const LabelEntry* entry) const noexcept { return entry->nameSize() == _keySize && entry->parentId() == _parentId && ::memcmp(entry->name(), _key, _keySize) == 0; } - - const char* _key; - uint32_t _keySize; - uint32_t _hashCode; - uint32_t _parentId; }; // Returns a hash of `name` and fixes `nameSize` if it's `SIZE_MAX`. @@ -479,7 +517,9 @@ static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize size_t i = 0; for (;;) { uint8_t c = uint8_t(name[i]); - if (!c) break; + if (!c) { + break; + } hashCode = Support::hashRound(hashCode, c); i++; } @@ -488,7 +528,9 @@ static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize else { for (size_t i = 0; i < nameSize; i++) { uint8_t c = uint8_t(name[i]); - if (ASMJIT_UNLIKELY(!c)) return DebugUtils::errored(kErrorInvalidLabelName); + if (ASMJIT_UNLIKELY(!c)) { + return DebugUtils::errored(kErrorInvalidLabelName); + } hashCode = Support::hashRound(hashCode, c); } } @@ -497,7 +539,9 @@ static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel, const OffsetFormat& format) noexcept { LabelLink* link = _allocator.allocT(); - if (ASMJIT_UNLIKELY(!link)) return nullptr; + if (ASMJIT_UNLIKELY(!link)) { + return nullptr; + } link->next = le->_links; le->_links = link; @@ -516,14 +560,16 @@ Error CodeHolder::newLabelEntry(LabelEntry** entryOut) noexcept { *entryOut = nullptr; uint32_t labelId = 
_labelEntries.size(); - if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) + if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) { return DebugUtils::errored(kErrorTooManyLabels); + } ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator)); LabelEntry* le = _allocator.allocZeroedT(); - if (ASMJIT_UNLIKELY(!le)) + if (ASMJIT_UNLIKELY(!le)) { return DebugUtils::errored(kErrorOutOfMemory); + } le->_setId(labelId); le->_parentId = Globals::kInvalidId; @@ -539,10 +585,12 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize); if (ASMJIT_UNLIKELY(nameSize == 0)) { - if (type == LabelType::kAnonymous) + if (type == LabelType::kAnonymous) { return newLabelEntry(entryOut); - else + } + else { return DebugUtils::errored(kErrorInvalidLabelName); + } } if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxLabelNameSize)) @@ -551,18 +599,21 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si switch (type) { case LabelType::kAnonymous: { // Anonymous labels cannot have a parent (or more specifically, parent is useless here). - if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) + if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) { return DebugUtils::errored(kErrorInvalidParentLabel); + } uint32_t labelId = _labelEntries.size(); - if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) + if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) { return DebugUtils::errored(kErrorTooManyLabels); + } ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator)); LabelEntry* le = _allocator.allocZeroedT(); - if (ASMJIT_UNLIKELY(!le)) + if (ASMJIT_UNLIKELY(!le)) { return DebugUtils::errored(kErrorOutOfMemory); + } // NOTE: This LabelEntry has a name, but we leave its hashCode as zero as it's anonymous. 
le->_setId(labelId); @@ -577,8 +628,9 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si } case LabelType::kLocal: { - if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size())) + if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size())) { return DebugUtils::errored(kErrorInvalidParentLabel); + } hashCode ^= parentId; break; @@ -586,8 +638,9 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si case LabelType::kGlobal: case LabelType::kExternal: { - if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) + if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) { return DebugUtils::errored(kErrorInvalidParentLabel); + } break; } @@ -600,20 +653,23 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si // different id, this is already accomplished by having a different hashes // between the same label names having different parent labels. LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId)); - if (ASMJIT_UNLIKELY(le)) + if (ASMJIT_UNLIKELY(le)) { return DebugUtils::errored(kErrorLabelAlreadyDefined); + } Error err = kErrorOk; uint32_t labelId = _labelEntries.size(); - if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) + if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) { return DebugUtils::errored(kErrorTooManyLabels); + } ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator)); le = _allocator.allocZeroedT(); - if (ASMJIT_UNLIKELY(!le)) + if (ASMJIT_UNLIKELY(!le)) { return DebugUtils::errored(kErrorOutOfMemory); + } le->_hashCode = hashCode; le->_setId(labelId); @@ -631,24 +687,28 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si uint32_t CodeHolder::labelIdByName(const char* name, size_t nameSize, uint32_t parentId) noexcept { uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize); - if (ASMJIT_UNLIKELY(!nameSize)) + if (ASMJIT_UNLIKELY(!nameSize)) { return 0; + } - if (parentId != Globals::kInvalidId) + if 
(parentId != Globals::kInvalidId) { hashCode ^= parentId; + } LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId)); return le ? le->id() : uint32_t(Globals::kInvalidId); } ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept { - if (!hasUnresolvedLinks()) + if (!hasUnresolvedLinks()) { return kErrorOk; + } Error err = kErrorOk; for (LabelEntry* le : labelEntries()) { - if (!le->isBound()) + if (!le->isBound()) { continue; + } LabelLinkIterator link(le); if (link) { @@ -695,15 +755,18 @@ ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept { ASMJIT_API Error CodeHolder::bindLabel(const Label& label, uint32_t toSectionId, uint64_t toOffset) noexcept { LabelEntry* le = labelEntry(label); - if (ASMJIT_UNLIKELY(!le)) + if (ASMJIT_UNLIKELY(!le)) { return DebugUtils::errored(kErrorInvalidLabel); + } - if (ASMJIT_UNLIKELY(toSectionId > _sections.size())) + if (ASMJIT_UNLIKELY(toSectionId > _sections.size())) { return DebugUtils::errored(kErrorInvalidSection); + } // Label can be bound only once. - if (ASMJIT_UNLIKELY(le->isBound())) + if (ASMJIT_UNLIKELY(le->isBound())) { return DebugUtils::errored(kErrorLabelAlreadyBound); + } // Bind the label. 
Section* section = _sections[toSectionId]; @@ -761,12 +824,14 @@ Error CodeHolder::newRelocEntry(RelocEntry** dst, RelocType relocType) noexcept ASMJIT_PROPAGATE(_relocations.willGrow(&_allocator)); uint32_t relocId = _relocations.size(); - if (ASMJIT_UNLIKELY(relocId == Globals::kInvalidId)) + if (ASMJIT_UNLIKELY(relocId == Globals::kInvalidId)) { return DebugUtils::errored(kErrorTooManyRelocations); + } RelocEntry* re = _allocator.allocZeroedT(); - if (ASMJIT_UNLIKELY(!re)) + if (ASMJIT_UNLIKELY(!re)) { return DebugUtils::errored(kErrorOutOfMemory); + } re->_id = relocId; re->_relocType = relocType; @@ -798,8 +863,9 @@ static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, ui case ExpressionValueType::kLabel: { LabelEntry* le = exp->value[i].label; - if (!le->isBound()) + if (!le->isBound()) { return DebugUtils::errored(kErrorExpressionLabelNotBound); + } v = le->section()->offset() + le->offset(); break; } @@ -863,14 +929,16 @@ Error CodeHolder::flatten() noexcept { uint64_t realSize = section->realSize(); if (realSize) { uint64_t alignedOffset = Support::alignUp(offset, section->alignment()); - if (ASMJIT_UNLIKELY(alignedOffset < offset)) + if (ASMJIT_UNLIKELY(alignedOffset < offset)) { return DebugUtils::errored(kErrorTooLarge); + } Support::FastUInt8 of = 0; offset = Support::addOverflow(alignedOffset, realSize, &of); - if (ASMJIT_UNLIKELY(of)) + if (ASMJIT_UNLIKELY(of)) { return DebugUtils::errored(kErrorTooLarge); + } } } @@ -879,13 +947,15 @@ Error CodeHolder::flatten() noexcept { offset = 0; for (Section* section : _sectionsByOrder) { uint64_t realSize = section->realSize(); - if (realSize) + if (realSize) { offset = Support::alignUp(offset, section->alignment()); + } section->_offset = offset; // Make sure the previous section extends a bit to cover the alignment. 
- if (prev) + if (prev) { prev->_virtualSize = offset - prev->_offset; + } prev = section; offset += realSize; @@ -908,16 +978,18 @@ size_t CodeHolder::codeSize() const noexcept { } } - if ((sizeof(uint64_t) > sizeof(size_t) && offset > uint64_t(SIZE_MAX)) || of) + if ((sizeof(uint64_t) > sizeof(size_t) && offset > uint64_t(SIZE_MAX)) || of) { return SIZE_MAX; + } return size_t(offset); } Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept { // Base address must be provided. - if (ASMJIT_UNLIKELY(baseAddress == Globals::kNoBaseAddress)) + if (ASMJIT_UNLIKELY(baseAddress == Globals::kNoBaseAddress)) { return DebugUtils::errored(kErrorInvalidArgument); + } _baseAddress = baseAddress; uint32_t addressSize = _environment.registerSize(); @@ -927,22 +999,23 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept { uint8_t* addressTableEntryData = nullptr; if (addressTableSection) { - ASMJIT_PROPAGATE( - reserveBuffer(&addressTableSection->_buffer, size_t(addressTableSection->virtualSize()))); + ASMJIT_PROPAGATE(reserveBuffer(&addressTableSection->_buffer, size_t(addressTableSection->virtualSize()))); addressTableEntryData = addressTableSection->_buffer.data(); } // Relocate all recorded locations. for (const RelocEntry* re : _relocations) { // Possibly deleted or optimized-out entry. - if (re->relocType() == RelocType::kNone) + if (re->relocType() == RelocType::kNone) { continue; + } Section* sourceSection = sectionById(re->sourceSectionId()); Section* targetSection = nullptr; - if (re->targetSectionId() != Globals::kInvalidId) + if (re->targetSectionId() != Globals::kInvalidId) { targetSection = sectionById(re->targetSectionId()); + } uint64_t value = re->payload(); uint64_t sectionOffset = sourceSection->offset(); @@ -951,8 +1024,9 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept { // Make sure that the `RelocEntry` doesn't go out of bounds. 
size_t regionSize = re->format().regionSize(); if (ASMJIT_UNLIKELY(re->sourceOffset() >= sourceSection->bufferSize() || - sourceSection->bufferSize() - size_t(re->sourceOffset()) < regionSize)) + sourceSection->bufferSize() - size_t(re->sourceOffset()) < regionSize)) { return DebugUtils::errored(kErrorInvalidRelocEntry); + } uint8_t* buffer = sourceSection->data(); @@ -970,8 +1044,9 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept { case RelocType::kRelToAbs: { // Value is currently a relative offset from the start of its section. // We have to convert it to an absolute offset (including base address). - if (ASMJIT_UNLIKELY(!targetSection)) + if (ASMJIT_UNLIKELY(!targetSection)) { return DebugUtils::errored(kErrorInvalidRelocEntry); + } //value += baseAddress + sectionOffset + sourceOffset + regionSize; value += baseAddress + targetSection->offset(); @@ -982,40 +1057,46 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept { value -= baseAddress + sectionOffset + sourceOffset + regionSize; // Sign extend as we are not interested in the high 32-bit word in a 32-bit address space. - if (addressSize <= 4) + if (addressSize <= 4) { value = uint64_t(int64_t(int32_t(value & 0xFFFFFFFFu))); - else if (!Support::isInt32(int64_t(value))) + } + else if (!Support::isInt32(int64_t(value))) { return DebugUtils::errored(kErrorRelocOffsetOutOfRange); + } break; } case RelocType::kX64AddressEntry: { size_t valueOffset = size_t(re->sourceOffset()) + re->format().valueOffset(); - if (re->format().valueSize() != 4 || valueOffset < 2) + if (re->format().valueSize() != 4 || valueOffset < 2) { return DebugUtils::errored(kErrorInvalidRelocEntry); + } // First try whether a relative 32-bit displacement would work. value -= baseAddress + sectionOffset + sourceOffset + regionSize; if (!Support::isInt32(int64_t(value))) { // Relative 32-bit displacement is not possible, use '.addrtab' section. 
AddressTableEntry* atEntry = _addressTableEntries.get(re->payload()); - if (ASMJIT_UNLIKELY(!atEntry)) + if (ASMJIT_UNLIKELY(!atEntry)) { return DebugUtils::errored(kErrorInvalidRelocEntry); + } // Cannot be null as we have just matched the `AddressTableEntry`. ASMJIT_ASSERT(addressTableSection != nullptr); - if (!atEntry->hasAssignedSlot()) + if (!atEntry->hasAssignedSlot()) { atEntry->_slot = addressTableEntryCount++; + } size_t atEntryIndex = size_t(atEntry->slot()) * addressSize; uint64_t addrSrc = sectionOffset + sourceOffset + regionSize; uint64_t addrDst = addressTableSection->offset() + uint64_t(atEntryIndex); value = addrDst - addrSrc; - if (!Support::isInt32(int64_t(value))) + if (!Support::isInt32(int64_t(value))) { return DebugUtils::errored(kErrorRelocOffsetOutOfRange); + } // Bytes that replace [REX, OPCODE] bytes. uint32_t byte0 = 0xFF; @@ -1064,14 +1145,16 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept { } Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, CopySectionFlags copyFlags) noexcept { - if (ASMJIT_UNLIKELY(!isSectionValid(sectionId))) + if (ASMJIT_UNLIKELY(!isSectionValid(sectionId))) { return DebugUtils::errored(kErrorInvalidSection); + } Section* section = sectionById(sectionId); size_t bufferSize = section->bufferSize(); - if (ASMJIT_UNLIKELY(dstSize < bufferSize)) + if (ASMJIT_UNLIKELY(dstSize < bufferSize)) { return DebugUtils::errored(kErrorInvalidArgument); + } memcpy(dst, section->data(), bufferSize); @@ -1086,14 +1169,16 @@ Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, CopySectionFlags copyFlags) noexcept { size_t end = 0; for (Section* section : _sectionsByOrder) { - if (section->offset() > dstSize) + if (section->offset() > dstSize) { return DebugUtils::errored(kErrorInvalidArgument); + } size_t bufferSize = section->bufferSize(); size_t offset = size_t(section->offset()); - if 
(ASMJIT_UNLIKELY(dstSize - offset < bufferSize)) + if (ASMJIT_UNLIKELY(dstSize - offset < bufferSize)) { return DebugUtils::errored(kErrorInvalidArgument); + } uint8_t* dstTarget = static_cast(dst) + offset; size_t paddingSize = 0; diff --git a/src/asmjit/core/codeholder.h b/src/asmjit/core/codeholder.h index 3f2d1d7..33247d7 100644 --- a/src/asmjit/core/codeholder.h +++ b/src/asmjit/core/codeholder.h @@ -175,34 +175,50 @@ public: //! \{ //! Returns the section id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; } + //! Returns the section name, as a null terminated string. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.str; } //! Returns the section data. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint8_t* data() noexcept { return _buffer.data(); } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const uint8_t* data() const noexcept { return _buffer.data(); } //! Returns the section flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG SectionFlags flags() const noexcept { return _flags; } + //! Tests whether the section has the given `flag`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(SectionFlags flag) const noexcept { return Support::test(_flags, flag); } + //! Adds `flags` to the section flags. ASMJIT_INLINE_NODEBUG void addFlags(SectionFlags flags) noexcept { _flags |= flags; } + //! Removes `flags` from the section flags. ASMJIT_INLINE_NODEBUG void clearFlags(SectionFlags flags) noexcept { _flags &= ~flags; } //! Returns the minimum section alignment + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; } + //! Sets the minimum section alignment ASMJIT_INLINE_NODEBUG void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; } //! Returns the section order, which has a higher priority than section id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG int32_t order() const noexcept { return _order; } //! 
Returns the section offset, relative to base. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t offset() const noexcept { return _offset; } + //! Set the section offset. ASMJIT_INLINE_NODEBUG void setOffset(uint64_t offset) noexcept { _offset = offset; } @@ -212,18 +228,26 @@ public: //! size returned by `bufferSize()` as the buffer stores real data emitted by assemblers or appended by users. //! //! Use `realSize()` to get the real and final size of this section. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t virtualSize() const noexcept { return _virtualSize; } + //! Sets the virtual size of the section. ASMJIT_INLINE_NODEBUG void setVirtualSize(uint64_t virtualSize) noexcept { _virtualSize = virtualSize; } //! Returns the buffer size of the section. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t bufferSize() const noexcept { return _buffer.size(); } + //! Returns the real size of the section calculated from virtual and buffer sizes. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t realSize() const noexcept { return Support::max(virtualSize(), bufferSize()); } //! Returns the `CodeBuffer` used by this section. + [[nodiscard]] ASMJIT_INLINE_NODEBUG CodeBuffer& buffer() noexcept { return _buffer; } + //! Returns the `CodeBuffer` used by this section (const). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const CodeBuffer& buffer() const noexcept { return _buffer; } //! \} @@ -256,15 +280,25 @@ public: //! \name Accessors //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t address() const noexcept { return _address; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t slot() const noexcept { return _slot; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAssignedSlot() const noexcept { return _slot != 0xFFFFFFFFu; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator<(const AddressTableEntry& other) const noexcept { return _address < other._address; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator>(const AddressTableEntry& other) const noexcept { return _address > other._address; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator<(uint64_t queryAddress) const noexcept { return _address < queryAddress; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator>(uint64_t queryAddress) const noexcept { return _address > queryAddress; } //! \} @@ -434,19 +468,32 @@ struct OffsetFormat { } //! Returns flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; } + //! Returns the size of the region/instruction where the offset is encoded. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t regionSize() const noexcept { return _regionSize; } + //! Returns the offset of the word relative to the start of the region where the offset is. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t valueOffset() const noexcept { return _valueOffset; } + //! Returns the size of the data-type (word) that contains the offset, in bytes. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t valueSize() const noexcept { return _valueSize; } + //! Returns the count of bits of the offset value in the data it's stored in. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t immBitCount() const noexcept { return _immBitCount; } + //! Returns the bit-shift of the offset value in the data it's stored in. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t immBitShift() const noexcept { return _immBitShift; } + //! 
Returns the number of least significant bits of the offset value, that must be zero and that are not part of //! the encoded data. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t immDiscardLsb() const noexcept { return _immDiscardLsb; } //! Resets this offset format to a simple data value of `dataSize` bytes. @@ -536,17 +583,28 @@ struct RelocEntry { //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RelocType relocType() const noexcept { return _relocType; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const OffsetFormat& format() const noexcept { return _format; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t sourceSectionId() const noexcept { return _sourceSectionId; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t targetSectionId() const noexcept { return _targetSectionId; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t sourceOffset() const noexcept { return _sourceOffset; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t payload() const noexcept { return _payload; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG Expression* payloadAsExpression() const noexcept { return reinterpret_cast(uintptr_t(_payload)); } @@ -603,15 +661,14 @@ public: //! \name Constants //! \{ - enum : uint32_t { - //! SSO size of \ref _name. - //! - //! \cond INTERNAL - //! Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has granularity of 32 bytes anyway). This - //! gives `_name` the remaining space, which is should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures. - //! \endcond - kStaticNameSize = 64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*)) - }; + //! SSO size of \ref _name. + //! + //! \cond INTERNAL + //! Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has granularity of 32 bytes anyway). This + //! 
gives `_name` the remaining space, which is should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures. + //! \endcond + static inline constexpr uint32_t kStaticNameSize = + 64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*)); //! \} @@ -642,52 +699,68 @@ public: // compiler targeting 64-bit CPU will add to align the structure to 64-bits. //! Returns label id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _customData; } + //! Sets label id (internal, used only by `CodeHolder`). ASMJIT_INLINE_NODEBUG void _setId(uint32_t id) noexcept { _customData = id; } //! Returns label type. + [[nodiscard]] ASMJIT_INLINE_NODEBUG LabelType type() const noexcept { return _type; } //! Tests whether the label has a parent label. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasParent() const noexcept { return _parentId != Globals::kInvalidId; } + //! Returns label's parent id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t parentId() const noexcept { return _parentId; } //! Returns the section where the label was bound. //! //! If the label was not yet bound the return value is `nullptr`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Section* section() const noexcept { return _section; } //! Tests whether the label has name. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasName() const noexcept { return !_name.empty(); } //! Returns the label's name. //! //! \note Local labels will return their local name without their parent part, for example ".L1". + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); } //! Returns size of label's name. //! //! \note Label name is always null terminated, so you can use `strlen()` to get it, however, it's also cached in //! `LabelEntry` itself, so if you want to know the size the fastest way is to call `LabelEntry::nameSize()`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); } //! 
Returns links associated with this label. + [[nodiscard]] ASMJIT_INLINE_NODEBUG LabelLink* links() const noexcept { return _links; } //! Tests whether the label is bound. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isBound() const noexcept { return _section != nullptr; } + //! Tests whether the label is bound to a the given `sectionId`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isBoundTo(Section* section) const noexcept { return _section == section; } //! Returns the label offset (only useful if the label is bound). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t offset() const noexcept { return _offset; } //! Returns the hash-value of label's name and its parent label (if any). //! //! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function is implemented in `Support::hashString()` //! and `Support::hashRound()`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t hashCode() const noexcept { return _hashCode; } //! \} @@ -776,6 +849,7 @@ public: //! Tests whether the `CodeHolder` has been initialized. //! //! Emitters can be only attached to initialized `CodeHolder` instances. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _environment.isInitialized(); } //! Initializes CodeHolder to hold code described by the given `environment` and `baseAddress`. @@ -805,6 +879,7 @@ public: //! \note This should be only used for AsmJit's purposes. Code holder uses arena allocator to allocate everything, //! so anything allocated through this allocator will be invalidated by \ref CodeHolder::reset() or by CodeHolder's //! destructor. + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return const_cast(&_allocator); } //! \} @@ -813,19 +888,27 @@ public: //! \{ //! Returns the target environment information. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; } //! Returns the target architecture. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return environment().arch(); } + //! Returns the target sub-architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); } //! Returns the minimum CPU features of the target architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const CpuFeatures& cpuFeatures() const noexcept { return _cpuFeatures; } //! Tests whether a static base-address is set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; } + //! Returns a static base-address or \ref Globals::kNoBaseAddress, if not set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t baseAddress() const noexcept { return _baseAddress; } //! \} @@ -834,6 +917,7 @@ public: //! \{ //! Returns a vector of attached emitters. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& emitters() const noexcept { return _emitters; } //! \} @@ -842,6 +926,7 @@ public: //! \{ //! Returns the attached logger. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; } //! Attaches a `logger` to CodeHolder and propagates it to all attached emitters. ASMJIT_API void setLogger(Logger* logger) noexcept; @@ -852,8 +937,10 @@ public: //! \{ //! Tests whether the CodeHolder has an attached error handler, see \ref ErrorHandler. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; } //! Returns the attached error handler. + [[nodiscard]] ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; } //! Attach an error handler to this `CodeHolder`. ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept; @@ -881,13 +968,19 @@ public: //! \{ //! Returns an array of `Section*` records. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& sections() const noexcept { return _sections; } + //! 
Returns an array of `Section*` records sorted according to section order first, then section id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& sectionsByOrder() const noexcept { return _sectionsByOrder; } + //! Returns the number of sections. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t sectionCount() const noexcept { return _sections.size(); } //! Tests whether the given `sectionId` is valid. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isSectionValid(uint32_t sectionId) const noexcept { return sectionId < _sections.size(); } //! Creates a new section and return its pointer in `sectionOut`. @@ -896,19 +989,23 @@ public: ASMJIT_API Error newSection(Section** sectionOut, const char* name, size_t nameSize = SIZE_MAX, SectionFlags flags = SectionFlags::kNone, uint32_t alignment = 1, int32_t order = 0) noexcept; //! Returns a section entry of the given index. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Section* sectionById(uint32_t sectionId) const noexcept { return _sections[sectionId]; } //! Returns section-id that matches the given `name`. //! //! If there is no such section `Section::kInvalidId` is returned. + [[nodiscard]] ASMJIT_API Section* sectionByName(const char* name, size_t nameSize = SIZE_MAX) const noexcept; //! Returns '.text' section (section that commonly represents code). //! //! \note Text section is always the first section in \ref CodeHolder::sections() array. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Section* textSection() const noexcept { return _sections[0]; } //! Tests whether '.addrtab' section exists. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAddressTable() const noexcept { return _addressTableSection != nullptr; } //! Returns '.addrtab' section. @@ -917,10 +1014,12 @@ public: //! addresses that cannot be encoded in instructions like 'jmp' or 'call'. //! //! \note This section is created on demand, the returned pointer can be null. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG Section* addressTableSection() const noexcept { return _addressTableSection; } //! Ensures that '.addrtab' section exists (creates it if it doesn't) and //! returns it. Can return `nullptr` on out of memory condition. + [[nodiscard]] ASMJIT_API Section* ensureAddressTableSection() noexcept; //! Used to add an address to an address table. @@ -939,22 +1038,27 @@ public: //! \{ //! Returns array of `LabelEntry*` records. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& labelEntries() const noexcept { return _labelEntries; } //! Returns number of labels created. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t labelCount() const noexcept { return _labelEntries.size(); } //! Tests whether the label having `id` is valid (i.e. created by `newLabelEntry()`). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLabelValid(uint32_t labelId) const noexcept { return labelId < _labelEntries.size(); } //! Tests whether the `label` is valid (i.e. created by `newLabelEntry()`). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept { return label.id() < _labelEntries.size(); } //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLabelBound(uint32_t labelId) const noexcept { return isLabelValid(labelId) && _labelEntries[labelId]->isBound(); } @@ -962,16 +1066,19 @@ public: //! Tests whether the `label` is already bound. //! //! Returns `false` if the `label` is not valid. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLabelBound(const Label& label) const noexcept { return isLabelBound(label.id()); } //! Returns LabelEntry of the given label `id`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG LabelEntry* labelEntry(uint32_t labelId) const noexcept { return isLabelValid(labelId) ? _labelEntries[labelId] : static_cast(nullptr); } //! Returns LabelEntry of the given `label`. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG LabelEntry* labelEntry(const Label& label) const noexcept { return labelEntry(label.id()); } @@ -980,12 +1087,14 @@ public: //! //! The offset returned is relative to the start of the section. Zero offset is returned for unbound labels, //! which is their initial offset value. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t labelOffset(uint32_t labelId) const noexcept { ASMJIT_ASSERT(isLabelValid(labelId)); return _labelEntries[labelId]->offset(); } //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t labelOffset(const Label& label) const noexcept { return labelOffset(label.id()); } @@ -994,6 +1103,7 @@ public: //! //! \remarks The offset of the section where the label is bound must be valid in order to use this function, //! otherwise the value returned will not be reliable. + [[nodiscard]] inline uint64_t labelOffsetFromBase(uint32_t labelId) const noexcept { ASMJIT_ASSERT(isLabelValid(labelId)); const LabelEntry* le = _labelEntries[labelId]; @@ -1001,6 +1111,7 @@ public: } //! \overload + [[nodiscard]] inline uint64_t labelOffsetFromBase(const Label& label) const noexcept { return labelOffsetFromBase(label.id()); } @@ -1031,6 +1142,7 @@ public: //! //! If the named label doesn't a default constructed \ref Label is returned, //! which has its id set to \ref Globals::kInvalidId. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept { return Label(labelIdByName(name, nameSize, parentId)); } @@ -1038,16 +1150,21 @@ public: //! Returns a label id by name. //! //! If the named label doesn't exist \ref Globals::kInvalidId is returned. + [[nodiscard]] ASMJIT_API uint32_t labelIdByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept; //! Tests whether there are any unresolved label links. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasUnresolvedLinks() const noexcept { return _unresolvedLinkCount != 0; } + //! Returns the number of label links, which are unresolved. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t unresolvedLinkCount() const noexcept { return _unresolvedLinkCount; } //! Creates a new label-link used to store information about yet unbound labels. //! //! Returns `null` if the allocation failed. + [[nodiscard]] ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel, const OffsetFormat& format) noexcept; //! Resolves cross-section links (`LabelLink`) associated with each label that was used as a destination in code @@ -1066,11 +1183,15 @@ public: //! \{ //! Tests whether the code contains relocation entries. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRelocEntries() const noexcept { return !_relocations.empty(); } + //! Returns array of `RelocEntry*` records. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& relocEntries() const noexcept { return _relocations; } //! Returns a RelocEntry of the given `id`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RelocEntry* relocEntry(uint32_t id) const noexcept { return _relocations[id]; } //! Creates a new relocation entry of type `relocType`. @@ -1093,6 +1214,7 @@ public: //! \note All sections will be iterated over and the code size returned would represent the minimum code size of //! all combined sections after applying minimum alignment. Code size may decrease after calling `flatten()` and //! `relocateToBase()`. + [[nodiscard]] ASMJIT_API size_t codeSize() const noexcept; //! Relocates the code to the given `baseAddress`. 
diff --git a/src/asmjit/core/codewriter.cpp b/src/asmjit/core/codewriter.cpp index 2ee5b38..31871a4 100644 --- a/src/asmjit/core/codewriter.cpp +++ b/src/asmjit/core/codewriter.cpp @@ -16,8 +16,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs uint32_t discardLsb = format.immDiscardLsb(); // Invalid offset (should not happen). - if (!bitCount || bitCount > format.valueSize() * 8u) + if (!bitCount || bitCount > format.valueSize() * 8u) { return false; + } uint32_t value; uint32_t u = 0; @@ -27,8 +28,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs // absolute value. if (format.hasSignBit()) { u = uint32_t(offset64 >= 0); - if (u == 0) + if (u == 0) { offset64 = -offset64; + } unsignedLogic = true; } @@ -36,30 +38,35 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs if (unsignedLogic) { if (discardLsb) { ASMJIT_ASSERT(discardLsb <= 32); - if ((offset64 & Support::lsbMask(discardLsb)) != 0) + if ((offset64 & Support::lsbMask(discardLsb)) != 0) { return false; + } offset64 = int64_t(uint64_t(offset64) >> discardLsb); } value = uint32_t(offset64 & Support::lsbMask(bitCount)); - if (value != offset64) + if (value != offset64) { return false; + } } else { // The rest of OffsetType options are all signed. 
if (discardLsb) { ASMJIT_ASSERT(discardLsb <= 32); - if ((offset64 & Support::lsbMask(discardLsb)) != 0) + if ((offset64 & Support::lsbMask(discardLsb)) != 0) { return false; + } offset64 >>= discardLsb; } - if (!Support::isInt32(offset64)) + if (!Support::isInt32(offset64)) { return false; + } value = uint32_t(int32_t(offset64)); - if (!Support::isEncodableOffset32(int32_t(value), bitCount)) + if (!Support::isEncodableOffset32(int32_t(value), bitCount)) { return false; + } } switch (format.type()) { @@ -72,8 +79,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs // Opcode: {.....|imm:1|..N.N|......|imm:3|....|imm:8} case OffsetType::kThumb32_ADR: { // Sanity checks. - if (format.valueSize() != 4 || bitCount != 12 || bitShift != 0) + if (format.valueSize() != 4 || bitCount != 12 || bitShift != 0) { return false; + } uint32_t imm8 = (value & 0x00FFu); uint32_t imm3 = (value & 0x0700u) << (12 - 8); @@ -88,13 +96,14 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs case OffsetType::kThumb32_BLX: // The calculation is the same as `B`, but the first LSB bit must be zero, so account for that. value <<= 1; - ASMJIT_FALLTHROUGH; + [[fallthrough]]; // Opcode: {....|.|imm[23]|imm[20:11]|..|ja|.|jb|imm[10:0]} case OffsetType::kThumb32_B: { // Sanity checks. - if (format.valueSize() != 4) + if (format.valueSize() != 4) { return false; + } uint32_t ia = (value & 0x0007FFu); uint32_t ib = (value & 0x1FF800u) << (16 - 11); @@ -109,8 +118,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs // Opcode: {....|.|imm[19]|....|imm[16:11]|..|ja|.|jb|imm[10:0]} case OffsetType::kThumb32_BCond: { // Sanity checks. 
- if (format.valueSize() != 4 || bitCount != 20 || bitShift != 0) + if (format.valueSize() != 4 || bitCount != 20 || bitShift != 0) { return false; + } uint32_t ia = (value & 0x0007FFu); uint32_t ib = (value & 0x01F800u) << (16 - 11); @@ -124,8 +134,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs case OffsetType::kAArch32_ADR: { uint32_t encodedImm; - if (!arm::Utils::encodeAArch32Imm(value, &encodedImm)) + if (!arm::Utils::encodeAArch32Imm(value, &encodedImm)) { return false; + } *dst = (Support::bitMask(22) << u) | (encodedImm << bitShift); return true; @@ -138,8 +149,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs case OffsetType::kAArch32_U23_0To3At0_4To7At8: { // Sanity checks. - if (format.valueSize() != 4 || bitCount != 8 || bitShift != 0) + if (format.valueSize() != 4 || bitCount != 8 || bitShift != 0) { return false; + } uint32_t immLo = (value & 0x0Fu); uint32_t immHi = (value & 0xF0u) << (8 - 4); @@ -150,8 +162,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs case OffsetType::kAArch32_1To24At0_0At24: { // Sanity checks. - if (format.valueSize() != 4 || bitCount != 25 || bitShift != 0) + if (format.valueSize() != 4 || bitCount != 25 || bitShift != 0) { return false; + } uint32_t immLo = (value & 0x0000001u) << 24; uint32_t immHi = (value & 0x1FFFFFEu) >> 1; @@ -163,8 +176,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs case OffsetType::kAArch64_ADR: case OffsetType::kAArch64_ADRP: { // Sanity checks. 
- if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5) + if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5) { return false; + } uint32_t immLo = value & 0x3u; uint32_t immHi = (value >> 2) & Support::lsbMask(19); @@ -182,8 +196,9 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs uint32_t bitCount = format.immBitCount(); uint32_t discardLsb = format.immDiscardLsb(); - if (!bitCount || bitCount > format.valueSize() * 8u) + if (!bitCount || bitCount > format.valueSize() * 8u) { return false; + } uint64_t value; @@ -191,26 +206,30 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs if (format.type() == OffsetType::kUnsignedOffset) { if (discardLsb) { ASMJIT_ASSERT(discardLsb <= 32); - if ((offset64 & Support::lsbMask(discardLsb)) != 0) + if ((offset64 & Support::lsbMask(discardLsb)) != 0) { return false; + } offset64 = int64_t(uint64_t(offset64) >> discardLsb); } value = uint64_t(offset64) & Support::lsbMask(bitCount); - if (value != uint64_t(offset64)) + if (value != uint64_t(offset64)) { return false; + } } else { // The rest of OffsetType options are all signed. 
if (discardLsb) { ASMJIT_ASSERT(discardLsb <= 32); - if ((offset64 & Support::lsbMask(discardLsb)) != 0) + if ((offset64 & Support::lsbMask(discardLsb)) != 0) { return false; + } offset64 >>= discardLsb; } - if (!Support::isEncodableOffset64(offset64, bitCount)) + if (!Support::isEncodableOffset64(offset64, bitCount)) { return false; + } value = uint64_t(offset64); } @@ -235,8 +254,9 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma switch (format.valueSize()) { case 1: { uint32_t mask; - if (!encodeOffset32(&mask, offset64, format)) + if (!encodeOffset32(&mask, offset64, format)) { return false; + } Support::writeU8(dst, uint8_t(Support::readU8(dst) | mask)); return true; @@ -244,8 +264,9 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma case 2: { uint32_t mask; - if (!encodeOffset32(&mask, offset64, format)) + if (!encodeOffset32(&mask, offset64, format)) { return false; + } Support::writeU16uLE(dst, uint16_t(Support::readU16uLE(dst) | mask)); return true; @@ -263,8 +284,9 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma case 8: { uint64_t mask; - if (!encodeOffset64(&mask, offset64, format)) + if (!encodeOffset64(&mask, offset64, format)) { return false; + } Support::writeU64uLE(dst, Support::readU64uLE(dst) | mask); return true; diff --git a/src/asmjit/core/codewriter_p.h b/src/asmjit/core/codewriter_p.h index c799241..2fc8415 100644 --- a/src/asmjit/core/codewriter_p.h +++ b/src/asmjit/core/codewriter_p.h @@ -23,10 +23,11 @@ class CodeWriter { public: uint8_t* _cursor; - ASMJIT_FORCE_INLINE explicit CodeWriter(BaseAssembler* a) noexcept + ASMJIT_INLINE_NODEBUG explicit CodeWriter(BaseAssembler* a) noexcept : _cursor(a->_bufferPtr) {} - ASMJIT_FORCE_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept { + [[nodiscard]] + ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept { size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor); if 
(ASMJIT_UNLIKELY(remainingSpace < n)) { CodeBuffer& buffer = a->_section->_buffer; @@ -38,25 +39,28 @@ public: return kErrorOk; } - ASMJIT_FORCE_INLINE uint8_t* cursor() const noexcept { return _cursor; } - ASMJIT_FORCE_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; } - ASMJIT_FORCE_INLINE void advance(size_t n) noexcept { _cursor += n; } + [[nodiscard]] + ASMJIT_INLINE_NODEBUG uint8_t* cursor() const noexcept { return _cursor; } - ASMJIT_FORCE_INLINE size_t offsetFrom(uint8_t* from) const noexcept { + ASMJIT_INLINE_NODEBUG void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; } + ASMJIT_INLINE_NODEBUG void advance(size_t n) noexcept { _cursor += n; } + + [[nodiscard]] + ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept { ASMJIT_ASSERT(_cursor >= from); return (size_t)(_cursor - from); } template - ASMJIT_FORCE_INLINE void emit8(T val) noexcept { - typedef typename std::make_unsigned::type U; + ASMJIT_INLINE void emit8(T val) noexcept { + using U = std::make_unsigned_t; _cursor[0] = uint8_t(U(val) & U(0xFF)); _cursor++; } template - ASMJIT_FORCE_INLINE void emit8If(T val, Y cond) noexcept { - typedef typename std::make_unsigned::type U; + ASMJIT_INLINE void emit8If(T val, Y cond) noexcept { + using U = std::make_unsigned_t; ASMJIT_ASSERT(size_t(cond) <= 1u); _cursor[0] = uint8_t(U(val) & U(0xFF)); @@ -64,42 +68,42 @@ public: } template - ASMJIT_FORCE_INLINE void emit16uLE(T val) noexcept { - typedef typename std::make_unsigned::type U; + ASMJIT_INLINE void emit16uLE(T val) noexcept { + using U = std::make_unsigned_t; Support::writeU16uLE(_cursor, uint16_t(U(val) & 0xFFFFu)); _cursor += 2; } template - ASMJIT_FORCE_INLINE void emit16uBE(T val) noexcept { - typedef typename std::make_unsigned::type U; + ASMJIT_INLINE void emit16uBE(T val) noexcept { + using U = std::make_unsigned_t; Support::writeU16uBE(_cursor, uint16_t(U(val) & 0xFFFFu)); _cursor += 2; } template - ASMJIT_FORCE_INLINE void emit32uLE(T val) noexcept { - 
typedef typename std::make_unsigned::type U; + ASMJIT_INLINE void emit32uLE(T val) noexcept { + using U = std::make_unsigned_t; Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu)); _cursor += 4; } template - ASMJIT_FORCE_INLINE void emit32uBE(T val) noexcept { - typedef typename std::make_unsigned::type U; + ASMJIT_INLINE void emit32uBE(T val) noexcept { + using U = std::make_unsigned_t; Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu)); _cursor += 4; } - ASMJIT_FORCE_INLINE void emitData(const void* data, size_t size) noexcept { + ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept { ASMJIT_ASSERT(size != 0); memcpy(_cursor, data, size); _cursor += size; } template - ASMJIT_FORCE_INLINE void emitValueLE(const T& value, size_t size) noexcept { - typedef typename std::make_unsigned::type U; + ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept { + using U = std::make_unsigned_t; ASMJIT_ASSERT(size <= sizeof(T)); U v = U(value); @@ -111,8 +115,8 @@ public: } template - ASMJIT_FORCE_INLINE void emitValueBE(const T& value, size_t size) noexcept { - typedef typename std::make_unsigned::type U; + ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept { + using U = std::make_unsigned_t; ASMJIT_ASSERT(size <= sizeof(T)); U v = U(value); @@ -123,13 +127,13 @@ public: _cursor += size; } - ASMJIT_FORCE_INLINE void emitZeros(size_t size) noexcept { + ASMJIT_INLINE void emitZeros(size_t size) noexcept { ASMJIT_ASSERT(size != 0); memset(_cursor, 0, size); _cursor += size; } - ASMJIT_FORCE_INLINE void remove8(uint8_t* where) noexcept { + ASMJIT_INLINE void remove8(uint8_t* where) noexcept { ASMJIT_ASSERT(where < _cursor); uint8_t* p = where; @@ -139,7 +143,7 @@ public: } template - ASMJIT_FORCE_INLINE void insert8(uint8_t* where, T val) noexcept { + ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept { uint8_t* p = _cursor; while (p != where) { @@ -151,7 +155,7 @@ public: _cursor++; } - 
ASMJIT_FORCE_INLINE void done(BaseAssembler* a) noexcept { + ASMJIT_INLINE void done(BaseAssembler* a) noexcept { CodeBuffer& buffer = a->_section->_buffer; size_t newSize = (size_t)(_cursor - a->_bufferData); ASMJIT_ASSERT(newSize <= buffer.capacity()); @@ -164,9 +168,13 @@ public: //! Code writer utilities. namespace CodeWriterUtils { +[[nodiscard]] bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept; + +[[nodiscard]] bool encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept; +[[nodiscard]] bool writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept; } // {CodeWriterUtils} diff --git a/src/asmjit/core/compiler.cpp b/src/asmjit/core/compiler.cpp index d0c8041..f140471 100644 --- a/src/asmjit/core/compiler.cpp +++ b/src/asmjit/core/compiler.cpp @@ -22,11 +22,11 @@ ASMJIT_BEGIN_NAMESPACE // =================== class GlobalConstPoolPass : public Pass { -public: - typedef Pass Base; -public: ASMJIT_NONCOPYABLE(GlobalConstPoolPass) +public: + using Base = Pass; + GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {} Error run(Zone* zone, Logger* logger) override { @@ -73,27 +73,31 @@ Error BaseCompiler::newFuncNode(FuncNode** out, const FuncSignature& signature) // Initialize the function's detail info. 
Error err = funcNode->detail().init(signature, environment()); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { return reportError(err); + } // If the Target guarantees greater stack alignment than required by the calling convention // then override it as we can prevent having to perform dynamic stack alignment uint32_t environmentStackAlignment = _environment.stackAlignment(); - if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment) + if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment) { funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment); + } // Initialize the function frame. err = funcNode->_frame.init(funcNode->_funcDetail); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { return reportError(err); + } // Allocate space for function arguments. funcNode->_args = nullptr; if (funcNode->argCount() != 0) { funcNode->_args = _allocator.allocT(funcNode->argCount() * sizeof(FuncNode::ArgPack)); - if (ASMJIT_UNLIKELY(!funcNode->_args)) + if (ASMJIT_UNLIKELY(!funcNode->_args)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack)); } @@ -159,8 +163,9 @@ Error BaseCompiler::endFunc() { FuncNode* func = _func; resetState(); - if (ASMJIT_UNLIKELY(!func)) + if (ASMJIT_UNLIKELY(!func)) { return reportError(DebugUtils::errored(kErrorInvalidState)); + } // Add the local constant pool at the end of the function (if exists). ConstPoolNode* localConstPool = _constPools[uint32_t(ConstPoolScope::kLocal)]; @@ -191,15 +196,17 @@ Error BaseCompiler::newInvokeNode(InvokeNode** out, InstId instId, const Operand node->resetOpRange(1, node->opCapacity()); Error err = node->detail().init(signature, environment()); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { return reportError(err); + } // Skip the allocation if there are no arguments. 
uint32_t argCount = signature.argCount(); if (argCount) { node->_args = static_cast(_allocator.alloc(argCount * sizeof(InvokeNode::OperandPack))); - if (!node->_args) + if (!node->_args) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack)); } @@ -235,15 +242,18 @@ Error BaseCompiler::newVirtReg(VirtReg** out, TypeId typeId, OperandSignature si *out = nullptr; uint32_t index = _vRegArray.size(); - if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount))) + if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount))) { return reportError(DebugUtils::errored(kErrorTooManyVirtRegs)); + } - if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk)) + if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } VirtReg* vReg = _vRegZone.allocZeroedT(); - if (ASMJIT_UNLIKELY(!vReg)) + if (ASMJIT_UNLIKELY(!vReg)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } uint32_t size = TypeUtils::sizeOf(typeId); uint32_t alignment = Support::min(size, 64); @@ -251,10 +261,12 @@ Error BaseCompiler::newVirtReg(VirtReg** out, TypeId typeId, OperandSignature si vReg = new(Support::PlacementNew{vReg}) VirtReg(signature, Operand::indexToVirtId(index), size, alignment, typeId); #ifndef ASMJIT_NO_LOGGING - if (name && name[0] != '\0') + if (name && name[0] != '\0') { vReg->_name.setData(&_dataZone, name, SIZE_MAX); - else + } + else { BaseCompiler_assignGenericName(this, vReg); + } #else DebugUtils::unused(name); #endif @@ -270,8 +282,9 @@ Error BaseCompiler::_newReg(BaseReg* out, TypeId typeId, const char* name) { out->reset(); Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, ®Signature); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { return reportError(err); + } VirtReg* vReg; ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name)); @@ -345,8 +358,9 @@ Error 
BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name) } } - if (typeId == TypeId::kVoid) + if (typeId == TypeId::kVoid) { return reportError(DebugUtils::errored(kErrorInvalidState)); + } } } else { @@ -354,8 +368,9 @@ Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name) } Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, ®Signature); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { return reportError(err); + } VirtReg* vReg; ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name)); @@ -379,17 +394,21 @@ Error BaseCompiler::_newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) { out->reset(); - if (size == 0) + if (size == 0) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } - if (alignment == 0) + if (alignment == 0) { alignment = 1; + } - if (!Support::isPowerOf2(alignment)) + if (!Support::isPowerOf2(alignment)) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } - if (alignment > 64) + if (alignment > 64) { alignment = 64; + } VirtReg* vReg; ASMJIT_PROPAGATE(newVirtReg(&vReg, TypeId::kVoid, OperandSignature{0}, name)); @@ -408,21 +427,26 @@ Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, c } Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) { - if (!isVirtIdValid(virtId)) + if (!isVirtIdValid(virtId)) { return DebugUtils::errored(kErrorInvalidVirtId); + } - if (newAlignment && !Support::isPowerOf2(newAlignment)) + if (newAlignment && !Support::isPowerOf2(newAlignment)) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } - if (newAlignment > 64) + if (newAlignment > 64) { newAlignment = 64; + } VirtReg* vReg = virtRegById(virtId); - if (newSize) + if (newSize) { vReg->_virtSize = newSize; + } - if (newAlignment) + if (newAlignment) { vReg->_alignment = 
uint8_t(newAlignment); + } // This is required if the RAPass is already running. There is a chance that a stack-slot has been already // allocated and in that case it has to be updated as well, otherwise we would allocate wrong amount of memory. @@ -438,18 +462,21 @@ Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t new Error BaseCompiler::_newConst(BaseMem* out, ConstPoolScope scope, const void* data, size_t size) { out->reset(); - if (uint32_t(scope) > 1) + if (uint32_t(scope) > 1) { return reportError(DebugUtils::errored(kErrorInvalidArgument)); + } - if (!_constPools[uint32_t(scope)]) + if (!_constPools[uint32_t(scope)]) { ASMJIT_PROPAGATE(newConstPoolNode(&_constPools[uint32_t(scope)])); + } ConstPoolNode* pool = _constPools[uint32_t(scope)]; size_t off; Error err = pool->add(data, size, off); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { return reportError(err); + } *out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) | OperandSignature::fromMemBaseType(RegType::kLabelTag) | @@ -462,7 +489,9 @@ void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) 
{ if (!reg.isVirtReg()) return; VirtReg* vReg = virtRegById(reg.id()); - if (!vReg) return; + if (!vReg) { + return; + } if (fmt && fmt[0] != '\0') { char buf[128]; @@ -487,8 +516,9 @@ Error BaseCompiler::newJumpNode(JumpNode** out, InstId instId, InstOptions instO uint32_t opCount = 1; *out = node; - if (ASMJIT_UNLIKELY(!node)) + if (ASMJIT_UNLIKELY(!node)) { return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } node = new(Support::PlacementNew{node}) JumpNode(this, instId, instOptions, opCount, annotation); node->setOp(0, o0); diff --git a/src/asmjit/core/compiler.h b/src/asmjit/core/compiler.h index 2414815..60d9569 100644 --- a/src/asmjit/core/compiler.h +++ b/src/asmjit/core/compiler.h @@ -51,7 +51,7 @@ class InvokeNode; class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder { public: ASMJIT_NONCOPYABLE(BaseCompiler) - typedef BaseBuilder Base; + using Base = BaseBuilder; //! \name Members //! \{ @@ -96,6 +96,7 @@ public: ASMJIT_API Error addFuncRetNode(FuncRetNode** ASMJIT_NONNULL(out), const Operand_& o0, const Operand_& o1); //! Returns the current function. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; } //! Creates a new \ref FuncNode with the given `signature` and returns it. @@ -163,31 +164,38 @@ public: ASMJIT_API Error _newRegFmt(BaseReg* ASMJIT_NONNULL(out), const BaseReg& ref, const char* fmt, ...); //! Tests whether the given `id` is a valid virtual register id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVirtIdValid(uint32_t id) const noexcept { uint32_t index = Operand::virtIdToIndex(id); return index < _vRegArray.size(); } + //! Tests whether the given `reg` is a virtual register having a valid id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVirtRegValid(const BaseReg& reg) const noexcept { return isVirtIdValid(reg.id()); } //! Returns \ref VirtReg associated with the given `id`. 
+ [[nodiscard]] inline VirtReg* virtRegById(uint32_t id) const noexcept { ASMJIT_ASSERT(isVirtIdValid(id)); return _vRegArray[Operand::virtIdToIndex(id)]; } //! Returns \ref VirtReg associated with the given `reg`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); } //! Returns \ref VirtReg associated with the given virtual register `index`. //! //! \note This is not the same as virtual register id. The conversion between id and its index is implemented //! by \ref Operand_::virtIdToIndex() and \ref Operand_::indexToVirtId() functions. + [[nodiscard]] ASMJIT_INLINE_NODEBUG VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; } //! Returns an array of all virtual registers managed by the Compiler. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& virtRegs() const noexcept { return _vRegArray; } //! \name Stack @@ -230,6 +238,7 @@ public: //! \name Jump Annotations //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& jumpAnnotations() const noexcept { return _jumpAnnotations; } @@ -239,6 +248,7 @@ public: //! Returns a new `JumpAnnotation` instance, which can be used to aggregate possible targets of a jump where the //! target is not a label, for example to implement jump tables. + [[nodiscard]] ASMJIT_API JumpAnnotation* newJumpAnnotation(); //! \} @@ -287,15 +297,23 @@ public: //! \{ //! Returns the compiler that owns this JumpAnnotation. + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseCompiler* compiler() const noexcept { return _compiler; } + //! Returns the annotation id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t annotationId() const noexcept { return _annotationId; } + //! Returns a vector of label identifiers that lists all targets of the jump. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& labelIds() const noexcept { return _labelIds; } //! Tests whether the given `label` is a target of this JumpAnnotation. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); } + //! Tests whether the given `labelId` is a target of this JumpAnnotation. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); } //! \} @@ -342,9 +360,13 @@ public: //! \{ //! Tests whether this JumpNode has associated a \ref JumpAnnotation. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAnnotation() const noexcept { return _annotation != nullptr; } + //! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG JumpAnnotation* annotation() const noexcept { return _annotation; } + //! Sets the \ref JumpAnnotation associated with this jump to `annotation`. ASMJIT_INLINE_NODEBUG void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; } @@ -446,37 +468,54 @@ public: //! \name Accessors //! Returns function exit `LabelNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG LabelNode* exitNode() const noexcept { return _exitNode; } + //! Returns function exit label. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Label exitLabel() const noexcept { return _exitNode->label(); } //! Returns "End of Func" sentinel node. + [[nodiscard]] ASMJIT_INLINE_NODEBUG SentinelNode* endNode() const noexcept { return _end; } //! Returns function detail. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; } + //! Returns function detail. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; } //! Returns function frame. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncFrame& frame() noexcept { return _frame; } + //! Returns function frame. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const FuncFrame& frame() const noexcept { return _frame; } //! Returns function attributes. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _frame.attributes(); } + //! Adds `attrs` to the function attributes. ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _frame.addAttributes(attrs); } //! Returns arguments count. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); } + //! Returns argument packs. + [[nodiscard]] ASMJIT_INLINE_NODEBUG ArgPack* argPacks() const noexcept { return _args; } //! Tests whether the function has a return value. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); } //! Returns argument pack at `argIndex`. + [[nodiscard]] inline ArgPack& argPack(size_t argIndex) const noexcept { ASMJIT_ASSERT(argIndex < argCount()); return _args[argIndex]; @@ -557,12 +596,14 @@ public: } //! Returns an operand at the given `valueIndex`. + [[nodiscard]] inline Operand& operator[](size_t valueIndex) noexcept { ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack); return _data[valueIndex].as(); } //! Returns an operand at the given `valueIndex` (const). + [[nodiscard]] const inline Operand& operator[](size_t valueIndex) const noexcept { ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack); return _data[valueIndex].as(); @@ -601,52 +642,74 @@ public: //! \{ //! Sets the function signature. + [[nodiscard]] inline Error init(const FuncSignature& signature, const Environment& environment) noexcept { return _funcDetail.init(signature, environment); } //! Returns the function detail. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; } + //! Returns the function detail. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; } //! Returns the target operand. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Operand& target() noexcept { return op(0); } + //! 
\overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const Operand& target() const noexcept { return op(0); } //! Returns the number of function return values. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); } + //! Returns the number of function arguments. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); } //! Returns operand pack representing function return value(s). + [[nodiscard]] ASMJIT_INLINE_NODEBUG OperandPack& retPack() noexcept { return _rets; } + //! Returns operand pack representing function return value(s). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const OperandPack& retPack() const noexcept { return _rets; } //! Returns the return value at the given `valueIndex`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; } //! Returns operand pack representing function return value(s). + [[nodiscard]] inline OperandPack& argPack(size_t argIndex) noexcept { ASMJIT_ASSERT(argIndex < argCount()); return _args[argIndex]; } + //! \overload + [[nodiscard]] inline const OperandPack& argPack(size_t argIndex) const noexcept { ASMJIT_ASSERT(argIndex < argCount()); return _args[argIndex]; } //! Returns a function argument at the given `argIndex`. + [[nodiscard]] inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept { ASMJIT_ASSERT(argIndex < argCount()); return _args[argIndex][valueIndex]; } + //! \overload + [[nodiscard]] inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept { ASMJIT_ASSERT(argIndex < argCount()); return _args[argIndex][valueIndex]; @@ -680,7 +743,7 @@ public: class ASMJIT_VIRTAPI FuncPass : public Pass { public: ASMJIT_NONCOPYABLE(FuncPass) - typedef Pass Base; + using Base = Pass; //! \name Construction & Destruction //! 
\{ @@ -693,6 +756,7 @@ public: //! \{ //! Returns the associated `BaseCompiler`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return static_cast(_cb); } //! \} diff --git a/src/asmjit/core/compilerdefs.h b/src/asmjit/core/compilerdefs.h index 4b2963c..1922560 100644 --- a/src/asmjit/core/compilerdefs.h +++ b/src/asmjit/core/compilerdefs.h @@ -96,18 +96,27 @@ public: //! \{ //! Returns the virtual register id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; } //! Returns the virtual register name. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); } + //! Returns the size of the virtual register name. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); } //! Returns a register signature of this virtual register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; } + //! Returns a virtual register type (maps to the physical register type as well). + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); } + //! Returns a virtual register group (maps to the physical register group as well). + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); } //! Returns a real size of the register this virtual register maps to. @@ -115,23 +124,29 @@ public: //! For example if this is a 128-bit SIMD register used for a scalar single precision floating point value then //! its virtSize would be 4, however, the `regSize` would still say 16 (128-bits), because it's the smallest size //! of that register type. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t regSize() const noexcept { return _signature.size(); } //! Returns the virtual register size. //! //! The virtual register size describes how many bytes the virtual register needs to store its content. It can be //! 
smaller than the physical register size, see `regSize()`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t virtSize() const noexcept { return _virtSize; } //! Returns the virtual register alignment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; } //! Returns the virtual register type id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _typeId; } //! Returns the virtual register weight - the register allocator can use it as explicit hint for alloc/spill //! decisions. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t weight() const noexcept { return _weight; } + //! Sets the virtual register weight (0 to 255) - the register allocator can use it as explicit hint for //! alloc/spill decisions and initial bin-packing. ASMJIT_INLINE_NODEBUG void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); } @@ -139,17 +154,20 @@ public: //! Returns whether the virtual register is always allocated to a fixed physical register (and never reallocated). //! //! \note This is only used for special purposes and it's mostly internal. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return bool(_isFixed); } //! Tests whether the virtual register is in fact a stack that only uses the virtual register id. //! //! \note It's an error if a stack is accessed as a register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return bool(_isStack); } //! Tests whether this virtual register (or stack) has assigned a stack offset. //! //! If this is a virtual register that was never allocated on stack, it would return false, otherwise if //! it's a virtual register that was spilled or explicitly allocated stack, the return value would be true. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return bool(_hasStackSlot); } //! Assigns a stack offset of this virtual register to `stackOffset` and sets `_hasStackSlot` to true. 
@@ -159,9 +177,13 @@ public: } //! Tests whether this virtual register has assigned a physical register as a hint to the register allocator. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasHomeIdHint() const noexcept { return _homeIdHint != BaseReg::kIdBad; } + //! Returns a physical register hint, which will be used by the register allocator. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t homeIdHint() const noexcept { return _homeIdHint; } + //! Assigns a physical register hint, which will be used by the register allocator. ASMJIT_INLINE_NODEBUG void setHomeIdHint(uint32_t homeId) noexcept { _homeIdHint = uint8_t(homeId); } //! Resets a physical register hint. @@ -171,14 +193,20 @@ public: //! //! \note Always verify that the stack offset has been assigned by calling \ref hasStackSlot(). The return //! value will be zero when the stack offset was not assigned. + [[nodiscard]] ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return _stackOffset; } //! Tests whether the virtual register has an associated `RAWorkReg` at the moment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasWorkReg() const noexcept { return _workReg != nullptr; } + //! Returns an associated RAWorkReg with this virtual register (only valid during register allocation). + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAWorkReg* workReg() const noexcept { return _workReg; } + //! Associates a RAWorkReg with this virtual register (used by register allocator). ASMJIT_INLINE_NODEBUG void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; } + //! Reset the RAWorkReg association (used by register allocator). 
ASMJIT_INLINE_NODEBUG void resetWorkReg() noexcept { _workReg = nullptr; } diff --git a/src/asmjit/core/constpool.cpp b/src/asmjit/core/constpool.cpp index 1ebbae1..9e368ec 100644 --- a/src/asmjit/core/constpool.cpp +++ b/src/asmjit/core/constpool.cpp @@ -40,8 +40,9 @@ void ConstPool::reset(Zone* zone) noexcept { static inline ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept { ConstPool::Gap* gap = self->_gapPool; - if (!gap) + if (!gap) { return self->_zone->allocT(); + } self->_gapPool = gap->_next; return gap; @@ -87,8 +88,9 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexce // We don't have to check for errors here, if this failed nothing really happened (just the gap won't be // visible) and it will fail again at place where the same check would generate `kErrorOutOfMemory` error. ConstPool::Gap* gap = ConstPool_allocGap(self); - if (!gap) + if (!gap) { return; + } gap->_next = self->_gaps[gapIndex]; self->_gaps[gapIndex] = gap; @@ -102,24 +104,19 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexce } Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept { - size_t treeIndex; + constexpr size_t kMaxSize = size_t(1) << (kIndexCount - 1); - if (size == 64) - treeIndex = kIndex64; - else if (size == 32) - treeIndex = kIndex32; - else if (size == 16) - treeIndex = kIndex16; - else if (size == 8) - treeIndex = kIndex8; - else if (size == 4) - treeIndex = kIndex4; - else if (size == 2) - treeIndex = kIndex2; - else if (size == 1) - treeIndex = kIndex1; - else + // Avoid sizes outside of the supported range. + if (ASMJIT_UNLIKELY(size == 0 || size > kMaxSize)) { return DebugUtils::errored(kErrorInvalidArgument); + } + + size_t treeIndex = Support::ctz(size); + + // Avoid sizes, which are not aligned to power of 2. 
+ if (ASMJIT_UNLIKELY((size_t(1) << treeIndex) != size)) { + return DebugUtils::errored(kErrorInvalidArgument); + } ConstPool::Node* node = _tree[treeIndex].get(data); if (node) { @@ -147,8 +144,9 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept ASMJIT_ASSERT(Support::isAligned(offset, size)); gapSize -= size; - if (gapSize > 0) + if (gapSize > 0) { ConstPool_addGap(this, gapOffset, gapSize); + } } gapIndex++; @@ -169,8 +167,9 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept // Add the initial node to the right index. node = ConstPool::Tree::_newNode(_zone, data, size, offset, false); - if (ASMJIT_UNLIKELY(!node)) + if (ASMJIT_UNLIKELY(!node)) { return DebugUtils::errored(kErrorOutOfMemory); + } _tree[treeIndex].insert(node); _alignment = Support::max(_alignment, size); @@ -192,18 +191,16 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept const uint8_t* pData = static_cast(data); for (size_t i = 0; i < pCount; i++, pData += smallerSize) { node = _tree[treeIndex].get(pData); - if (node) continue; + if (node) { + continue; + } node = ConstPool::Tree::_newNode(_zone, pData, smallerSize, offset + (i * smallerSize), true); _tree[treeIndex].insert(node); } } - if (_minItemSize == 0) - _minItemSize = size; - else - _minItemSize = Support::min(_minItemSize, size); - + _minItemSize = !_minItemSize ? 
size : Support::min(_minItemSize, size); return kErrorOk; } @@ -216,8 +213,9 @@ struct ConstPoolFill { _dataSize(dataSize) {} inline void operator()(const ConstPool::Node* node) noexcept { - if (!node->_shared) + if (!node->_shared) { memcpy(_dst + node->_offset, node->data(), _dataSize); + } } uint8_t* _dst; diff --git a/src/asmjit/core/constpool.h b/src/asmjit/core/constpool.h index 330d1fb..b545611 100644 --- a/src/asmjit/core/constpool.h +++ b/src/asmjit/core/constpool.h @@ -73,6 +73,7 @@ public: _shared(shared), _offset(uint32_t(offset)) {} + [[nodiscard]] ASMJIT_INLINE_NODEBUG void* data() const noexcept { return static_cast(const_cast(this) + 1); } @@ -86,10 +87,12 @@ public: ASMJIT_INLINE_NODEBUG Compare(size_t dataSize) noexcept : _dataSize(dataSize) {} + [[nodiscard]] ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const Node& b) const noexcept { return ::memcmp(a.data(), b.data(), _dataSize); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const void* data) const noexcept { return ::memcmp(a.data(), data, _dataSize); } @@ -114,7 +117,10 @@ public: _size = 0; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; } inline void setDataSize(size_t dataSize) noexcept { @@ -122,6 +128,7 @@ public: _dataSize = dataSize; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG Node* get(const void* data) noexcept { Compare cmp(_dataSize); return _tree.get(data, cmp); @@ -166,6 +173,7 @@ public: } } + [[nodiscard]] static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept { Node* node = zone->allocT(Support::alignUp(sizeof(Node) + size, alignof(Node))); if (ASMJIT_UNLIKELY(!node)) return nullptr; @@ -221,12 +229,19 @@ public: //! \{ //! Tests whether the constant-pool is empty. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; } + //! 
Returns the size of the constant-pool in bytes. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; } + //! Returns minimum alignment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t alignment() const noexcept { return _alignment; } + //! Returns the minimum size of all items added to the constant pool. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t minItemSize() const noexcept { return _minItemSize; } //! \} diff --git a/src/asmjit/core/cpuinfo.cpp b/src/asmjit/core/cpuinfo.cpp index 2450543..a06bd16 100644 --- a/src/asmjit/core/cpuinfo.cpp +++ b/src/asmjit/core/cpuinfo.cpp @@ -58,13 +58,6 @@ #include #endif -// Unfortunately when compiling in C++11 mode MSVC would warn about unused functions as -// [[maybe_unused]] attribute is not used in that case (it's used only by C++17 mode and later). -#if defined(_MSC_VER) - #pragma warning(push) - #pragma warning(disable: 4505) // unreferenced local function has been removed. -#endif // _MSC_VER - ASMJIT_BEGIN_NAMESPACE // CpuInfo - Detect - Compatibility @@ -128,7 +121,7 @@ static inline uint32_t detectHWThreadCount() noexcept { namespace x86 { -typedef CpuFeatures::X86 Ext; +using Ext = CpuFeatures::X86; struct cpuid_t { uint32_t eax, ebx, ecx, edx; }; struct xgetbv_t { uint32_t eax, edx; }; @@ -190,9 +183,11 @@ static inline void simplifyCpuVendor(CpuInfo& cpu, uint32_t d0, uint32_t d1, uin }; uint32_t i; - for (i = 0; i < ASMJIT_ARRAY_SIZE(table) - 1; i++) - if (table[i].d[0] == d0 && table[i].d[1] == d1 && table[i].d[2] == d2) + for (i = 0; i < ASMJIT_ARRAY_SIZE(table) - 1; i++) { + if (table[i].d[0] == d0 && table[i].d[1] == d1 && table[i].d[2] == d2) { break; + } + } memcpy(cpu._vendor.str, table[i].normalized, 8); } @@ -207,8 +202,9 @@ static ASMJIT_FAVOR_SIZE void simplifyCpuBrand(char* s) noexcept { s[0] = '\0'; for (;;) { - if (!c) + if (!c) { break; + } if (!(c == ' ' && (prev == '@' || s[1] == ' ' || s[1] == '@' || s[1] == '\0'))) { *d++ = c; @@ -258,11 +254,13 @@ static 
ASMJIT_FAVOR_SIZE void detectX86Cpu(CpuInfo& cpu) noexcept { uint32_t familyId = (regs.eax >> 8) & 0x0F; // Use extended family and model fields. - if (familyId == 0x06u || familyId == 0x0Fu) + if (familyId == 0x06u || familyId == 0x0Fu) { modelId += (((regs.eax >> 16) & 0x0Fu) << 4); + } - if (familyId == 0x0Fu) + if (familyId == 0x0Fu) { familyId += ((regs.eax >> 20) & 0xFFu); + } cpu._modelId = modelId; cpu._familyId = familyId; @@ -621,7 +619,7 @@ static ASMJIT_FAVOR_SIZE void detectX86Cpu(CpuInfo& cpu) noexcept { namespace arm { // ARM commonly refers to CPU features using FEAT_ prefix, we use Ext:: to make it compatible with other parts. -typedef CpuFeatures::ARM Ext; +using Ext = CpuFeatures::ARM; // CpuInfo - Detect - ARM - OS Kernel Version // ========================================== @@ -632,14 +630,12 @@ struct UNameKernelVersion { inline bool atLeast(int major, int minor, int patch = 0) const noexcept { if (parts[0] >= major) { - if (parts[0] > major) + if (parts[0] > major) { return true; + } if (parts[1] >= minor) { - if (parts[1] > minor) - return true; - - return parts[2] >= patch; + return parts[1] > minor ? true : parts[2] >= patch; } } @@ -647,14 +643,15 @@ struct UNameKernelVersion { } }; -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static UNameKernelVersion getUNameKernelVersion() noexcept { UNameKernelVersion ver{}; ver.parts[0] = -1; utsname buffer; - if (uname(&buffer) != 0) + if (uname(&buffer) != 0) { return ver; + } size_t count = 0; char* p = buffer.release; @@ -662,8 +659,9 @@ static UNameKernelVersion getUNameKernelVersion() noexcept { uint32_t c = uint8_t(*p); if (c >= uint32_t('0') && c <= uint32_t('9')) { ver.parts[count] = int(strtol(p, &p, 10)); - if (++count == 3) + if (++count == 3) { break; + } } else if (c == '.' 
|| c == '-') { p++; @@ -680,13 +678,13 @@ static UNameKernelVersion getUNameKernelVersion() noexcept { // CpuInfo - Detect - ARM - Baseline Features of ARM Architectures // =============================================================== -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void populateBaseAArch32Features(CpuFeatures::ARM& features) noexcept { // No baseline flags at the moment. DebugUtils::unused(features); } -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void populateBaseAArch64Features(CpuFeatures::ARM& features) noexcept { // AArch64 is based on ARMv8.0 and later. features.add(Ext::kARMv6); @@ -711,40 +709,40 @@ static inline void populateBaseARMFeatures(CpuInfo& cpu) noexcept { // ================================================================ // Populates mandatory ARMv8.[v]A features. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_FAVOR_SIZE void populateARMv8AFeatures(CpuFeatures::ARM& features, uint32_t v) noexcept { switch (v) { default: - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 9: // ARMv8.9 features.add(Ext::kCLRBHB, Ext::kCSSC, Ext::kPRFMSLC, Ext::kSPECRES2, Ext::kRAS2); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 8: // ARMv8.8 features.add(Ext::kHBC, Ext::kMOPS, Ext::kNMI); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 7: // ARMv8.7 features.add(Ext::kHCX, Ext::kPAN3, Ext::kWFXT, Ext::kXS); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 6: // ARMv8.6 features.add(Ext::kAMU1_1, Ext::kBF16, Ext::kECV, Ext::kFGT, Ext::kI8MM); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 5: // ARMv8.5 features.add(Ext::kBTI, Ext::kCSV2, Ext::kDPB2, Ext::kFLAGM2, Ext::kFRINTTS, Ext::kSB, Ext::kSPECRES, Ext::kSSBS); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 4: // ARMv8.4 features.add(Ext::kAMU1, Ext::kDIT, Ext::kDOTPROD, Ext::kFLAGM, Ext::kLRCPC2, Ext::kLSE2, Ext::kMPAM, Ext::kNV, Ext::kSEL2, Ext::kTLBIOS, Ext::kTLBIRANGE, Ext::kTRF); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 3: // ARMv8.3 features.add(Ext::kCCIDX, Ext::kFCMA, 
Ext::kJSCVT, Ext::kLRCPC, Ext::kPAUTH); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 2: // ARMv8.2 features.add(Ext::kDPB, Ext::kPAN2, Ext::kRAS, Ext::kUAO); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 1: // ARMv8.1 features.add(Ext::kCRC32, Ext::kLOR, Ext::kLSE, Ext::kPAN, Ext::kRDM, Ext::kVHE); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 0: // ARMv8.0 features.add(Ext::kASIMD, Ext::kFP, Ext::kIDIVA, Ext::kVFP_D32); break; @@ -752,21 +750,21 @@ static ASMJIT_FAVOR_SIZE void populateARMv8AFeatures(CpuFeatures::ARM& features, } // Populates mandatory ARMv9.[v] features. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_FAVOR_SIZE void populateARMv9AFeatures(CpuFeatures::ARM& features, uint32_t v) noexcept { populateARMv8AFeatures(features, v <= 4u ? 5u + v : 9u); switch (v) { default: - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 4: // ARMv9.4 - based on ARMv8.9. - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 3: // ARMv9.3 - based on ARMv8.8. - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 2: // ARMv9.2 - based on ARMv8.7. - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 1: // ARMv9.1 - based on ARMv8.6. - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case 0: // ARMv9.0 - based on ARMv8.5. features.add(Ext::kRME, Ext::kSVE, Ext::kSVE2); break; @@ -780,44 +778,45 @@ static ASMJIT_FAVOR_SIZE void populateARMv9AFeatures(CpuFeatures::ARM& features, // of the registers so it's an implementation that can theoretically be tested / used in mocks. // Merges a feature that contains 0b1111 when it doesn't exist and starts at 0b0000 when it does. 
-ASMJIT_MAYBE_UNUSED -static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeatureNA(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, +[[maybe_unused]] +static ASMJIT_INLINE void mergeAArch64CPUIDFeatureNA( + CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f0, Ext::Id f1 = Ext::kNone, Ext::Id f2 = Ext::kNone, Ext::Id f3 = Ext::kNone) noexcept { uint32_t val = uint32_t((regBits >> offset) & 0xFu); - - // If val == 0b1111 then the feature is not implemented in this case (some early extensions). - if (val == 0xFu) + if (val == 0xFu) { + // If val == 0b1111 then the feature is not implemented in this case (some early extensions). return; + } - if (f0 != Ext::kNone) features.add(f0); - if (f1 != Ext::kNone) features.addIf(val >= 1, f1); - if (f2 != Ext::kNone) features.addIf(val >= 2, f2); - if (f3 != Ext::kNone) features.addIf(val >= 3, f3); + features.addIf(f0 != Ext::kNone, f0); + features.addIf(f1 != Ext::kNone && val >= 1, f1); + features.addIf(f2 != Ext::kNone && val >= 2, f2); + features.addIf(f3 != Ext::kNone && val >= 3, f3); } // Merges a feature identified by a single bit at `offset`. -ASMJIT_MAYBE_UNUSED -static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature1B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1) noexcept { +[[maybe_unused]] +static ASMJIT_INLINE void mergeAArch64CPUIDFeature1B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1) noexcept { features.addIf((regBits & (uint64_t(1) << offset)) != 0, f1); } // Merges a feature-list starting from 0b01 when it does (0b00 means feature not supported). 
-ASMJIT_MAYBE_UNUSED -static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature2B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1, Ext::Id f2, Ext::Id f3) noexcept { +[[maybe_unused]] +static ASMJIT_INLINE void mergeAArch64CPUIDFeature2B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1, Ext::Id f2, Ext::Id f3) noexcept { uint32_t val = uint32_t((regBits >> offset) & 0x3u); - if (f1 != Ext::kNone) features.addIf(val >= 1, f1); - if (f2 != Ext::kNone) features.addIf(val >= 2, f2); - if (f3 != Ext::kNone) features.addIf(val == 3, f3); + features.addIf(f1 != Ext::kNone && val >= 1, f1); + features.addIf(f2 != Ext::kNone && val >= 2, f2); + features.addIf(f3 != Ext::kNone && val == 3, f3); } // Merges a feature-list starting from 0b0001 when it does (0b0000 means feature not supported). -ASMJIT_MAYBE_UNUSED -static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, +[[maybe_unused]] +static ASMJIT_INLINE void mergeAArch64CPUIDFeature4B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1, Ext::Id f2 = Ext::kNone, Ext::Id f3 = Ext::kNone, @@ -826,16 +825,15 @@ static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4B(CpuFeatures::ARM& fea uint32_t val = uint32_t((regBits >> offset) & 0xFu); // if val == 0 it means that this feature is not supported. - - if (f1 != Ext::kNone) features.addIf(val >= 1, f1); - if (f2 != Ext::kNone) features.addIf(val >= 2, f2); - if (f3 != Ext::kNone) features.addIf(val >= 3, f3); - if (f4 != Ext::kNone) features.addIf(val >= 4, f4); + features.addIf(f1 != Ext::kNone && val >= 1, f1); + features.addIf(f2 != Ext::kNone && val >= 2, f2); + features.addIf(f3 != Ext::kNone && val >= 3, f3); + features.addIf(f4 != Ext::kNone && val >= 4, f4); } // Merges a feature that is identified by an exact bit-combination of 4 bits. 
-ASMJIT_MAYBE_UNUSED -static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4S(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, uint32_t value, Ext::Id f1) noexcept { +[[maybe_unused]] +static ASMJIT_INLINE void mergeAArch64CPUIDFeature4S(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, uint32_t value, Ext::Id f1) noexcept { features.addIf(uint32_t((regBits >> offset) & 0xFu) == value, f1); } @@ -846,7 +844,7 @@ static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4S(CpuFeatures::ARM& fea #define MERGE_FEATURE_4S(identifier, reg, offset, ...) mergeAArch64CPUIDFeature4S(cpu.features().arm(), reg, offset, __VA_ARGS__) // Detects features based on the content of ID_AA64PFR0_EL1 and ID_AA64PFR1_EL1 registers. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(CpuInfo& cpu, uint64_t fpr0, uint64_t fpr1) noexcept { // ID_AA64PFR0_EL1 // =============== @@ -911,12 +909,13 @@ static inline void detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(CpuInfo& cpu, uint32_t mpamMain = uint32_t((fpr0 >> 40) & 0xFu); uint32_t mpamFrac = uint32_t((fpr1 >> 16) & 0xFu); - if (mpamMain || mpamFrac) + if (mpamMain || mpamFrac) { cpu.features().arm().add(Ext::kMPAM); + } } // Detects features based on the content of ID_AA64ISAR0_EL1 and ID_AA64ISAR1_EL1 registers. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(CpuInfo& cpu, uint64_t isar0, uint64_t isar1) noexcept { // ID_AA64ISAR0_EL1 // ================ @@ -965,7 +964,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(CpuInfo& cp } // Detects features based on the content of ID_AA64ISAR2_EL1 register. 
-ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void detectAArch64FeaturesViaCPUID_AA64ISAR2(CpuInfo& cpu, uint64_t isar2) noexcept { MERGE_FEATURE_4B("WFxT bits [3:0]" , isar2, 0, Ext::kNone, Ext::kWFXT); MERGE_FEATURE_4B("RPRES bits [7:4]" , isar2, 4, Ext::kRPRES); @@ -988,7 +987,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ISAR2(CpuInfo& cpu, uint64_ // TODO: This register is not accessed at the moment. #if 0 // Detects features based on the content of ID_AA64ISAR3_EL1register. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void detectAArch64FeaturesViaCPUID_AA64ISAR3(CpuInfo& cpu, uint64_t isar3) noexcept { // ID_AA64ISAR3_EL1 // ================ @@ -999,7 +998,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ISAR3(CpuInfo& cpu, uint64_ } #endif -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void detectAArch64FeaturesViaCPUID_AA64MMFR0(CpuInfo& cpu, uint64_t mmfr0) noexcept { // ID_AA64MMFR0_EL1 // ================ @@ -1022,7 +1021,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64MMFR0(CpuInfo& cpu, uint64_ MERGE_FEATURE_4B("ECV bits [63:60]" , mmfr0, 60, Ext::kECV); } -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void detectAArch64FeaturesViaCPUID_AA64MMFR1(CpuInfo& cpu, uint64_t mmfr1) noexcept { // ID_AA64MMFR1_EL1 // ================ @@ -1051,7 +1050,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64MMFR1(CpuInfo& cpu, uint64_ MERGE_FEATURE_4B("ECBHB bits [63:60]" , mmfr1, 60, Ext::kECBHB); } -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void detectAArch64FeaturesViaCPUID_AA64MMFR2(CpuInfo& cpu, uint64_t mmfr2) noexcept { // ID_AA64MMFR2_EL1 // ================ @@ -1082,7 +1081,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64MMFR2(CpuInfo& cpu, uint64_ } // Detects features based on the content of ID_AA64ZFR0_EL1 register. 
-ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void detectAArch64FeaturesViaCPUID_AA64ZFR0(CpuInfo& cpu, uint64_t zfr0) noexcept { MERGE_FEATURE_4B("SVEver bits [3:0]" , zfr0, 0, Ext::kSVE2, Ext::kSVE2_1); MERGE_FEATURE_4B("AES bits [7:4]" , zfr0, 4, Ext::kSVE_AES, Ext::kSVE_PMULL128); @@ -1096,7 +1095,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ZFR0(CpuInfo& cpu, uint64_t MERGE_FEATURE_4B("F64MM bits [59:56]" , zfr0, 56, Ext::kSVE_F64MM); } -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void detectAArch64FeaturesViaCPUID_AA64SMFR0(CpuInfo& cpu, uint64_t smfr0) noexcept { MERGE_FEATURE_1B("SF8DP2 bit [28]" , smfr0, 29, Ext::kSSVE_FP8DOT2); MERGE_FEATURE_1B("SF8DP4 bit [29]" , smfr0, 29, Ext::kSSVE_FP8DOT4); @@ -1143,9 +1142,9 @@ enum class AppleFamilyId : uint32_t { kEVEREST_SAWTOOTH = 0X8765EDEAu // Apple A16. }; -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_FAVOR_SIZE bool detectARMFeaturesViaAppleFamilyId(CpuInfo& cpu) noexcept { - typedef AppleFamilyId Id; + using Id = AppleFamilyId; CpuFeatures::ARM& features = cpu.features().arm(); switch (cpu.familyId()) { @@ -1219,7 +1218,7 @@ static ASMJIT_FAVOR_SIZE bool detectARMFeaturesViaAppleFamilyId(CpuInfo& cpu) no // target it was compiled to. 
#if ASMJIT_ARCH_ARM == 32 -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_FAVOR_SIZE void detectAArch32FeaturesViaCompilerFlags(CpuInfo& cpu) noexcept { DebugUtils::unused(cpu); @@ -1257,7 +1256,7 @@ static ASMJIT_FAVOR_SIZE void detectAArch32FeaturesViaCompilerFlags(CpuInfo& cpu #endif // ASMJIT_ARCH_ARM == 32 #if ASMJIT_ARCH_ARM == 64 -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCompilerFlags(CpuInfo& cpu) noexcept { DebugUtils::unused(cpu); @@ -1413,7 +1412,7 @@ static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCompilerFlags(CpuInfo& cpu } #endif // ASMJIT_ARCH_ARM == 64 -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_FAVOR_SIZE void detectARMFeaturesViaCompilerFlags(CpuInfo& cpu) noexcept { #if ASMJIT_ARCH_ARM == 32 detectAArch32FeaturesViaCompilerFlags(cpu); @@ -1426,7 +1425,7 @@ static ASMJIT_FAVOR_SIZE void detectARMFeaturesViaCompilerFlags(CpuInfo& cpu) no // ===================================================== // Postprocesses AArch32 features. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_FAVOR_SIZE void postProcessAArch32Features(CpuFeatures::ARM& features) noexcept { DebugUtils::unused(features); } @@ -1434,22 +1433,26 @@ static ASMJIT_FAVOR_SIZE void postProcessAArch32Features(CpuFeatures::ARM& featu // Postprocesses AArch64 features. // // The only reason to use this function is to deduce some flags from others. 
-ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_FAVOR_SIZE void postProcessAArch64Features(CpuFeatures::ARM& features) noexcept { - if (features.hasFP16()) + if (features.hasFP16()) { features.add(Ext::kFP16CONV); + } - if (features.hasMTE3()) + if (features.hasMTE3()) { features.add(Ext::kMTE2); + } - if (features.hasMTE2()) + if (features.hasMTE2()) { features.add(Ext::kMTE); + } - if (features.hasSSBS2()) + if (features.hasSSBS2()) { features.add(Ext::kSSBS); + } } -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_FAVOR_SIZE void postProcessARMCpuInfo(CpuInfo& cpu) noexcept { #if ASMJIT_ARCH_ARM == 32 postProcessAArch32Features(cpu.features().arm()); @@ -1466,7 +1469,7 @@ static ASMJIT_FAVOR_SIZE void postProcessARMCpuInfo(CpuInfo& cpu) noexcept { // Since the register ID is encoded with the instruction we have to create a function for each register ID to read. #define ASMJIT_AARCH64_DEFINE_CPUID_READ_FN(func, regId) \ -ASMJIT_MAYBE_UNUSED \ +[[maybe_unused]] \ static inline uint64_t func() noexcept { \ uint64_t output; \ __asm__ __volatile__("mrs %0, " #regId : "=r"(output)); \ @@ -1494,17 +1497,12 @@ ASMJIT_AARCH64_DEFINE_CPUID_READ_FN(aarch64ReadZFR0, S3_0_C0_C4_4) // ID_AA64ZFR // // References: // - https://docs.kernel.org/arch/arm64/cpu-feature-registers.html -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCPUID(CpuInfo& cpu) noexcept { populateBaseARMFeatures(cpu); - detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, - aarch64ReadPFR0(), - aarch64ReadPFR1()); - - detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, - aarch64ReadISAR0(), - aarch64ReadISAR1()); + detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, aarch64ReadPFR0(), aarch64ReadPFR1()); + detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, aarch64ReadISAR0(), aarch64ReadISAR1()); // TODO: Fix this on FreeBSD - I don't know what kernel version allows to access the registers below... 
@@ -1867,13 +1865,8 @@ static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept { const char sysctlCpuPath[] = "machdep.cpu0.cpu_id"; if (sysctlbyname(sysctlCpuPath, ®s, &len, nullptr, 0) == 0) { - detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, - regs.r64(Regs::k64_AA64PFR0), - regs.r64(Regs::k64_AA64PFR1)); - - detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, - regs.r64(Regs::k64_AA64ISAR0), - regs.r64(Regs::k64_AA64ISAR1)); + detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, regs.r64(Regs::k64_AA64PFR0), regs.r64(Regs::k64_AA64PFR1)); + detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, regs.r64(Regs::k64_AA64ISAR0), regs.r64(Regs::k64_AA64ISAR1)); // TODO: AA64ISAR2 should be added when it's provided by NetBSD. // detectAArch64FeaturesViaCPUID_AA64ISAR2(cpu, regs.r64Regs::k64_AA64ISAR2)); @@ -1925,18 +1918,12 @@ static uint64_t openbsdReadAArch64CPUID(OpenBSDAArch64CPUID id) noexcept { } static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept { - typedef OpenBSDAArch64CPUID ID; + using ID = OpenBSDAArch64CPUID; populateBaseARMFeatures(cpu); - detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, - openbsdReadAArch64CPUID(ID::kAA64PFR0), - openbsdReadAArch64CPUID(ID::kAA64PFR1)); - - detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, - openbsdReadAArch64CPUID(ID::kAA64ISAR0), - openbsdReadAArch64CPUID(ID::kAA64ISAR1)); - + detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, openbsdReadAArch64CPUID(ID::kAA64PFR0), openbsdReadAArch64CPUID(ID::kAA64PFR1)); + detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, openbsdReadAArch64CPUID(ID::kAA64ISAR0), openbsdReadAArch64CPUID(ID::kAA64ISAR1)); detectAArch64FeaturesViaCPUID_AA64ISAR2(cpu, openbsdReadAArch64CPUID(ID::kAA64ISAR2)); detectAArch64FeaturesViaCPUID_AA64MMFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64MMFR0)); detectAArch64FeaturesViaCPUID_AA64MMFR1(cpu, openbsdReadAArch64CPUID(ID::kAA64MMFR1)); @@ -1946,8 +1933,9 @@ static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& 
cpu) noexcept { if (cpu.features().arm().hasAny(Ext::kSVE, Ext::kSME)) { detectAArch64FeaturesViaCPUID_AA64ZFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64ZFR0)); - if (cpu.features().arm().hasSME()) + if (cpu.features().arm().hasSME()) { detectAArch64FeaturesViaCPUID_AA64SMFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64SMFR0)); + } } postProcessARMCpuInfo(cpu); @@ -1989,15 +1977,16 @@ static ASMJIT_FAVOR_SIZE long appleDetectARMFeatureViaSysctl(AppleFeatureType ty memcpy(sysctlName + prefixSize, featureName, featureNameSize + 1u); // Include NULL terminator. long val = 0; - if (appleSysctlByName(sysctlName, &val)) + if (appleSysctlByName(sysctlName, &val)) { return val; + } } return 0; } static ASMJIT_FAVOR_SIZE void appleDetectARMFeaturesViaSysctl(CpuInfo& cpu) noexcept { - typedef AppleFeatureType FT; + using FT = AppleFeatureType; // Based on: // - https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics @@ -2069,8 +2058,9 @@ static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept { memcpy(cpu._vendor.str, "APPLE", 6); bool cpuFeaturesPopulated = detectARMFeaturesViaAppleFamilyId(cpu); - if (!cpuFeaturesPopulated) + if (!cpuFeaturesPopulated) { appleDetectARMFeaturesViaSysctl(cpu); + } postProcessARMCpuInfo(cpu); } @@ -2125,8 +2115,4 @@ const CpuInfo& CpuInfo::host() noexcept { return cpuInfoGlobal; } -#if defined(_MSC_VER) - #pragma warning(pop) -#endif // _MSC_VER - ASMJIT_END_NAMESPACE diff --git a/src/asmjit/core/cpuinfo.h b/src/asmjit/core/cpuinfo.h index d365618..a0eabaf 100644 --- a/src/asmjit/core/cpuinfo.h +++ b/src/asmjit/core/cpuinfo.h @@ -26,18 +26,21 @@ public: //! \{ //! \cond INTERNAL - enum : uint32_t { - kMaxFeatures = 256, - kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits - }; + static inline constexpr uint32_t kMaxFeatures = 256; + static inline constexpr uint32_t kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits; //! \endcond - //! 
A word that is used to represents feature bits. - typedef Support::BitWord BitWord; - //! Iterator that can iterate all CPU features set. - typedef Support::BitVectorIterator Iterator; + //! \} - typedef Support::Array Bits; + //! \name Types + //! \{ + + //! A word that is used to represents feature bits. + using BitWord = Support::BitWord; + //! Iterator that can iterate all CPU features set. + using Iterator = Support::BitVectorIterator; + + using Bits = Support::Array; //! \} @@ -57,7 +60,10 @@ public: //! \name Overloaded Operators //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator==(const Data& other) const noexcept { return equals(other); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator!=(const Data& other) const noexcept { return !equals(other); } //! \} @@ -66,21 +72,28 @@ public: //! \{ //! Returns true if there are no features set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _bits.aggregate(0) == 0; } //! Returns all features as array of bitwords (see \ref Support::BitWord). + [[nodiscard]] ASMJIT_INLINE_NODEBUG BitWord* bits() noexcept { return _bits.data(); } + //! Returns all features as array of bitwords (const). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const BitWord* bits() const noexcept { return _bits.data(); } //! Returns the number of BitWords returned by \ref bits(). + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t bitWordCount() const noexcept { return kNumBitWords; } //! Returns \ref Support::BitVectorIterator, that can be used to iterate over all features efficiently. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Iterator iterator() const noexcept { return Iterator(_bits.data(), kNumBitWords); } //! Tests whether the feature `featureId` is present. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool has(const FeatureId& featureId) const noexcept { ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures); @@ -92,6 +105,7 @@ public: //! 
\cond NONE template + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAny(const FeatureId& featureId) const noexcept { return has(featureId); } @@ -101,11 +115,13 @@ public: //! //! \note This is a variadic function template that can be used with multiple features. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAny(const FeatureId& featureId, Args&&... otherFeatureIds) const noexcept { return bool(unsigned(has(featureId)) | unsigned(hasAny(std::forward(otherFeatureIds)...))); } //! Tests whether all features as defined by `other` are present. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAll(const Data& other) const noexcept { uint32_t result = 1; for (uint32_t i = 0; i < kNumBitWords; i++) @@ -123,7 +139,7 @@ public: //! Adds the given CPU `featureId` to the list of features. template - ASMJIT_INLINE_NODEBUG void add(const FeatureId& featureId) noexcept { + inline void add(const FeatureId& featureId) noexcept { ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures); uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits; @@ -133,13 +149,13 @@ public: } template - ASMJIT_INLINE_NODEBUG void add(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept { + inline void add(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept { add(featureId); add(std::forward(otherFeatureIds)...); } template - ASMJIT_INLINE_NODEBUG void addIf(bool condition, const FeatureId& featureId) noexcept { + inline void addIf(bool condition, const FeatureId& featureId) noexcept { ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures); uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits; @@ -149,14 +165,14 @@ public: } template - ASMJIT_INLINE_NODEBUG void addIf(bool condition, const FeatureId& featureId, Args&&... otherFeatureIds) noexcept { + inline void addIf(bool condition, const FeatureId& featureId, Args&&... otherFeatureIds) noexcept { addIf(condition, featureId); addIf(condition, std::forward(otherFeatureIds)...); } //! 
Removes the given CPU `featureId` from the list of features. template - ASMJIT_INLINE_NODEBUG void remove(const FeatureId& featureId) noexcept { + inline void remove(const FeatureId& featureId) noexcept { ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures); uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits; @@ -166,7 +182,7 @@ public: } template - ASMJIT_INLINE_NODEBUG void remove(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept { + inline void remove(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept { remove(featureId); remove(std::forward(otherFeatureIds)...); } @@ -1104,6 +1120,7 @@ public: //! Returns the host CPU information. //! //! \note The returned reference is global - it's setup only once and then shared. + [[nodiscard]] ASMJIT_API static const CpuInfo& host() noexcept; //! \} @@ -1134,15 +1151,18 @@ public: //! \{ //! Returns the CPU architecture this information relates to. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; } //! Returns the CPU sub-architecture this information relates to. + [[nodiscard]] ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; } //! Returns whether the CPU was detected successfully. //! //! If the returned value is false it means that AsmJit either failed to detect the CPU or it doesn't have //! implementation targeting the host architecture and operating system. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool wasDetected() const noexcept { return _wasDetected; } //! Returns the CPU family ID. @@ -1152,6 +1172,7 @@ public: //! - Family identifier matches the FamilyId read by using CPUID. //! - ARM: //! - Apple - returns Apple Family identifier returned by sysctlbyname("hw.cpufamily"). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t familyId() const noexcept { return _familyId; } //! Returns the CPU model ID. @@ -1159,6 +1180,7 @@ public: //! The information provided depends on architecture and OS: //! - X86: //! 
- Model identifier matches the ModelId read by using CPUID. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t modelId() const noexcept { return _modelId; } //! Returns the CPU brand id. @@ -1166,6 +1188,7 @@ public: //! The information provided depends on architecture and OS: //! - X86: //! - Brand identifier matches the BrandId read by using CPUID. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t brandId() const noexcept { return _brandId; } //! Returns the CPU stepping. @@ -1173,6 +1196,7 @@ public: //! The information provided depends on architecture and OS: //! - X86: //! - Stepping identifier matches the Stepping information read by using CPUID. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t stepping() const noexcept { return _stepping; } //! Returns the processor type. @@ -1180,34 +1204,46 @@ public: //! The information provided depends on architecture and OS: //! - X86: //! - Processor type identifier matches the ProcessorType read by using CPUID. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t processorType() const noexcept { return _processorType; } //! Returns the maximum number of logical processors. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; } //! Returns the size of a CPU cache line. //! //! On a multi-architecture system this should return the smallest cache line of all CPUs. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t cacheLineSize() const noexcept { return _cacheLineSize; } //! Returns number of hardware threads available. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t hwThreadCount() const noexcept { return _hwThreadCount; } //! Returns a CPU vendor string. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* vendor() const noexcept { return _vendor.str; } + //! Tests whether the CPU vendor string is equal to `s`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVendor(const char* s) const noexcept { return _vendor.equals(s); } //! Returns a CPU brand string. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* brand() const noexcept { return _brand.str; } //! Returns CPU features. + [[nodiscard]] ASMJIT_INLINE_NODEBUG CpuFeatures& features() noexcept { return _features; } + //! Returns CPU features (const). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const CpuFeatures& features() const noexcept { return _features; } //! Tests whether the CPU has the given `feature`. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFeature(const FeatureId& featureId) const noexcept { return _features.has(featureId); } //! Adds the given CPU `featureId` to the list of features. diff --git a/src/asmjit/core/emithelper.cpp b/src/asmjit/core/emithelper.cpp index 36b984f..f534e33 100644 --- a/src/asmjit/core/emithelper.cpp +++ b/src/asmjit/core/emithelper.cpp @@ -23,22 +23,27 @@ static void dumpFuncValue(String& sb, Arch arch, const FuncValue& value) noexcep Formatter::formatTypeId(sb, value.typeId()); sb.append('@'); - if (value.isIndirect()) + if (value.isIndirect()) { sb.append('['); + } - if (value.isReg()) + if (value.isReg()) { Formatter::formatRegister(sb, 0, nullptr, arch, value.regType(), value.regId()); - else if (value.isStack()) + } + else if (value.isStack()) { sb.appendFormat("[%d]", value.stackOffset()); - else + } + else { sb.append(""); + } - if (value.isIndirect()) + if (value.isIndirect()) { sb.append(']'); + } } static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept { - typedef FuncArgsContext::Var Var; + using Var = FuncArgsContext::Var; Arch arch = ctx.arch(); uint32_t varCount = ctx.varCount(); @@ -53,8 +58,9 @@ static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept { sb.append(" <- "); dumpFuncValue(sb, arch, cur); - if (var.isDone()) + if (var.isDone()) { sb.append(" {Done}"); + } sb.append('\n'); } @@ -83,8 +89,8 @@ Error BaseEmitHelper::emitArgMove(const BaseReg& dst_, TypeId dstTypeId, const O // =================================== ASMJIT_FAVOR_SIZE Error 
BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) { - typedef FuncArgsContext::Var Var; - typedef FuncArgsContext::WorkData WorkData; + using Var = FuncArgsContext::Var; + using WorkData = FuncArgsContext::WorkData; enum WorkFlags : uint32_t { kWorkNone = 0x00, @@ -118,10 +124,12 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram BaseReg sa = sp; if (frame.hasDynamicAlignment()) { - if (frame.hasPreservedFP()) + if (frame.hasPreservedFP()) { sa.setId(archTraits.fpRegId()); - else + } + else { sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId()); + } } // Register to stack and stack to stack moves must be first as now we have @@ -135,8 +143,9 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram for (uint32_t varId = 0; varId < varCount; varId++) { Var& var = ctx._vars[varId]; - if (!var.out.isStack()) + if (!var.out.isStack()) { continue; + } FuncValue& cur = var.cur; FuncValue& out = var.out; @@ -169,13 +178,15 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram // we follow the rule that IntToInt moves will use GP regs with possibility to signature or zero extend, // and all other moves will either use GP or VEC regs depending on the size of the move. 
OperandSignature signature = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId()); - if (ASMJIT_UNLIKELY(!signature.isValid())) + if (ASMJIT_UNLIKELY(!signature.isValid())) { return DebugUtils::errored(kErrorInvalidState); + } WorkData& wd = workData[signature.regGroup()]; RegMask availableRegs = wd.availableRegs(); - if (ASMJIT_UNLIKELY(!availableRegs)) + if (ASMJIT_UNLIKELY(!availableRegs)) { return DebugUtils::errored(kErrorInvalidState); + } uint32_t availableId = Support::ctz(availableRegs); reg.setSignatureAndId(signature, availableId); @@ -183,8 +194,9 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId())); } - if (cur.isIndirect() && cur.isReg()) + if (cur.isIndirect() && cur.isReg()) { workData[RegGroup::kGp].unassign(varId, cur.regId()); + } // Register to stack move. ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId())); @@ -198,8 +210,9 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram for (;;) { for (uint32_t varId = 0; varId < varCount; varId++) { Var& var = ctx._vars[varId]; - if (var.isDone() || !var.cur.isReg()) + if (var.isDone() || !var.cur.isReg()) { continue; + } FuncValue& cur = var.cur; FuncValue& out = var.out; @@ -224,13 +237,15 @@ EmitMove: BaseReg(archTraits.regTypeToSignature(cur.regType()), curId), cur.typeId())); // Only reassign if this is not a sign/zero extension that happens on the same in/out register. - if (curId != outId) + if (curId != outId) { wd.reassign(varId, outId, curId); + } cur.initReg(out.regType(), outId, out.typeId()); - if (outId == out.regId()) + if (outId == out.regId()) { var.markDone(); + } workFlags |= kWorkDidSome | kWorkPending; } else { @@ -241,20 +256,21 @@ EmitMove: // Only few architectures provide swap operations, and only for few register groups. 
if (archTraits.hasInstRegSwap(curGroup)) { RegType highestType = Support::max(cur.regType(), altVar.cur.regType()); - if (Support::isBetween(highestType, RegType::kGp8Lo, RegType::kGp16)) + if (Support::isBetween(highestType, RegType::kGp8Lo, RegType::kGp16)) { highestType = RegType::kGp32; + } OperandSignature signature = archTraits.regTypeToSignature(highestType); - ASMJIT_PROPAGATE( - emitRegSwap(BaseReg(signature, outId), BaseReg(signature, curId))); + ASMJIT_PROPAGATE(emitRegSwap(BaseReg(signature, outId), BaseReg(signature, curId))); wd.swap(varId, curId, altId, outId); cur.setRegId(outId); var.markDone(); altVar.cur.setRegId(curId); - if (altVar.out.isInitialized()) + if (altVar.out.isInitialized()) { altVar.markDone(); + } workFlags |= kWorkDidSome; } else { @@ -262,8 +278,9 @@ EmitMove: RegMask availableRegs = wd.availableRegs(); if (availableRegs) { RegMask inOutRegs = wd.dstRegs(); - if (availableRegs & ~inOutRegs) + if (availableRegs & ~inOutRegs) { availableRegs &= ~inOutRegs; + } outId = Support::ctz(availableRegs); goto EmitMove; } @@ -279,12 +296,14 @@ EmitMove: } } - if (!(workFlags & kWorkPending)) + if (!(workFlags & kWorkPending)) { break; + } // If we did nothing twice it means that something is really broken. - if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed) + if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed) { return DebugUtils::errored(kErrorInvalidState); + } workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed; } @@ -294,8 +313,9 @@ EmitMove: if (ctx._hasStackSrc) { uint32_t iterCount = 1; - if (frame.hasDynamicAlignment() && !frame.hasPreservedFP()) + if (frame.hasDynamicAlignment() && !frame.hasPreservedFP()) { sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId()); + } // Base address of all arguments passed by stack. 
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id()))); @@ -303,8 +323,9 @@ EmitMove: for (uint32_t iter = 0; iter < iterCount; iter++) { for (uint32_t varId = 0; varId < varCount; varId++) { Var& var = ctx._vars[varId]; - if (var.isDone()) + if (var.isDone()) { continue; + } if (var.cur.isStack()) { ASMJIT_ASSERT(var.out.isReg()); diff --git a/src/asmjit/core/emithelper_p.h b/src/asmjit/core/emithelper_p.h index 2240ae6..9d75550 100644 --- a/src/asmjit/core/emithelper_p.h +++ b/src/asmjit/core/emithelper_p.h @@ -26,7 +26,9 @@ public: ASMJIT_INLINE_NODEBUG virtual ~BaseEmitHelper() noexcept = default; + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseEmitter* emitter() const noexcept { return _emitter; } + ASMJIT_INLINE_NODEBUG void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; } //! Emits a pure move operation between two registers or the same type or between a register and its home diff --git a/src/asmjit/core/emitter.cpp b/src/asmjit/core/emitter.cpp index 4c855ea..01843b7 100644 --- a/src/asmjit/core/emitter.cpp +++ b/src/asmjit/core/emitter.cpp @@ -52,17 +52,21 @@ static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) n hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate); } - if (emitComments) + if (emitComments) { self->_addEmitterFlags(EmitterFlags::kLogComments); - else + } + else { self->_clearEmitterFlags(EmitterFlags::kLogComments); + } // The reserved option tells emitter (Assembler/Builder/Compiler) that there may be either a border // case (CodeHolder not attached, for example) or that logging or validation is required. 
- if (self->_code == nullptr || self->_logger || hasDiagnosticOptions) + if (self->_code == nullptr || self->_logger || hasDiagnosticOptions) { self->_forcedInstOptions |= InstOptions::kReserved; - else + } + else { self->_forcedInstOptions &= ~InstOptions::kReserved; + } } // BaseEmitter - Diagnostic Options @@ -90,8 +94,9 @@ void BaseEmitter::setLogger(Logger* logger) noexcept { else { _logger = nullptr; _clearEmitterFlags(EmitterFlags::kOwnLogger); - if (_code) + if (_code) { _logger = _code->logger(); + } } BaseEmitter_updateForcedOptions(this); #else @@ -110,16 +115,18 @@ void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept { else { _errorHandler = nullptr; _clearEmitterFlags(EmitterFlags::kOwnErrorHandler); - if (_code) + if (_code) { _errorHandler = _code->errorHandler(); + } } } Error BaseEmitter::reportError(Error err, const char* message) { ErrorHandler* eh = _errorHandler; if (eh) { - if (!message) + if (!message) { message = DebugUtils::errorAsString(err); + } eh->handleError(err, message, this); } return err; @@ -318,8 +325,9 @@ Error BaseEmitter::comment(const char* data, size_t size) { Error BaseEmitter::commentf(const char* fmt, ...) { if (!hasEmitterFlag(EmitterFlags::kLogComments)) { - if (!hasEmitterFlag(EmitterFlags::kAttached)) + if (!hasEmitterFlag(EmitterFlags::kAttached)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } return kErrorOk; } @@ -341,8 +349,9 @@ Error BaseEmitter::commentf(const char* fmt, ...) 
{ Error BaseEmitter::commentv(const char* fmt, va_list ap) { if (!hasEmitterFlag(EmitterFlags::kLogComments)) { - if (!hasEmitterFlag(EmitterFlags::kAttached)) + if (!hasEmitterFlag(EmitterFlags::kAttached)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } return kErrorOk; } @@ -377,11 +386,13 @@ Error BaseEmitter::onAttach(CodeHolder* code) noexcept { Error BaseEmitter::onDetach(CodeHolder* code) noexcept { DebugUtils::unused(code); - if (!hasOwnLogger()) + if (!hasOwnLogger()) { _logger = nullptr; + } - if (!hasOwnErrorHandler()) + if (!hasOwnErrorHandler()) { _errorHandler = nullptr; + } _clearEmitterFlags(~kEmitterPreservedFlags); _instructionAlignment = uint8_t(0); @@ -403,11 +414,13 @@ void BaseEmitter::onSettingsUpdated() noexcept { // Only called when attached to CodeHolder by CodeHolder. ASMJIT_ASSERT(_code != nullptr); - if (!hasOwnLogger()) + if (!hasOwnLogger()) { _logger = _code->logger(); + } - if (!hasOwnErrorHandler()) + if (!hasOwnErrorHandler()) { _errorHandler = _code->errorHandler(); + } BaseEmitter_updateForcedOptions(this); } diff --git a/src/asmjit/core/emitter.h b/src/asmjit/core/emitter.h index 3053721..f811e5f 100644 --- a/src/asmjit/core/emitter.h +++ b/src/asmjit/core/emitter.h @@ -257,18 +257,18 @@ public: //! //! These are typically shared between Assembler/Builder/Compiler of a single backend. 
struct Funcs { - typedef Error (ASMJIT_CDECL* EmitProlog)(BaseEmitter* emitter, const FuncFrame& frame); - typedef Error (ASMJIT_CDECL* EmitEpilog)(BaseEmitter* emitter, const FuncFrame& frame); - typedef Error (ASMJIT_CDECL* EmitArgsAssignment)(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args); + using EmitProlog = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame); + using EmitEpilog = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame); + using EmitArgsAssignment = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args); - typedef Error (ASMJIT_CDECL* FormatInstruction)( + using FormatInstruction = Error (ASMJIT_CDECL*)( String& sb, FormatFlags formatFlags, const BaseEmitter* emitter, Arch arch, - const BaseInst& inst, const Operand_* operands, size_t opCount) ASMJIT_NOEXCEPT_TYPE; + const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept; - typedef Error (ASMJIT_CDECL* ValidateFunc)(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) ASMJIT_NOEXCEPT_TYPE; + using ValidateFunc = Error (ASMJIT_CDECL*)(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept; //! Emit prolog implementation. EmitProlog emitProlog; @@ -306,9 +306,11 @@ public: //! \{ template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* as() noexcept { return reinterpret_cast(this); } template + [[nodiscard]] ASMJIT_INLINE_NODEBUG const T* as() const noexcept { return reinterpret_cast(this); } //! \} @@ -317,24 +319,37 @@ public: //! \{ //! Returns the type of this emitter, see `EmitterType`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG EmitterType emitterType() const noexcept { return _emitterType; } + //! Returns emitter flags , see `Flags`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG EmitterFlags emitterFlags() const noexcept { return _emitterFlags; } //! 
Tests whether the emitter inherits from `BaseAssembler`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAssembler() const noexcept { return _emitterType == EmitterType::kAssembler; } + //! Tests whether the emitter inherits from `BaseBuilder`. //! //! \note Both Builder and Compiler emitters would return `true`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isBuilder() const noexcept { return uint32_t(_emitterType) >= uint32_t(EmitterType::kBuilder); } + //! Tests whether the emitter inherits from `BaseCompiler`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isCompiler() const noexcept { return _emitterType == EmitterType::kCompiler; } //! Tests whether the emitter has the given `flag` enabled. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasEmitterFlag(EmitterFlags flag) const noexcept { return Support::test(_emitterFlags, flag); } + //! Tests whether the emitter is finalized. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFinalized() const noexcept { return hasEmitterFlag(EmitterFlags::kFinalized); } + //! Tests whether the emitter is destroyed (only used during destruction). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isDestroyed() const noexcept { return hasEmitterFlag(EmitterFlags::kDestroyed); } //! \} @@ -353,27 +368,37 @@ public: //! \{ //! Returns the CodeHolder this emitter is attached to. + [[nodiscard]] ASMJIT_INLINE_NODEBUG CodeHolder* code() const noexcept { return _code; } //! Returns the target environment. //! //! The returned \ref Environment reference matches \ref CodeHolder::environment(). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; } //! Tests whether the target architecture is 32-bit. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return environment().is32Bit(); } + //! Tests whether the target architecture is 64-bit. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return environment().is64Bit(); } //! Returns the target architecture type. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return environment().arch(); } + //! Returns the target architecture sub-type. + [[nodiscard]] ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); } //! Returns the target architecture's GP register size (4 or 8 bytes). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return environment().registerSize(); } //! Returns a signature of a native general purpose register (either 32-bit or 64-bit depending on the architecture). + [[nodiscard]] ASMJIT_INLINE_NODEBUG OperandSignature gpSignature() const noexcept { return _gpSignature; } //! Returns instruction alignment. @@ -382,6 +407,7 @@ public: //! - X86 and X86_64 - instruction alignment is 1 //! - AArch32 - instruction alignment is 4 in A32 mode and 2 in THUMB mode. //! - AArch64 - instruction alignment is 4 + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t instructionAlignment() const noexcept { return _instructionAlignment; } //! \} @@ -390,6 +416,7 @@ public: //! \{ //! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _code != nullptr; } //! Finalizes this emitter. @@ -407,18 +434,21 @@ public: //! \{ //! Tests whether the emitter has a logger. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasLogger() const noexcept { return _logger != nullptr; } //! Tests whether the emitter has its own logger. //! //! Own logger means that it overrides the possible logger that may be used by \ref CodeHolder this emitter is //! attached to. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasOwnLogger() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnLogger); } //! Returns the logger this emitter uses. //! //! The returned logger is either the emitter's own logger or it's logger used by \ref CodeHolder this emitter //! is attached to. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; } //! Sets or resets the logger of the emitter. @@ -440,18 +470,21 @@ public: //! \{ //! Tests whether the emitter has an error handler attached. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; } //! Tests whether the emitter has its own error handler. //! //! Own error handler means that it overrides the possible error handler that may be used by \ref CodeHolder this //! emitter is attached to. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnErrorHandler); } //! Returns the error handler this emitter uses. //! //! The returned error handler is either the emitter's own error handler or it's error handler used by //! \ref CodeHolder this emitter is attached to. + [[nodiscard]] ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; } //! Sets or resets the error handler of the emitter. @@ -472,8 +505,11 @@ public: //! \{ //! Returns encoding options. + [[nodiscard]] ASMJIT_INLINE_NODEBUG EncodingOptions encodingOptions() const noexcept { return _encodingOptions; } + //! Tests whether the encoding `option` is set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasEncodingOption(EncodingOptions option) const noexcept { return Support::test(_encodingOptions, option); } //! Enables the given encoding `options`. @@ -487,9 +523,11 @@ public: //! \{ //! Returns the emitter's diagnostic options. + [[nodiscard]] ASMJIT_INLINE_NODEBUG DiagnosticOptions diagnosticOptions() const noexcept { return _diagnosticOptions; } //! Tests whether the given `option` is present in the emitter's diagnostic options. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); } //! Activates the given diagnostic `options`. 
@@ -527,35 +565,49 @@ public: //! Forced instruction options are merged with next instruction options before the instruction is encoded. These //! options have some bits reserved that are used by error handling, logging, and instruction validation purposes. //! Other options are globals that affect each instruction. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstOptions forcedInstOptions() const noexcept { return _forcedInstOptions; } //! Returns options of the next instruction. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstOptions instOptions() const noexcept { return _instOptions; } + //! Returns options of the next instruction. ASMJIT_INLINE_NODEBUG void setInstOptions(InstOptions options) noexcept { _instOptions = options; } + //! Adds options of the next instruction. ASMJIT_INLINE_NODEBUG void addInstOptions(InstOptions options) noexcept { _instOptions |= options; } + //! Resets options of the next instruction. ASMJIT_INLINE_NODEBUG void resetInstOptions() noexcept { _instOptions = InstOptions::kNone; } //! Tests whether the extra register operand is valid. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); } + //! Returns an extra operand that will be used by the next instruction (architecture specific). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; } + //! Sets an extra operand that will be used by the next instruction (architecture specific). ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); } + //! Sets an extra operand that will be used by the next instruction (architecture specific). ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); } + //! Resets an extra operand that will be used by the next instruction (architecture specific). ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); } //! Returns comment/annotation of the next instruction. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; } + //! Sets comment/annotation of the next instruction. //! //! \note This string is set back to null by `_emit()`, but until that it has to remain valid as the Emitter is not //! required to make a copy of it (and it would be slow to do that for each instruction). ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; } + //! Resets the comment/annotation to nullptr. ASMJIT_INLINE_NODEBUG void resetInlineComment() noexcept { _inlineComment = nullptr; } @@ -581,6 +633,7 @@ public: //! Grabs the current emitter state and resets the emitter state at the same time, returning the state the emitter //! had before the state was reset. + [[nodiscard]] ASMJIT_INLINE_NODEBUG State _grabState() noexcept { State s{_instOptions | _forcedInstOptions, _extraReg, _inlineComment}; resetState(); @@ -604,13 +657,19 @@ public: //! \{ //! Creates a new label. + [[nodiscard]] ASMJIT_API virtual Label newLabel(); + //! Creates a new named label. + [[nodiscard]] ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId); //! Creates a new anonymous label with a name, which can only be used for debugging purposes. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Label newAnonymousLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kAnonymous); } + //! Creates a new external label. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kExternal); } //! Returns `Label` by `name`. @@ -619,6 +678,7 @@ public: //! //! \note This function doesn't trigger ErrorHandler in case the name is invalid or no such label exist. You must //! always check the validity of the `Label` returned. 
+ [[nodiscard]] ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept; //! Binds the `label` to the current position of the current section. @@ -627,8 +687,11 @@ public: ASMJIT_API virtual Error bind(const Label& label); //! Tests whether the label `id` is valid (i.e. registered). + [[nodiscard]] ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept; + //! Tests whether the `label` is valid (i.e. registered). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); } //! \} @@ -676,7 +739,7 @@ public: //! Similar to \ref emit(), but emits instruction with both instruction options and extra register, followed //! by an array of `operands`. - ASMJIT_FORCE_INLINE Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) { + ASMJIT_INLINE Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) { setInstOptions(inst.options()); setExtraReg(inst.extraReg()); return _emitOpArray(inst.id(), operands, opCount); @@ -794,6 +857,7 @@ public: //! Called after the emitter was attached to `CodeHolder`. ASMJIT_API virtual Error onAttach(CodeHolder* ASMJIT_NONNULL(code)) noexcept; + //! Called after the emitter was detached from `CodeHolder`. 
ASMJIT_API virtual Error onDetach(CodeHolder* ASMJIT_NONNULL(code)) noexcept; diff --git a/src/asmjit/core/emitterutils.cpp b/src/asmjit/core/emitterutils.cpp index d0a6872..888d079 100644 --- a/src/asmjit/core/emitterutils.cpp +++ b/src/asmjit/core/emitterutils.cpp @@ -88,10 +88,12 @@ void logInstructionEmitted( sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kCode)); self->_funcs.formatInstruction(sb, formatFlags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount); - if (Support::test(formatFlags, FormatFlags::kMachineCode)) + if (Support::test(formatFlags, FormatFlags::kMachineCode)) { finishFormattedLine(sb, logger->options(), self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment()); - else + } + else { finishFormattedLine(sb, logger->options(), nullptr, SIZE_MAX, 0, 0, self->inlineComment()); + } logger->log(sb); } diff --git a/src/asmjit/core/emitterutils_p.h b/src/asmjit/core/emitterutils_p.h index 8b6e1e0..b4faae3 100644 --- a/src/asmjit/core/emitterutils_p.h +++ b/src/asmjit/core/emitterutils_p.h @@ -31,7 +31,8 @@ enum kOpIndex : uint32_t { kOp5 = 2 }; -static ASMJIT_FORCE_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept { +[[nodiscard]] +static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept { uint32_t opCount = 0; if (opExt[kOp3].isNone()) { @@ -49,7 +50,7 @@ static ASMJIT_FORCE_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, cons return opCount; } -static ASMJIT_FORCE_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept { +static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept 
{ dst[0].copyFrom(o0); dst[1].copyFrom(o1); dst[2].copyFrom(o2); diff --git a/src/asmjit/core/environment.cpp b/src/asmjit/core/environment.cpp index 9a694af..fa0b1c9 100644 --- a/src/asmjit/core/environment.cpp +++ b/src/asmjit/core/environment.cpp @@ -35,8 +35,9 @@ uint32_t Environment::stackAlignment() const noexcept { return 16u; } - if (isFamilyARM()) + if (isFamilyARM()) { return 8; + } // Bail to 4-byte alignment if we don't know. return 4; diff --git a/src/asmjit/core/environment.h b/src/asmjit/core/environment.h index c3678dc..a66586f 100644 --- a/src/asmjit/core/environment.h +++ b/src/asmjit/core/environment.h @@ -225,13 +225,13 @@ public: //! \{ //! Creates a default initialized environment (all values either unknown or set to safe defaults). - ASMJIT_INLINE_NODEBUG constexpr Environment() noexcept = default; + ASMJIT_INLINE_CONSTEXPR Environment() noexcept = default; //! Creates a copy of `other` instance. - ASMJIT_INLINE_NODEBUG constexpr Environment(const Environment& other) noexcept = default; + ASMJIT_INLINE_CONSTEXPR Environment(const Environment& other) noexcept = default; //! Creates \ref Environment initialized to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`, //! and `floatABI`. - ASMJIT_INLINE_NODEBUG constexpr explicit Environment( + ASMJIT_INLINE_CONSTEXPR explicit Environment( Arch arch, SubArch subArch = SubArch::kUnknown, Vendor vendor = Vendor::kUnknown, @@ -251,7 +251,7 @@ public: //! //! The returned environment should precisely match the target host architecture, sub-architecture, platform, //! and ABI. 
- static ASMJIT_INLINE_NODEBUG Environment host() noexcept { + static ASMJIT_INLINE_CONSTEXPR Environment host() noexcept { return Environment(Arch::kHost, SubArch::kHost, Vendor::kHost, Platform::kHost, PlatformABI::kHost, ObjectFormat::kUnknown, FloatABI::kHost); } @@ -262,7 +262,10 @@ public: ASMJIT_INLINE_NODEBUG Environment& operator=(const Environment& other) noexcept = default; - ASMJIT_INLINE_NODEBUG bool operator==(const Environment& other) const noexcept { return equals(other); } + [[nodiscard]] + ASMJIT_INLINE_NODEBUG bool operator==(const Environment& other) const noexcept { return equals(other); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator!=(const Environment& other) const noexcept { return !equals(other); } //! \} @@ -273,6 +276,7 @@ public: //! Tests whether the environment is not set up. //! //! Returns true if all members are zero, and thus unknown. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { // Unfortunately compilers won't optimize fields are checked one by one... return _packed() == 0; @@ -280,10 +284,12 @@ public: //! Tests whether the environment is initialized, which means it must have //! a valid architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _arch != Arch::kUnknown; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t _packed() const noexcept { uint64_t x; memcpy(&x, this, 8); @@ -294,21 +300,35 @@ public: ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Environment{}; } //! Tests whether this environment is equal to `other`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool equals(const Environment& other) const noexcept { return _packed() == other._packed(); } //! Returns the architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; } + //! Returns the sub-architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; } + //! Returns vendor. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG Vendor vendor() const noexcept { return _vendor; } + //! Returns target's platform or operating system. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Platform platform() const noexcept { return _platform; } + //! Returns target's ABI. + [[nodiscard]] ASMJIT_INLINE_NODEBUG PlatformABI platformABI() const noexcept { return _platformABI; } + //! Returns target's object format. + [[nodiscard]] ASMJIT_INLINE_NODEBUG ObjectFormat objectFormat() const noexcept { return _objectFormat; } + //! Returns floating point ABI. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FloatABI floatABI() const noexcept { return _floatABI; } //! Initializes \ref Environment to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`, @@ -333,57 +353,99 @@ public: } //! Tests whether this environment describes a 32-bit X86. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isArchX86() const noexcept { return _arch == Arch::kX86; } + //! Tests whether this environment describes a 64-bit X86. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isArchX64() const noexcept { return _arch == Arch::kX64; } + //! Tests whether this environment describes a 32-bit ARM. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isArchARM() const noexcept { return isArchARM(_arch); } + //! Tests whether this environment describes a 32-bit ARM in THUMB mode. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isArchThumb() const noexcept { return isArchThumb(_arch); } + //! Tests whether this environment describes a 64-bit X86. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isArchAArch64() const noexcept { return isArchAArch64(_arch); } + //! Tests whether this environment describes a 32-bit MIPS. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isArchMIPS32() const noexcept { return isArchMIPS32(_arch); } + //! Tests whether this environment describes a 64-bit MIPS. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isArchMIPS64() const noexcept { return isArchMIPS64(_arch); } + //! 
Tests whether this environment describes a 32-bit RISC-V. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isArchRISCV32() const noexcept { return _arch == Arch::kRISCV32; } + //! Tests whether this environment describes a 64-bit RISC-V. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isArchRISCV64() const noexcept { return _arch == Arch::kRISCV64; } //! Tests whether the architecture is 32-bit. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return is32Bit(_arch); } + //! Tests whether the architecture is 64-bit. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return is64Bit(_arch); } //! Tests whether the architecture is little endian. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLittleEndian() const noexcept { return isLittleEndian(_arch); } + //! Tests whether the architecture is big endian. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isBigEndian() const noexcept { return isBigEndian(_arch); } //! Tests whether this architecture is of X86 family. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFamilyX86() const noexcept { return isFamilyX86(_arch); } + //! Tests whether this architecture family is ARM, THUMB, or AArch64. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFamilyARM() const noexcept { return isFamilyARM(_arch); } + //! Tests whether this architecture family is AArch32 (ARM or THUMB). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFamilyAArch32() const noexcept { return isFamilyAArch32(_arch); } + //! Tests whether this architecture family is AArch64. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFamilyAArch64() const noexcept { return isFamilyAArch64(_arch); } + //! Tests whether this architecture family is MISP or MIPS64. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); } + //! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); } //! 
Tests whether the environment platform is Windows. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isPlatformWindows() const noexcept { return _platform == Platform::kWindows; } + //! Tests whether the environment platform is Linux. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isPlatformLinux() const noexcept { return _platform == Platform::kLinux; } + //! Tests whether the environment platform is Hurd. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isPlatformHurd() const noexcept { return _platform == Platform::kHurd; } + //! Tests whether the environment platform is Haiku. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isPlatformHaiku() const noexcept { return _platform == Platform::kHaiku; } //! Tests whether the environment platform is any BSD. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isPlatformBSD() const noexcept { return _platform == Platform::kFreeBSD || _platform == Platform::kOpenBSD || @@ -392,6 +454,7 @@ public: } //! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isPlatformApple() const noexcept { return _platform == Platform::kOSX || _platform == Platform::kIOS || @@ -400,16 +463,23 @@ public: } //! Tests whether the ABI is MSVC. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMSVC() const noexcept { return _platformABI == PlatformABI::kMSVC; } + //! Tests whether the ABI is GNU. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isGNU() const noexcept { return _platformABI == PlatformABI::kGNU; } + //! Tests whether the ABI is GNU. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isDarwin() const noexcept { return _platformABI == PlatformABI::kDarwin; } //! Returns a calculated stack alignment for this environment. + [[nodiscard]] ASMJIT_API uint32_t stackAlignment() const noexcept; //! Returns a native register size of this architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); } //! Sets the architecture to `arch`. 
@@ -433,90 +503,108 @@ public: //! \name Static Utilities //! \{ + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isDefinedArch(Arch arch) noexcept { return uint32_t(arch) <= uint32_t(Arch::kMaxValue); } + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isValidArch(Arch arch) noexcept { return arch != Arch::kUnknown && uint32_t(arch) <= uint32_t(Arch::kMaxValue); } //! Tests whether the given architecture `arch` is 32-bit. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool is32Bit(Arch arch) noexcept { return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == uint32_t(Arch::k32BitMask); } //! Tests whether the given architecture `arch` is 64-bit. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool is64Bit(Arch arch) noexcept { return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == 0; } //! Tests whether the given architecture `arch` is little endian. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isLittleEndian(Arch arch) noexcept { return uint32_t(arch) < uint32_t(Arch::kBigEndian); } //! Tests whether the given architecture `arch` is big endian. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isBigEndian(Arch arch) noexcept { return uint32_t(arch) >= uint32_t(Arch::kBigEndian); } //! Tests whether the given architecture is Thumb or Thumb_BE. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isArchThumb(Arch arch) noexcept { return arch == Arch::kThumb || arch == Arch::kThumb_BE; } //! Tests whether the given architecture is ARM or ARM_BE. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isArchARM(Arch arch) noexcept { return arch == Arch::kARM || arch == Arch::kARM_BE; } //! Tests whether the given architecture is AArch64 or AArch64_BE. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isArchAArch64(Arch arch) noexcept { return arch == Arch::kAArch64 || arch == Arch::kAArch64_BE; } //! Tests whether the given architecture is MIPS32_LE or MIPS32_BE. 
+ [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isArchMIPS32(Arch arch) noexcept { return arch == Arch::kMIPS32_LE || arch == Arch::kMIPS32_BE; } //! Tests whether the given architecture is MIPS64_LE or MIPS64_BE. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isArchMIPS64(Arch arch) noexcept { return arch == Arch::kMIPS64_LE || arch == Arch::kMIPS64_BE; } //! Tests whether the given architecture family is X86 or X64. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isFamilyX86(Arch arch) noexcept { return arch == Arch::kX86 || arch == Arch::kX64; } //! Tests whether the given architecture family is AArch32 (ARM or THUMB). + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isFamilyAArch32(Arch arch) noexcept { return isArchARM(arch) || isArchThumb(arch); } //! Tests whether the given architecture family is AArch64. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isFamilyAArch64(Arch arch) noexcept { return isArchAArch64(arch); } //! Tests whether the given architecture family is ARM, THUMB, or AArch64. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isFamilyARM(Arch arch) noexcept { return isFamilyAArch32(arch) || isFamilyAArch64(arch); } //! Tests whether the given architecture family is MIPS or MIPS64. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isFamilyMIPS(Arch arch) noexcept { return isArchMIPS32(arch) || isArchMIPS64(arch); } //! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit). + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isFamilyRISCV(Arch arch) noexcept { return arch == Arch::kRISCV32 || arch == Arch::kRISCV64; } //! Returns a native general purpose register size from the given architecture. + [[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t registerSizeFromArch(Arch arch) noexcept { return is32Bit(arch) ? 
4u : 8u; } diff --git a/src/asmjit/core/formatter.cpp b/src/asmjit/core/formatter.cpp index 0f12e41..79caae1 100644 --- a/src/asmjit/core/formatter.cpp +++ b/src/asmjit/core/formatter.cpp @@ -51,11 +51,13 @@ static const char wordNameTable[][8] = { Error formatTypeId(String& sb, TypeId typeId) noexcept { - if (typeId == TypeId::kVoid) + if (typeId == TypeId::kVoid) { return sb.append("void"); + } - if (!TypeUtils::isValid(typeId)) + if (!TypeUtils::isValid(typeId)) { return sb.append("unknown"); + } const char* typeName = nullptr; uint32_t typeSize = TypeUtils::sizeOf(typeId); @@ -103,13 +105,15 @@ Error formatFeature( uint32_t featureId) noexcept { #if !defined(ASMJIT_NO_X86) - if (Environment::isFamilyX86(arch)) + if (Environment::isFamilyX86(arch)) { return x86::FormatterInternal::formatFeature(sb, featureId); + } #endif #if !defined(ASMJIT_NO_AARCH64) - if (Environment::isFamilyARM(arch)) + if (Environment::isFamilyARM(arch)) { return arm::FormatterInternal::formatFeature(sb, featureId); + } #endif return kErrorInvalidArch; @@ -125,26 +129,31 @@ Error formatLabel( if (emitter && emitter->code()) { const LabelEntry* le = emitter->code()->labelEntry(labelId); - if (ASMJIT_UNLIKELY(!le)) + if (ASMJIT_UNLIKELY(!le)) { return sb.appendFormat("", labelId); + } if (le->hasName()) { if (le->hasParent()) { uint32_t parentId = le->parentId(); const LabelEntry* pe = emitter->code()->labelEntry(parentId); - if (ASMJIT_UNLIKELY(!pe)) + if (ASMJIT_UNLIKELY(!pe)) { ASMJIT_PROPAGATE(sb.appendFormat("", labelId)); - else if (ASMJIT_UNLIKELY(!pe->hasName())) + } + else if (ASMJIT_UNLIKELY(!pe->hasName())) { ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId)); - else + } + else { ASMJIT_PROPAGATE(sb.append(pe->name())); + } ASMJIT_PROPAGATE(sb.append('.')); } - if (le->type() == LabelType::kAnonymous) + if (le->type() == LabelType::kAnonymous) { ASMJIT_PROPAGATE(sb.appendFormat("L%u@", labelId)); + } return sb.append(le->name()); } } @@ -161,13 +170,15 @@ Error formatRegister( 
uint32_t regId) noexcept { #if !defined(ASMJIT_NO_X86) - if (Environment::isFamilyX86(arch)) + if (Environment::isFamilyX86(arch)) { return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId); + } #endif #if !defined(ASMJIT_NO_AARCH64) - if (Environment::isFamilyARM(arch)) + if (Environment::isFamilyARM(arch)) { return arm::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId); + } #endif return kErrorInvalidArch; @@ -181,13 +192,15 @@ Error formatOperand( const Operand_& op) noexcept { #if !defined(ASMJIT_NO_X86) - if (Environment::isFamilyX86(arch)) + if (Environment::isFamilyX86(arch)) { return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op); + } #endif #if !defined(ASMJIT_NO_AARCH64) - if (Environment::isFamilyARM(arch)) + if (Environment::isFamilyARM(arch)) { return arm::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op); + } #endif return kErrorInvalidArch; @@ -201,12 +214,14 @@ ASMJIT_API Error formatDataType( { DebugUtils::unused(formatFlags); - if (ASMJIT_UNLIKELY(uint32_t(arch) > uint32_t(Arch::kMaxValue))) + if (ASMJIT_UNLIKELY(uint32_t(arch) > uint32_t(Arch::kMaxValue))) { return DebugUtils::errored(kErrorInvalidArch); + } uint32_t typeSize = TypeUtils::sizeOf(typeId); - if (typeSize == 0 || typeSize > 8) + if (typeSize == 0 || typeSize > 8) { return DebugUtils::errored(kErrorInvalidState); + } uint32_t typeSizeLog2 = Support::ctz(typeSize); return sb.append(wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))]); @@ -220,8 +235,9 @@ static Error formatDataHelper(String& sb, const char* typeName, uint32_t typeSiz for (size_t i = 0; i < itemCount; i++) { uint64_t v = 0; - if (i != 0) + if (i != 0) { ASMJIT_PROPAGATE(sb.append(", ", 2)); + } switch (typeSize) { case 1: v = data[0]; break; @@ -241,16 +257,18 @@ Error formatData( String& sb, FormatFlags formatFlags, Arch arch, - TypeId typeId, const void* data, size_t 
itemCount, size_t repeatCount) noexcept -{ + TypeId typeId, const void* data, size_t itemCount, size_t repeatCount +) noexcept { DebugUtils::unused(formatFlags); - if (ASMJIT_UNLIKELY(!Environment::isDefinedArch(arch))) + if (ASMJIT_UNLIKELY(!Environment::isDefinedArch(arch))) { return DebugUtils::errored(kErrorInvalidArch); + } uint32_t typeSize = TypeUtils::sizeOf(typeId); - if (typeSize == 0) + if (typeSize == 0) { return DebugUtils::errored(kErrorInvalidState); + } if (!Support::isPowerOf2(typeSize)) { itemCount *= typeSize; @@ -265,8 +283,9 @@ Error formatData( uint32_t typeSizeLog2 = Support::ctz(typeSize); const char* wordName = wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))]; - if (repeatCount > 1) + if (repeatCount > 1) { ASMJIT_PROPAGATE(sb.appendFormat(".repeat %zu ", repeatCount)); + } return formatDataHelper(sb, wordName, typeSize, static_cast(data), itemCount); } @@ -279,13 +298,15 @@ Error formatInstruction( const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept { #if !defined(ASMJIT_NO_X86) - if (Environment::isFamilyX86(arch)) + if (Environment::isFamilyX86(arch)) { return x86::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount); + } #endif #if !defined(ASMJIT_NO_AARCH64) - if (Environment::isFamilyAArch64(arch)) + if (Environment::isFamilyAArch64(arch)) { return a64::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount); + } #endif return kErrorInvalidArch; @@ -301,8 +322,9 @@ static Error formatFuncValue(String& sb, FormatFlags formatFlags, const BaseEmit if (value.isAssigned()) { ASMJIT_PROPAGATE(sb.append('@')); - if (value.isIndirect()) + if (value.isIndirect()) { ASMJIT_PROPAGATE(sb.append('[')); + } // NOTE: It should be either reg or stack, but never both. 
We // use two IFs on purpose so if the FuncValue is both it would @@ -315,8 +337,9 @@ static Error formatFuncValue(String& sb, FormatFlags formatFlags, const BaseEmit ASMJIT_PROPAGATE(sb.appendFormat("[%d]", int(value.stackOffset()))); } - if (value.isIndirect()) + if (value.isIndirect()) { ASMJIT_PROPAGATE(sb.append(']')); + } } return kErrorOk; @@ -330,19 +353,23 @@ static Error formatFuncValuePack( const RegOnly* vRegs) noexcept { size_t count = pack.count(); - if (!count) + if (!count) { return sb.append("void"); + } - if (count > 1) - sb.append('['); + if (count > 1) { + ASMJIT_PROPAGATE(sb.append('[')); + } for (uint32_t valueIndex = 0; valueIndex < count; valueIndex++) { const FuncValue& value = pack[valueIndex]; - if (!value) + if (!value) { break; + } - if (valueIndex) + if (valueIndex) { ASMJIT_PROPAGATE(sb.append(", ")); + } ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, cc, value)); @@ -350,15 +377,17 @@ static Error formatFuncValuePack( const VirtReg* virtReg = nullptr; static const char nullReg[] = ""; - if (vRegs[valueIndex].isReg() && cc->isVirtIdValid(vRegs[valueIndex].id())) + if (vRegs[valueIndex].isReg() && cc->isVirtIdValid(vRegs[valueIndex].id())) { virtReg = cc->virtRegById(vRegs[valueIndex].id()); + } ASMJIT_PROPAGATE(sb.appendFormat(" %s", virtReg ? 
virtReg->name() : nullReg)); } } - if (count > 1) - sb.append(']'); + if (count > 1) { + ASMJIT_PROPAGATE(sb.append(']')); + } return kErrorOk; } @@ -380,13 +409,14 @@ static Error formatFuncArgs( const FuncNode::ArgPack* argPacks) noexcept { uint32_t argCount = fd.argCount(); - if (!argCount) + if (!argCount) { return sb.append("void"); + } for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) { - if (argIndex) + if (argIndex) { ASMJIT_PROPAGATE(sb.append(", ")); - + } ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, cc, fd.argPack(argIndex), argPacks[argIndex]._data)); } @@ -400,8 +430,9 @@ Error formatNode( const BaseBuilder* builder, const BaseNode* node) noexcept { - if (node->hasPosition() && formatOptions.hasFlag(FormatFlags::kPositions)) + if (node->hasPosition() && formatOptions.hasFlag(FormatFlags::kPositions)) { ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node->position())); + } size_t startLineIndex = sb.size(); @@ -542,8 +573,9 @@ Error formatNode( size_t requiredPadding = paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine); size_t currentPadding = sb.size() - startLineIndex; - if (currentPadding < requiredPadding) + if (currentPadding < requiredPadding) { ASMJIT_PROPAGATE(sb.appendChars(' ', requiredPadding - currentPadding)); + } ASMJIT_PROPAGATE(sb.append("; ")); ASMJIT_PROPAGATE(sb.append(node->inlineComment())); diff --git a/src/asmjit/core/formatter.h b/src/asmjit/core/formatter.h index d2c7655..780af0c 100644 --- a/src/asmjit/core/formatter.h +++ b/src/asmjit/core/formatter.h @@ -108,28 +108,39 @@ public: //! \{ //! Returns format flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _flags; } + //! Tests whether the given `flag` is set in format flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return Support::test(_flags, flag); } //! Resets all format flags to `flags`. 
ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _flags = flags; } + //! Adds `flags` to format flags. ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _flags |= flags; } + //! Removes `flags` from format flags. ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _flags &= ~flags; } //! Returns indentation for the given indentation `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint8_t indentation(FormatIndentationGroup group) const noexcept { return _indentation[group]; } + //! Sets indentation for the given indentation `group`. ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup group, uint32_t n) noexcept { _indentation[group] = uint8_t(n); } + //! Resets indentation for the given indentation `group` to zero. ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup group) noexcept { _indentation[group] = uint8_t(0); } //! Returns padding for the given padding `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup group) const noexcept { return _padding[group]; } + //! Sets padding for the given padding `group`. ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup group, size_t n) noexcept { _padding[group] = uint16_t(n); } + //! Resets padding for the given padding `group` to zero, which means that a default padding will be used //! based on the target architecture properties. 
ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup group) noexcept { _padding[group] = uint16_t(0); } diff --git a/src/asmjit/core/formatter_p.h b/src/asmjit/core/formatter_p.h index 6070fd7..941e62e 100644 --- a/src/asmjit/core/formatter_p.h +++ b/src/asmjit/core/formatter_p.h @@ -16,7 +16,7 @@ ASMJIT_BEGIN_NAMESPACE namespace Formatter { -static ASMJIT_FORCE_INLINE size_t paddingFromOptions(const FormatOptions& formatOptions, FormatPaddingGroup group) noexcept { +static ASMJIT_INLINE size_t paddingFromOptions(const FormatOptions& formatOptions, FormatPaddingGroup group) noexcept { static constexpr uint16_t _defaultPaddingTable[uint32_t(FormatPaddingGroup::kMaxValue) + 1] = { 44, 26 }; static_assert(uint32_t(FormatPaddingGroup::kMaxValue) + 1 == 2, "If a new group is defined it must be added here"); diff --git a/src/asmjit/core/func.cpp b/src/asmjit/core/func.cpp index a8a6d3a..b273b92 100644 --- a/src/asmjit/core/func.cpp +++ b/src/asmjit/core/func.cpp @@ -27,13 +27,15 @@ ASMJIT_FAVOR_SIZE Error CallConv::init(CallConvId ccId, const Environment& envir reset(); #if !defined(ASMJIT_NO_X86) - if (environment.isFamilyX86()) + if (environment.isFamilyX86()) { return x86::FuncInternal::initCallConv(*this, ccId, environment); + } #endif #if !defined(ASMJIT_NO_AARCH64) - if (environment.isFamilyAArch64()) + if (environment.isFamilyAArch64()) { return a64::FuncInternal::initCallConv(*this, ccId, environment); + } #endif return DebugUtils::errored(kErrorInvalidArgument); @@ -46,8 +48,9 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E CallConvId ccId = signature.callConvId(); uint32_t argCount = signature.argCount(); - if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs)) + if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs)) { return DebugUtils::errored(kErrorInvalidArgument); + } CallConv& cc = _callConv; ASMJIT_PROPAGATE(cc.init(ccId, environment)); @@ -65,17 +68,20 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const 
FuncSignature& signature, const E _vaIndex = uint8_t(signature.vaIndex()); TypeId ret = signature.ret(); - if (ret != TypeId::kVoid) + if (ret != TypeId::kVoid) { _rets[0].initTypeId(TypeUtils::deabstract(ret, deabstractDelta)); + } #if !defined(ASMJIT_NO_X86) - if (environment.isFamilyX86()) + if (environment.isFamilyX86()) { return x86::FuncInternal::initFuncDetail(*this, signature, registerSize); + } #endif #if !defined(ASMJIT_NO_AARCH64) - if (environment.isFamilyAArch64()) + if (environment.isFamilyAArch64()) { return a64::FuncInternal::initFuncDetail(*this, signature); + } #endif // We should never bubble here as if `cc.init()` succeeded then there has to be an implementation for the current @@ -88,8 +94,9 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept { Arch arch = func.callConv().arch(); - if (!Environment::isValidArch(arch)) + if (!Environment::isValidArch(arch)) { return DebugUtils::errored(kErrorInvalidArch); + } const ArchTraits& archTraits = ArchTraits::byArch(arch); @@ -104,8 +111,9 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept { uint32_t naturalStackAlignment = func.callConv().naturalStackAlignment(); uint32_t minDynamicAlignment = Support::max(naturalStackAlignment, 16); - if (minDynamicAlignment == naturalStackAlignment) + if (minDynamicAlignment == naturalStackAlignment) { minDynamicAlignment <<= 1; + } _naturalStackAlignment = uint8_t(naturalStackAlignment); _minDynamicAlignment = uint8_t(minDynamicAlignment); @@ -137,8 +145,9 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept { // ==================== ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept { - if (!Environment::isValidArch(arch())) + if (!Environment::isValidArch(arch())) { return DebugUtils::errored(kErrorInvalidArch); + } const ArchTraits& archTraits = ArchTraits::byArch(arch()); @@ -148,9 +157,7 @@ 
ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept { // The final stack alignment must be updated accordingly to call and local stack alignments. uint32_t stackAlignment = _finalStackAlignment; - ASMJIT_ASSERT(stackAlignment == Support::max(_naturalStackAlignment, - _callStackAlignment, - _localStackAlignment)); + ASMJIT_ASSERT(stackAlignment == Support::max(_naturalStackAlignment, _callStackAlignment, _localStackAlignment)); bool hasFP = hasPreservedFP(); bool hasDA = hasDynamicAlignment(); @@ -165,32 +172,37 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept { // Currently required by ARM, if this works differently across architectures we would have to generalize most // likely in CallConv. - if (kLr != BaseReg::kIdBad) + if (kLr != BaseReg::kIdBad) { _dirtyRegs[RegGroup::kGp] |= Support::bitMask(kLr); + } } // These two are identical if the function doesn't align its stack dynamically. uint32_t saRegId = _saRegId; - if (saRegId == BaseReg::kIdBad) + if (saRegId == BaseReg::kIdBad) { saRegId = kSp; + } // Fix stack arguments base-register from SP to FP in case it was not picked before and the function performs // dynamic stack alignment. - if (hasDA && saRegId == kSp) + if (hasDA && saRegId == kSp) { saRegId = kFp; + } // Mark as dirty any register but SP if used as SA pointer. - if (saRegId != kSp) + if (saRegId != kSp) { _dirtyRegs[RegGroup::kGp] |= Support::bitMask(saRegId); + } _spRegId = uint8_t(kSp); _saRegId = uint8_t(saRegId); // Setup stack size used to save preserved registers. 
uint32_t saveRestoreSizes[2] {}; - for (RegGroup group : RegGroupVirtValues{}) + for (RegGroup group : RegGroupVirtValues{}) { saveRestoreSizes[size_t(!archTraits.hasInstPushPop(group))] += Support::alignUp(Support::popcnt(savedRegs(group)) * saveRestoreRegSize(group), saveRestoreAlignment(group)); + } _pushPopSaveSize = uint16_t(saveRestoreSizes[0]); _extraRegSaveSize = uint16_t(saveRestoreSizes[1]); @@ -235,22 +247,25 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept { // (basically the native register/pointer size). We don't adjust it now as `v` now contains the exact size // that the function requires to adjust (call frame + stack frame, vec stack size). The stack (if we consider // this size) is misaligned now, as it's always aligned before the function call - when `call()` is executed - // it pushes the current EIP|RIP onto the stack, and misaligns it by 12 or 8 bytes (depending on the + // it pushes the current EIP|RIP onto the stack, and unaligns it by 12 or 8 bytes (depending on the // architecture). So count number of bytes needed to align it up to the function's CallFrame (the beginning). - if (v || hasFuncCalls() || !returnAddressSize) + if (v || hasFuncCalls() || !returnAddressSize) { v += Support::alignUpDiff(v + pushPopSaveSize() + returnAddressSize, stackAlignment); + } _pushPopSaveOffset = v; // Store 'pushPopSaveOffset' <- Function's push/pop save/restore starts here. _stackAdjustment = v; // Store 'stackAdjustment' <- SA used by 'add SP, SA' and 'sub SP, SA'. v += _pushPopSaveSize; // Count 'pushPopSaveSize' <- Function's push/pop save/restore ends here. _finalStackSize = v; // Store 'finalStackSize' <- Final stack used by the function. - if (!archTraits.hasLinkReg()) + if (!archTraits.hasLinkReg()) { v += registerSize; // Count 'ReturnAddress' <- As CALL pushes onto stack. + } // If the function performs dynamic stack alignment then the stack-adjustment must be aligned. 
- if (hasDA) + if (hasDA) { _stackAdjustment = Support::alignUp(_stackAdjustment, stackAlignment); + } // Calculate where the function arguments start relative to SP. _saOffsetFromSP = hasDA ? FuncFrame::kTagInvalidOffset : v; @@ -269,8 +284,9 @@ ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) co Arch arch = frame.arch(); const FuncDetail* func = funcDetail(); - if (!func) + if (!func) { return DebugUtils::errored(kErrorInvalidState); + } RAConstraints constraints; ASMJIT_PROPAGATE(constraints.init(arch)); diff --git a/src/asmjit/core/func.h b/src/asmjit/core/func.h index bb517c3..8fee7fc 100644 --- a/src/asmjit/core/func.h +++ b/src/asmjit/core/func.h @@ -152,7 +152,7 @@ struct CallConv { //! \note This is not really AsmJit's limitation, it's just the number that makes sense considering all common //! calling conventions. Usually even conventions that use registers to pass function arguments are limited to 8 //! and less arguments passed via registers per group. - static constexpr uint32_t kMaxRegArgsPerGroup = 16; + static inline constexpr uint32_t kMaxRegArgsPerGroup = 16; //! \} @@ -228,46 +228,66 @@ struct CallConv { //! \{ //! Returns the target architecture of this calling convention. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; } + //! Sets the target architecture of this calling convention. ASMJIT_INLINE_NODEBUG void setArch(Arch arch) noexcept { _arch = arch; } //! Returns the calling convention id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG CallConvId id() const noexcept { return _id; } + //! Sets the calling convention id. ASMJIT_INLINE_NODEBUG void setId(CallConvId ccId) noexcept { _id = ccId; } //! Returns the strategy used to assign registers to arguments. + [[nodiscard]] ASMJIT_INLINE_NODEBUG CallConvStrategy strategy() const noexcept { return _strategy; } + //! Sets the strategy used to assign registers to arguments. 
ASMJIT_INLINE_NODEBUG void setStrategy(CallConvStrategy ccStrategy) noexcept { _strategy = ccStrategy; } //! Tests whether the calling convention has the given `flag` set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(CallConvFlags flag) const noexcept { return Support::test(_flags, flag); } + //! Returns the calling convention flags, see `Flags`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG CallConvFlags flags() const noexcept { return _flags; } + //! Adds the calling convention flags, see `Flags`. ASMJIT_INLINE_NODEBUG void setFlags(CallConvFlags flag) noexcept { _flags = flag; }; + //! Adds the calling convention flags, see `Flags`. ASMJIT_INLINE_NODEBUG void addFlags(CallConvFlags flags) noexcept { _flags |= flags; }; //! Tests whether this calling convention specifies 'RedZone'. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRedZone() const noexcept { return _redZoneSize != 0; } + //! Tests whether this calling convention specifies 'SpillZone'. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasSpillZone() const noexcept { return _spillZoneSize != 0; } //! Returns size of 'RedZone'. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _redZoneSize; } - //! Returns size of 'SpillZone'. - ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _spillZoneSize; } //! Sets size of 'RedZone'. ASMJIT_INLINE_NODEBUG void setRedZoneSize(uint32_t size) noexcept { _redZoneSize = uint8_t(size); } + + //! Returns size of 'SpillZone'. + [[nodiscard]] + ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _spillZoneSize; } + //! Sets size of 'SpillZone'. ASMJIT_INLINE_NODEBUG void setSpillZoneSize(uint32_t size) noexcept { _spillZoneSize = uint8_t(size); } //! Returns a natural stack alignment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; } + //! Sets a natural stack alignment. //! //! 
This function can be used to override the default stack alignment in case that you know that it's alignment is @@ -275,22 +295,28 @@ struct CallConv { ASMJIT_INLINE_NODEBUG void setNaturalStackAlignment(uint32_t value) noexcept { _naturalStackAlignment = uint8_t(value); } //! Returns the size of a register (or its part) to be saved and restored of the given `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t saveRestoreRegSize(RegGroup group) const noexcept { return _saveRestoreRegSize[group]; } + //! Sets the size of a vector register (or its part) to be saved and restored. ASMJIT_INLINE_NODEBUG void setSaveRestoreRegSize(RegGroup group, uint32_t size) noexcept { _saveRestoreRegSize[group] = uint8_t(size); } //! Returns the alignment of a save-restore area of the given `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t saveRestoreAlignment(RegGroup group) const noexcept { return _saveRestoreAlignment[group]; } + //! Sets the alignment of a save-restore area of the given `group`. ASMJIT_INLINE_NODEBUG void setSaveRestoreAlignment(RegGroup group, uint32_t alignment) noexcept { _saveRestoreAlignment[group] = uint8_t(alignment); } //! Returns the order of passed registers of the given `group`. + [[nodiscard]] inline const uint8_t* passedOrder(RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _passedOrder[size_t(group)].id; } //! Returns the mask of passed registers of the given `group`. + [[nodiscard]] inline RegMask passedRegs(RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _passedRegs[size_t(group)]; @@ -335,6 +361,7 @@ struct CallConv { } //! Returns preserved register mask of the given `group`. + [[nodiscard]] inline RegMask preservedRegs(RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _preservedRegs[group]; @@ -365,7 +392,7 @@ struct FuncSignature { //! \{ //! Doesn't have variable number of arguments (`...`). 
- static constexpr uint8_t kNoVarArgs = 0xFFu; + static inline constexpr uint8_t kNoVarArgs = 0xFFu; //! \} @@ -391,19 +418,19 @@ struct FuncSignature { //! \{ //! Default constructed function signature, initialized to \ref CallConvId::kCDecl, having no return value and no arguments. - ASMJIT_FORCE_INLINE constexpr FuncSignature() = default; + ASMJIT_INLINE_CONSTEXPR FuncSignature() = default; //! Copy constructor, which is initialized to the same function signature as `other`. - ASMJIT_FORCE_INLINE constexpr FuncSignature(const FuncSignature& other) = default; + ASMJIT_INLINE_CONSTEXPR FuncSignature(const FuncSignature& other) = default; //! Initializes the function signature with calling convention id `ccId` and variable argument's index `vaIndex`. - ASMJIT_FORCE_INLINE constexpr FuncSignature(CallConvId ccId, uint32_t vaIndex = kNoVarArgs) noexcept + ASMJIT_INLINE_CONSTEXPR FuncSignature(CallConvId ccId, uint32_t vaIndex = kNoVarArgs) noexcept : _ccId(ccId), _vaIndex(uint8_t(vaIndex)) {} //! Initializes the function signature with calling convention id `ccId`, `vaIndex`, return value, and function arguments. template - ASMJIT_FORCE_INLINE constexpr FuncSignature(CallConvId ccId, uint32_t vaIndex, TypeId ret, Args&&...args) noexcept + ASMJIT_INLINE_CONSTEXPR FuncSignature(CallConvId ccId, uint32_t vaIndex, TypeId ret, Args&&...args) noexcept : _ccId(ccId), _argCount(uint8_t(sizeof...(args))), _vaIndex(uint8_t(vaIndex)), @@ -417,7 +444,8 @@ struct FuncSignature { //! a convenience function that allows to build function signature statically based on types known at compile time, //! which is common in JIT code generation. 
template - static ASMJIT_INLINE_NODEBUG constexpr FuncSignature build(CallConvId ccId = CallConvId::kCDecl, uint32_t vaIndex = kNoVarArgs) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR FuncSignature build(CallConvId ccId = CallConvId::kCDecl, uint32_t vaIndex = kNoVarArgs) noexcept { return FuncSignature(ccId, vaIndex, (TypeId(TypeUtils::TypeIdOfT::kTypeId))... ); } @@ -427,12 +455,15 @@ struct FuncSignature { //! \{ //! Copy assignment - function signature can be copied by value. - ASMJIT_FORCE_INLINE FuncSignature& operator=(const FuncSignature& other) noexcept = default; + ASMJIT_INLINE FuncSignature& operator=(const FuncSignature& other) noexcept = default; //! Compares this function signature with `other` for equality.. - ASMJIT_FORCE_INLINE bool operator==(const FuncSignature& other) const noexcept { return equals(other); } + [[nodiscard]] + ASMJIT_INLINE bool operator==(const FuncSignature& other) const noexcept { return equals(other); } + //! Compares this function signature with `other` for inequality.. - ASMJIT_FORCE_INLINE bool operator!=(const FuncSignature& other) const noexcept { return !equals(other); } + [[nodiscard]] + ASMJIT_INLINE bool operator!=(const FuncSignature& other) const noexcept { return !equals(other); } //! \} @@ -448,6 +479,7 @@ struct FuncSignature { //! \{ //! Compares this function signature with `other` for equality.. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool equals(const FuncSignature& other) const noexcept { return _ccId == other._ccId && _argCount == other._argCount && @@ -462,27 +494,38 @@ struct FuncSignature { //! \{ //! Returns the calling convention. - ASMJIT_INLINE_NODEBUG CallConvId callConvId() const noexcept { return _ccId; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR CallConvId callConvId() const noexcept { return _ccId; } + //! 
Sets the calling convention to `ccId`; - ASMJIT_INLINE_NODEBUG void setCallConvId(CallConvId ccId) noexcept { _ccId = ccId; } + ASMJIT_INLINE_CONSTEXPR void setCallConvId(CallConvId ccId) noexcept { _ccId = ccId; } //! Tests whether the function signature has a return value. - ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _ret != TypeId::kVoid; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasRet() const noexcept { return _ret != TypeId::kVoid; } + //! Returns the type of the return value. - ASMJIT_INLINE_NODEBUG TypeId ret() const noexcept { return _ret; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR TypeId ret() const noexcept { return _ret; } + //! Sets the return type to `retType`. - ASMJIT_INLINE_NODEBUG void setRet(TypeId retType) noexcept { _ret = retType; } + ASMJIT_INLINE_CONSTEXPR void setRet(TypeId retType) noexcept { _ret = retType; } + //! Sets the return type based on `T`. template - ASMJIT_INLINE_NODEBUG void setRetT() noexcept { setRet(TypeId(TypeUtils::TypeIdOfT::kTypeId)); } + ASMJIT_INLINE_CONSTEXPR void setRetT() noexcept { setRet(TypeId(TypeUtils::TypeIdOfT::kTypeId)); } //! Returns the array of function arguments' types. - ASMJIT_INLINE_NODEBUG const TypeId* args() const noexcept { return _args; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR const TypeId* args() const noexcept { return _args; } + //! Returns the number of function arguments. - ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _argCount; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t argCount() const noexcept { return _argCount; } //! Returns the type of the argument at index `i`. + [[nodiscard]] inline TypeId arg(uint32_t i) const noexcept { ASMJIT_ASSERT(i < _argCount); return _args[i]; @@ -493,6 +536,7 @@ struct FuncSignature { ASMJIT_ASSERT(index < _argCount); _args[index] = argType; } + //! Sets the argument at index `i` to the type based on `T`. 
template inline void setArgT(uint32_t index) noexcept { setArg(index, TypeId(TypeUtils::TypeIdOfT::kTypeId)); } @@ -503,6 +547,7 @@ struct FuncSignature { //! to use this function. However, if you are adding arguments based on user input, for example, then either check //! the number of arguments before using function signature or use \ref canAddArg() before actually adding them to //! the function signature. + [[nodiscard]] inline bool canAddArg() const noexcept { return _argCount < Globals::kMaxFuncArgs; } //! Appends an argument of `type` to the function prototype. @@ -516,11 +561,16 @@ struct FuncSignature { inline void addArgT() noexcept { addArg(TypeId(TypeUtils::TypeIdOfT::kTypeId)); } //! Tests whether the function has variable number of arguments (...). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; } + //! Returns the variable arguments (...) index, `kNoVarArgs` if none. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t vaIndex() const noexcept { return _vaIndex; } + //! Sets the variable arguments (...) index to `index`. ASMJIT_INLINE_NODEBUG void setVaIndex(uint32_t index) noexcept { _vaIndex = uint8_t(index); } + //! Resets the variable arguments index (making it a non-va function). ASMJIT_INLINE_NODEBUG void resetVaIndex() noexcept { _vaIndex = kNoVarArgs; } @@ -620,45 +670,68 @@ struct FuncValue { //! \endcond //! Tests whether the `FuncValue` has a flag `flag` set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(uint32_t flag) const noexcept { return Support::test(_data, flag); } + //! Adds `flags` to `FuncValue`. ASMJIT_INLINE_NODEBUG void addFlags(uint32_t flags) noexcept { _data |= flags; } + //! Clears `flags` of `FuncValue`. ASMJIT_INLINE_NODEBUG void clearFlags(uint32_t flags) noexcept { _data &= ~flags; } //! Tests whether the value is initialized (i.e. contains a valid data). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _data != 0; } + //! 
Tests whether the argument is passed by register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isReg() const noexcept { return hasFlag(kFlagIsReg); } + //! Tests whether the argument is passed by stack. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return hasFlag(kFlagIsStack); } + //! Tests whether the argument is passed by register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAssigned() const noexcept { return hasFlag(kFlagIsReg | kFlagIsStack); } + //! Tests whether the argument is passed through a pointer (used by WIN64 to pass XMM|YMM|ZMM). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isIndirect() const noexcept { return hasFlag(kFlagIsIndirect); } //! Tests whether the argument was already processed (used internally). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isDone() const noexcept { return hasFlag(kFlagIsDone); } //! Returns a register type of the register used to pass function argument or return value. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegType regType() const noexcept { return RegType((_data & kRegTypeMask) >> kRegTypeShift); } + //! Sets a register type of the register used to pass function argument or return value. ASMJIT_INLINE_NODEBUG void setRegType(RegType regType) noexcept { _replaceValue(kRegTypeMask, uint32_t(regType) << kRegTypeShift); } //! Returns a physical id of the register used to pass function argument or return value. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t regId() const noexcept { return (_data & kRegIdMask) >> kRegIdShift; } + //! Sets a physical id of the register used to pass function argument or return value. ASMJIT_INLINE_NODEBUG void setRegId(uint32_t regId) noexcept { _replaceValue(kRegIdMask, regId << kRegIdShift); } //! Returns a stack offset of this argument. + [[nodiscard]] ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return int32_t(_data & kStackOffsetMask) >> kStackOffsetShift; } + //! Sets a stack offset of this argument. 
ASMJIT_INLINE_NODEBUG void setStackOffset(int32_t offset) noexcept { _replaceValue(kStackOffsetMask, uint32_t(offset) << kStackOffsetShift); } //! Tests whether the argument or return value has associated `TypeId`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasTypeId() const noexcept { return Support::test(_data, kTypeIdMask); } + //! Returns a TypeId of this argument or return value. + [[nodiscard]] ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return TypeId((_data & kTypeIdMask) >> kTypeIdShift); } + //! Sets a TypeId of this argument or return value. ASMJIT_INLINE_NODEBUG void setTypeId(TypeId typeId) noexcept { _replaceValue(kTypeIdMask, uint32_t(typeId) << kTypeIdShift); } @@ -692,6 +765,7 @@ public: //! \{ //! Calculates how many values are in the pack, checking for non-values from the end. + [[nodiscard]] inline uint32_t count() const noexcept { uint32_t n = Globals::kMaxValuePack; while (n && !_values[n - 1]) @@ -702,8 +776,11 @@ public: //! Returns values in this value in the pack. //! //! \note The returned array has exactly \ref Globals::kMaxValuePack elements. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncValue* values() noexcept { return _values; } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const FuncValue* values() const noexcept { return _values; } //! Resets a value at the given `index` in the pack, which makes it unassigned. @@ -740,11 +817,14 @@ public: //! Accesses the value in the pack at the given `index`. //! //! \note The maximum index value is `Globals::kMaxValuePack - 1`. + [[nodiscard]] inline FuncValue& operator[](size_t index) { ASMJIT_ASSERT(index < Globals::kMaxValuePack); return _values[index]; } + //! \overload + [[nodiscard]] inline const FuncValue& operator[](size_t index) const { ASMJIT_ASSERT(index < Globals::kMaxValuePack); return _values[index]; @@ -809,7 +889,7 @@ public: //! \{ //! Function doesn't have a variable number of arguments (`...`). 
- static constexpr uint8_t kNoVarArgs = 0xFFu; + static inline constexpr uint8_t kNoVarArgs = 0xFFu; //! \} @@ -871,52 +951,72 @@ public: //! \{ //! Returns the function's calling convention, see `CallConv`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const CallConv& callConv() const noexcept { return _callConv; } //! Returns the associated calling convention flags, see `CallConv::Flags`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG CallConvFlags flags() const noexcept { return _callConv.flags(); } + //! Checks whether a CallConv `flag` is set, see `CallConv::Flags`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(CallConvFlags ccFlag) const noexcept { return _callConv.hasFlag(ccFlag); } //! Tests whether the function has a return value. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return bool(_rets[0]); } + //! Returns the number of function arguments. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _argCount; } //! Returns function return values. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncValuePack& retPack() noexcept { return _rets; } + //! Returns function return values. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const FuncValuePack& retPack() const noexcept { return _rets; } //! Returns a function return value associated with the given `valueIndex`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncValue& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; } + //! Returns a function return value associated with the given `valueIndex` (const). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const FuncValue& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; } //! Returns function argument packs array. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncValuePack* argPacks() noexcept { return _args; } + //! Returns function argument packs array (const). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const FuncValuePack* argPacks() const noexcept { return _args; } //! Returns function argument pack at the given `argIndex`. 
+ [[nodiscard]] inline FuncValuePack& argPack(size_t argIndex) noexcept { ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs); return _args[argIndex]; } //! Returns function argument pack at the given `argIndex` (const). + [[nodiscard]] inline const FuncValuePack& argPack(size_t argIndex) const noexcept { ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs); return _args[argIndex]; } //! Returns an argument at `valueIndex` from the argument pack at the given `argIndex`. + [[nodiscard]] inline FuncValue& arg(size_t argIndex, size_t valueIndex = 0) noexcept { ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs); return _args[argIndex][valueIndex]; } //! Returns an argument at `valueIndex` from the argument pack at the given `argIndex` (const). + [[nodiscard]] inline const FuncValue& arg(size_t argIndex, size_t valueIndex = 0) const noexcept { ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs); return _args[argIndex][valueIndex]; @@ -931,28 +1031,43 @@ public: } //! Tests whether the function has variable arguments. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; } + //! Returns an index of a first variable argument. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t vaIndex() const noexcept { return _vaIndex; } //! Tests whether the function passes one or more argument by stack. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasStackArgs() const noexcept { return _argStackSize != 0; } + //! Returns stack size needed for function arguments passed on the stack. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t argStackSize() const noexcept { return _argStackSize; } //! Returns red zone size. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _callConv.redZoneSize(); } + //! Returns spill zone size. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _callConv.spillZoneSize(); } + //! Returns natural stack alignment. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _callConv.naturalStackAlignment(); } //! Returns a mask of all passed registers of the given register `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask passedRegs(RegGroup group) const noexcept { return _callConv.passedRegs(group); } + //! Returns a mask of all preserved registers of the given register `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask preservedRegs(RegGroup group) const noexcept { return _callConv.preservedRegs(group); } //! Returns a mask of all used registers of the given register `group`. + [[nodiscard]] inline RegMask usedRegs(RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _usedRegs[size_t(group)]; @@ -1012,10 +1127,8 @@ public: //! \name Constants //! \{ - enum : uint32_t { - //! Tag used to inform that some offset is invalid. - kTagInvalidOffset = 0xFFFFFFFFu - }; + //! Tag used to inform that some offset is invalid. + static inline constexpr uint32_t kTagInvalidOffset = 0xFFFFFFFFu; //! \} @@ -1129,90 +1242,133 @@ public: //! \{ //! Returns the target architecture of the function frame. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; } //! Returns function frame attributes, see `Attributes`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _attributes; } + //! Checks whether the FuncFame contains an attribute `attr`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAttribute(FuncAttributes attr) const noexcept { return Support::test(_attributes, attr); } + //! Adds attributes `attrs` to the FuncFrame. ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _attributes |= attrs; } + //! Clears attributes `attrs` from the FrameFrame. ASMJIT_INLINE_NODEBUG void clearAttributes(FuncAttributes attrs) noexcept { _attributes &= ~attrs; } //! Tests whether the function has variable number of arguments. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return hasAttribute(FuncAttributes::kHasVarArgs); } + //! Sets the variable arguments flag. ASMJIT_INLINE_NODEBUG void setVarArgs() noexcept { addAttributes(FuncAttributes::kHasVarArgs); } + //! Resets variable arguments flag. ASMJIT_INLINE_NODEBUG void resetVarArgs() noexcept { clearAttributes(FuncAttributes::kHasVarArgs); } //! Tests whether the function preserves frame pointer (EBP|ESP on X86). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasPreservedFP() const noexcept { return hasAttribute(FuncAttributes::kHasPreservedFP); } + //! Enables preserved frame pointer. ASMJIT_INLINE_NODEBUG void setPreservedFP() noexcept { addAttributes(FuncAttributes::kHasPreservedFP); } + //! Disables preserved frame pointer. ASMJIT_INLINE_NODEBUG void resetPreservedFP() noexcept { clearAttributes(FuncAttributes::kHasPreservedFP); } //! Tests whether the function calls other functions. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFuncCalls() const noexcept { return hasAttribute(FuncAttributes::kHasFuncCalls); } + //! Sets `FuncAttributes::kHasFuncCalls` to true. ASMJIT_INLINE_NODEBUG void setFuncCalls() noexcept { addAttributes(FuncAttributes::kHasFuncCalls); } + //! Sets `FuncAttributes::kHasFuncCalls` to false. ASMJIT_INLINE_NODEBUG void resetFuncCalls() noexcept { clearAttributes(FuncAttributes::kHasFuncCalls); } //! Tests whether the function uses indirect branch protection, see \ref FuncAttributes::kIndirectBranchProtection. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasIndirectBranchProtection() const noexcept { return hasAttribute(FuncAttributes::kIndirectBranchProtection); } + //! Enabled indirect branch protection (sets `FuncAttributes::kIndirectBranchProtection` attribute to true). ASMJIT_INLINE_NODEBUG void setIndirectBranchProtection() noexcept { addAttributes(FuncAttributes::kIndirectBranchProtection); } + //! 
Disables indirect branch protection (sets `FuncAttributes::kIndirectBranchProtection` attribute to false). ASMJIT_INLINE_NODEBUG void resetIndirectBranchProtection() noexcept { clearAttributes(FuncAttributes::kIndirectBranchProtection); } //! Tests whether the function has AVX enabled. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAvxEnabled() const noexcept { return hasAttribute(FuncAttributes::kX86_AVXEnabled); } + //! Enables AVX use. ASMJIT_INLINE_NODEBUG void setAvxEnabled() noexcept { addAttributes(FuncAttributes::kX86_AVXEnabled); } + //! Disables AVX use. ASMJIT_INLINE_NODEBUG void resetAvxEnabled() noexcept { clearAttributes(FuncAttributes::kX86_AVXEnabled); } //! Tests whether the function has AVX-512 enabled. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAvx512Enabled() const noexcept { return hasAttribute(FuncAttributes::kX86_AVX512Enabled); } + //! Enables AVX-512 use. ASMJIT_INLINE_NODEBUG void setAvx512Enabled() noexcept { addAttributes(FuncAttributes::kX86_AVX512Enabled); } + //! Disables AVX-512 use. ASMJIT_INLINE_NODEBUG void resetAvx512Enabled() noexcept { clearAttributes(FuncAttributes::kX86_AVX512Enabled); } //! Tests whether the function has MMX cleanup - 'emms' instruction in epilog. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasMmxCleanup() const noexcept { return hasAttribute(FuncAttributes::kX86_MMXCleanup); } + //! Enables MMX cleanup. ASMJIT_INLINE_NODEBUG void setMmxCleanup() noexcept { addAttributes(FuncAttributes::kX86_MMXCleanup); } + //! Disables MMX cleanup. ASMJIT_INLINE_NODEBUG void resetMmxCleanup() noexcept { clearAttributes(FuncAttributes::kX86_MMXCleanup); } //! Tests whether the function has AVX cleanup - 'vzeroupper' instruction in epilog. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvxCleanup() const noexcept { return hasAttribute(FuncAttributes::kX86_AVXCleanup); } + //! Enables AVX cleanup. ASMJIT_INLINE_NODEBUG void setAvxCleanup() noexcept { addAttributes(FuncAttributes::kX86_AVXCleanup); } + //! Disables AVX cleanup. 
ASMJIT_INLINE_NODEBUG void resetAvxCleanup() noexcept { clearAttributes(FuncAttributes::kX86_AVXCleanup); } //! Tests whether the function uses call stack. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasCallStack() const noexcept { return _callStackSize != 0; } + //! Tests whether the function uses local stack. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasLocalStack() const noexcept { return _localStackSize != 0; } + //! Tests whether vector registers can be saved and restored by using aligned reads and writes. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAlignedVecSR() const noexcept { return hasAttribute(FuncAttributes::kAlignedVecSR); } + //! Tests whether the function has to align stack dynamically. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasDynamicAlignment() const noexcept { return _finalStackAlignment >= _minDynamicAlignment; } //! Tests whether the calling convention specifies 'RedZone'. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRedZone() const noexcept { return _redZoneSize != 0; } - //! Tests whether the calling convention specifies 'SpillZone'. - ASMJIT_INLINE_NODEBUG bool hasSpillZone() const noexcept { return _spillZoneSize != 0; } //! Returns the size of 'RedZone'. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _redZoneSize; } + + //! Tests whether the calling convention specifies 'SpillZone'. + [[nodiscard]] + ASMJIT_INLINE_NODEBUG bool hasSpillZone() const noexcept { return _spillZoneSize != 0; } + //! Returns the size of 'SpillZone'. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _spillZoneSize; } //! Resets the size of red zone, which would disable it entirely. @@ -1224,20 +1380,31 @@ public: ASMJIT_INLINE_NODEBUG void resetRedZone() noexcept { _redZoneSize = 0; } //! Returns natural stack alignment (guaranteed stack alignment upon entry). 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; } + //! Returns natural stack alignment (guaranteed stack alignment upon entry). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t minDynamicAlignment() const noexcept { return _minDynamicAlignment; } //! Tests whether the callee must adjust SP before returning (X86-STDCALL only) + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasCalleeStackCleanup() const noexcept { return _calleeStackCleanup != 0; } + //! Returns home many bytes of the stack the callee must adjust before returning (X86-STDCALL only) + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t calleeStackCleanup() const noexcept { return _calleeStackCleanup; } //! Returns call stack alignment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t callStackAlignment() const noexcept { return _callStackAlignment; } + //! Returns local stack alignment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t localStackAlignment() const noexcept { return _localStackAlignment; } + //! Returns final stack alignment (the maximum value of call, local, and natural stack alignments). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t finalStackAlignment() const noexcept { return _finalStackAlignment; } //! Sets call stack alignment. @@ -1273,42 +1440,57 @@ public: } //! Returns call stack size. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t callStackSize() const noexcept { return _callStackSize; } + //! Returns local stack size. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t localStackSize() const noexcept { return _localStackSize; } //! Sets call stack size. ASMJIT_INLINE_NODEBUG void setCallStackSize(uint32_t size) noexcept { _callStackSize = size; } + //! Sets local stack size. ASMJIT_INLINE_NODEBUG void setLocalStackSize(uint32_t size) noexcept { _localStackSize = size; } //! Combines call stack size with `size`, updating it to the greater value. 
ASMJIT_INLINE_NODEBUG void updateCallStackSize(uint32_t size) noexcept { _callStackSize = Support::max(_callStackSize, size); } + //! Combines local stack size with `size`, updating it to the greater value. ASMJIT_INLINE_NODEBUG void updateLocalStackSize(uint32_t size) noexcept { _localStackSize = Support::max(_localStackSize, size); } //! Returns final stack size (only valid after the FuncFrame is finalized). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t finalStackSize() const noexcept { return _finalStackSize; } //! Returns an offset to access the local stack (non-zero only if call stack is used). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t localStackOffset() const noexcept { return _localStackOffset; } //! Tests whether the function prolog/epilog requires a memory slot for storing unaligned SP. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasDAOffset() const noexcept { return _daOffset != kTagInvalidOffset; } + //! Returns a memory offset used to store DA (dynamic alignment) slot (relative to SP). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t daOffset() const noexcept { return _daOffset; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t saOffset(uint32_t regId) const noexcept { return regId == _spRegId ? saOffsetFromSP() : saOffsetFromSA(); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t saOffsetFromSP() const noexcept { return _saOffsetFromSP; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t saOffsetFromSA() const noexcept { return _saOffsetFromSA; } //! Returns mask of registers of the given register `group` that are modified by the function. The engine would //! then calculate which registers must be saved & restored by the function by using the data provided by the //! calling convention. + [[nodiscard]] inline RegMask dirtyRegs(RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _dirtyRegs[group]; @@ -1360,61 +1542,82 @@ public: //! 
Returns a calculated mask of registers of the given `group` that will be saved and restored in the function's //! prolog and epilog, respectively. The register mask is calculated from both `dirtyRegs` (provided by user) and //! `preservedMask` (provided by the calling convention). + [[nodiscard]] inline RegMask savedRegs(RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _dirtyRegs[group] & _preservedRegs[group]; } //! Returns all dirty registers as a Support::Array<> type. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RegMasks& dirtyRegs() const noexcept { return _dirtyRegs; } //! Returns all preserved registers as a Support::Array<> type. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RegMasks& preservedRegs() const noexcept { return _preservedRegs; } //! Returns the mask of preserved registers of the given register `group`. //! //! Preserved registers are those that must survive the function call unmodified. The function can only modify //! preserved registers it they are saved and restored in function's prolog and epilog, respectively. + [[nodiscard]] inline RegMask preservedRegs(RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _preservedRegs[group]; } //! Returns the size of a save-restore are for the required register `group`. + [[nodiscard]] inline uint32_t saveRestoreRegSize(RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _saveRestoreRegSize[group]; } + //! Returns the alignment that must be guaranteed to save/restore the required register `group`. 
+ [[nodiscard]] inline uint32_t saveRestoreAlignment(RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _saveRestoreAlignment[group]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t saRegId() const noexcept { return _saRegId; } + ASMJIT_INLINE_NODEBUG void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); } + ASMJIT_INLINE_NODEBUG void resetSARegId() { setSARegId(BaseReg::kIdBad); } //! Returns stack size required to save/restore registers via push/pop. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t pushPopSaveSize() const noexcept { return _pushPopSaveSize; } + //! Returns an offset to the stack where registers are saved via push/pop. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t pushPopSaveOffset() const noexcept { return _pushPopSaveOffset; } //! Returns stack size required to save/restore extra registers that don't use push/pop/ //! //! \note On X86 this covers all registers except GP registers, on other architectures it can be always //! zero (for example AArch64 saves all registers via push/pop like instructions, so this would be zero). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t extraRegSaveSize() const noexcept { return _extraRegSaveSize; } + //! Returns an offset to the stack where extra registers are saved. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t extraRegSaveOffset() const noexcept { return _extraRegSaveOffset; } //! Tests whether the functions contains stack adjustment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasStackAdjustment() const noexcept { return _stackAdjustment != 0; } + //! Returns function's stack adjustment used in function's prolog and epilog. //! //! If the returned value is zero it means that the stack is not adjusted. This can mean both that the stack //! is not used and/or the stack is only adjusted by instructions that pust/pop registers into/from stack. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t stackAdjustment() const noexcept { return _stackAdjustment; } //! \} @@ -1477,30 +1680,41 @@ public: //! \{ //! Returns the associated \ref FuncDetail of this `FuncArgsAssignment`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const FuncDetail* funcDetail() const noexcept { return _funcDetail; } + //! Associates \ref FuncDetails with this `FuncArgsAssignment`. ASMJIT_INLINE_NODEBUG void setFuncDetail(const FuncDetail* fd) noexcept { _funcDetail = fd; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t saRegId() const noexcept { return _saRegId; } + ASMJIT_INLINE_NODEBUG void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); } + ASMJIT_INLINE_NODEBUG void resetSARegId() { _saRegId = uint8_t(BaseReg::kIdBad); } //! Returns assigned argument at `argIndex` and `valueIndex`. //! //! \note `argIndex` refers to he function argument and `valueIndex` refers to a value pack (in case multiple //! values are passed as a single argument). + [[nodiscard]] inline FuncValue& arg(size_t argIndex, size_t valueIndex) noexcept { ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks)); return _argPacks[argIndex][valueIndex]; } + //! \overload + [[nodiscard]] inline const FuncValue& arg(size_t argIndex, size_t valueIndex) const noexcept { ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks)); return _argPacks[argIndex][valueIndex]; } //! Tests whether argument at `argIndex` and `valueIndex` has been assigned. 
+ [[nodiscard]] inline bool isAssigned(size_t argIndex, size_t valueIndex) const noexcept { ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks)); return _argPacks[argIndex][valueIndex].isAssigned(); diff --git a/src/asmjit/core/funcargscontext_p.h b/src/asmjit/core/funcargscontext_p.h index 3c6814f..4124305 100644 --- a/src/asmjit/core/funcargscontext_p.h +++ b/src/asmjit/core/funcargscontext_p.h @@ -28,26 +28,29 @@ static inline OperandSignature getSuitableRegForMemToMemMove(Arch arch, TypeId d uint32_t regSize = Environment::registerSizeFromArch(arch); OperandSignature signature{0}; - if (maxSize <= regSize || (TypeUtils::isInt(dstTypeId) && TypeUtils::isInt(srcTypeId))) + if (maxSize <= regSize || (TypeUtils::isInt(dstTypeId) && TypeUtils::isInt(srcTypeId))) { signature = maxSize <= 4 ? archTraits.regTypeToSignature(RegType::kGp32) : archTraits.regTypeToSignature(RegType::kGp64); - else if (maxSize <= 8 && archTraits.hasRegType(RegType::kVec64)) + } + else if (maxSize <= 8 && archTraits.hasRegType(RegType::kVec64)) { signature = archTraits.regTypeToSignature(RegType::kVec64); - else if (maxSize <= 16 && archTraits.hasRegType(RegType::kVec128)) + } + else if (maxSize <= 16 && archTraits.hasRegType(RegType::kVec128)) { signature = archTraits.regTypeToSignature(RegType::kVec128); - else if (maxSize <= 32 && archTraits.hasRegType(RegType::kVec256)) + } + else if (maxSize <= 32 && archTraits.hasRegType(RegType::kVec256)) { signature = archTraits.regTypeToSignature(RegType::kVec256); - else if (maxSize <= 64 && archTraits.hasRegType(RegType::kVec512)) + } + else if (maxSize <= 64 && archTraits.hasRegType(RegType::kVec512)) { signature = archTraits.regTypeToSignature(RegType::kVec512); + } return signature; } class FuncArgsContext { public: - enum VarId : uint32_t { - kVarIdNone = 0xFF - }; + static inline constexpr uint32_t kVarIdNone = 0xFF; //! Contains information about a single argument or SA register that may need shuffling. 
struct Var { @@ -107,6 +110,7 @@ public: memset(_physToVarId, kVarIdNone, 32); } + [[nodiscard]] inline bool isAssigned(uint32_t regId) const noexcept { ASMJIT_ASSERT(regId < 32); return Support::bitTest(_assignedRegs, regId); @@ -150,11 +154,22 @@ public: _assignedRegs ^= Support::bitMask(regId); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask archRegs() const noexcept { return _archRegs; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask workRegs() const noexcept { return _workRegs; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask usedRegs() const noexcept { return _usedRegs; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask assignedRegs() const noexcept { return _assignedRegs; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask dstRegs() const noexcept { return _dstRegs; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask availableRegs() const noexcept { return _workRegs & ~_assignedRegs; } }; @@ -179,13 +194,22 @@ public: FuncArgsContext() noexcept; + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ArchTraits& archTraits() const noexcept { return *_archTraits; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t varCount() const noexcept { return _varCount; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t indexOf(const Var* var) const noexcept { return (size_t)(var - _vars); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG Var& var(size_t varId) noexcept { return _vars[varId]; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const Var& var(size_t varId) const noexcept { return _vars[varId]; } Error initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept; diff --git a/src/asmjit/core/globals.h b/src/asmjit/core/globals.h index db921cf..1d35de8 100644 --- a/src/asmjit/core/globals.h +++ b/src/asmjit/core/globals.h @@ -26,19 +26,19 @@ struct PlacementNew { void* ptr; }; #if defined(ASMJIT_NO_STDCXX) namespace Support { - ASMJIT_FORCE_INLINE void* 
operatorNew(size_t n) noexcept { return malloc(n); } - ASMJIT_FORCE_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); } + ASMJIT_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); } + ASMJIT_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); } } // {Support} #define ASMJIT_BASE_CLASS(TYPE) \ - ASMJIT_FORCE_INLINE void* operator new(size_t n) noexcept { return Support::operatorNew(n); } \ - ASMJIT_FORCE_INLINE void operator delete(void* ptr) noexcept { Support::operatorDelete(ptr); } \ + ASMJIT_INLINE void* operator new(size_t n) noexcept { return Support::operatorNew(n); } \ + ASMJIT_INLINE void operator delete(void* ptr) noexcept { Support::operatorDelete(ptr); } \ \ - ASMJIT_FORCE_INLINE void* operator new(size_t, void* ptr) noexcept { return ptr; } \ - ASMJIT_FORCE_INLINE void operator delete(void*, void*) noexcept {} \ + ASMJIT_INLINE void* operator new(size_t, void* ptr) noexcept { return ptr; } \ + ASMJIT_INLINE void operator delete(void*, void*) noexcept {} \ \ - ASMJIT_FORCE_INLINE void* operator new(size_t, Support::PlacementNew ptr) noexcept { return ptr.ptr; } \ - ASMJIT_FORCE_INLINE void operator delete(void*, Support::PlacementNew) noexcept {} + ASMJIT_INLINE void* operator new(size_t, Support::PlacementNew ptr) noexcept { return ptr.ptr; } \ + ASMJIT_INLINE void operator delete(void*, Support::PlacementNew) noexcept {} #else #define ASMJIT_BASE_CLASS(TYPE) #endif @@ -69,7 +69,7 @@ enum class ResetPolicy : uint32_t { kHard = 1 }; -//! Contains typedefs, constants, and variables used globally by AsmJit. +//! Contains constants and variables used globally across AsmJit. namespace Globals { //! Host memory allocator overhead. @@ -152,7 +152,7 @@ static ASMJIT_INLINE_NODEBUG void* func_as_ptr(Func func) noexcept { return Supp //! \{ //! AsmJit error type (uint32_t). -typedef uint32_t Error; +using Error = uint32_t; //! AsmJit error codes. 
enum ErrorCode : uint32_t { @@ -357,9 +357,11 @@ static ASMJIT_INLINE_NODEBUG void unused(Args&&...) noexcept {} //! //! Provided for debugging purposes. Putting a breakpoint inside `errored` can help with tracing the origin of any //! error reported / returned by AsmJit. +[[nodiscard]] static constexpr Error errored(Error err) noexcept { return err; } //! Returns a printable version of `asmjit::Error` code. +[[nodiscard]] ASMJIT_API const char* errorAsString(Error err) noexcept; //! Called to output debugging message(s). @@ -375,7 +377,8 @@ ASMJIT_API void debugOutput(const char* str) noexcept; //! (asmjit/core/globals.cpp). A call stack will be available when such assertion failure is triggered. AsmJit //! always returns errors on failures, assertions are a last resort and usually mean unrecoverable state due to out //! of range array access or totally invalid arguments like nullptr where a valid pointer should be provided, etc... -ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept; +[[noreturn]] +ASMJIT_API void assertionFailed(const char* file, int line, const char* msg) noexcept; } // {DebugUtils} @@ -385,9 +388,9 @@ ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, cons #if defined(ASMJIT_BUILD_DEBUG) #define ASMJIT_ASSERT(...) \ do { \ - if (ASMJIT_LIKELY(__VA_ARGS__)) \ - break; \ - ::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \ + if (ASMJIT_UNLIKELY(!(__VA_ARGS__))) { \ + ::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \ + } \ } while (0) #else #define ASMJIT_ASSERT(...) ((void)0) @@ -399,9 +402,10 @@ ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, cons //! internally, but kept public for users that want to use the same technique to propagate errors to the caller. #define ASMJIT_PROPAGATE(...) 
\ do { \ - ::asmjit::Error _err = __VA_ARGS__; \ - if (ASMJIT_UNLIKELY(_err)) \ - return _err; \ + ::asmjit::Error _err_ = __VA_ARGS__; \ + if (ASMJIT_UNLIKELY(_err_)) { \ + return _err_; \ + } \ } while (0) //! \} diff --git a/src/asmjit/core/inst.h b/src/asmjit/core/inst.h index f857f2e..110e144 100644 --- a/src/asmjit/core/inst.h +++ b/src/asmjit/core/inst.h @@ -26,7 +26,7 @@ ASMJIT_BEGIN_NAMESPACE //! //! - \ref x86::Inst (X86 and X86_64) //! - \ref a64::Inst (AArch64) -typedef uint32_t InstId; +using InstId = uint32_t; //! Instruction id parts. //! @@ -267,16 +267,21 @@ public: //! \{ //! Returns the instruction id with modifiers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _id; } + //! Sets the instruction id and modiiers from `id`. ASMJIT_INLINE_NODEBUG void setId(InstId id) noexcept { _id = id; } + //! Resets the instruction id and modifiers to zero, see \ref kIdNone. ASMJIT_INLINE_NODEBUG void resetId() noexcept { _id = 0; } //! Returns a real instruction id that doesn't contain any modifiers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _id & uint32_t(InstIdParts::kRealId); } template + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t getInstIdPart() const noexcept { return (uint32_t(_id) & uint32_t(kPart)) >> Support::ConstCTZ::value; } @@ -291,11 +296,24 @@ public: //! \name Instruction Options //! \{ + //! Returns instruction options associated with this instruction. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstOptions options() const noexcept { return _options; } + + //! Tests whether the given instruction `option` is enabled. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return Support::test(_options, option); } + + //! Replaces all instruction options by the given `options`. ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _options = options; } + + //! Adds instruction options provided by `options`. 
ASMJIT_INLINE_NODEBUG void addOptions(InstOptions options) noexcept { _options |= options; } + + //! Clears instruction options provided by `options`. ASMJIT_INLINE_NODEBUG void clearOptions(InstOptions options) noexcept { _options &= ~options; } + + //! Resets all instruction options to `InstOptions::kNone` (there will be no instruction options active after reset). ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options = InstOptions::kNone; } //! \} @@ -303,11 +321,23 @@ public: //! \name Extra Register //! \{ + //! Tests whether the instruction has associated an extra register. + //! + //! \note Extra registers are currently only used on X86 by AVX-512 masking such as `{k}` and `{k}{z}` and by repeated + //! instructions to explicitly assign a virtual register that would be ECX/RCX. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _extraReg; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; } + ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); } + ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); } + ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); } //! \} @@ -315,10 +345,15 @@ public: //! \name ARM Specific //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG arm::CondCode armCondCode() const noexcept { return (arm::CondCode)getInstIdPart(); } + ASMJIT_INLINE_NODEBUG void setArmCondCode(arm::CondCode cc) noexcept { setInstIdPart(uint32_t(cc)); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG a32::DataType armDt() const noexcept { return (a32::DataType)getInstIdPart(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG a32::DataType armDt2() const noexcept { return (a32::DataType)getInstIdPart(); } //! \} @@ -326,26 +361,31 @@ public: //! \name Statics //! 
\{ - static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, arm::CondCode cc) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, arm::CondCode cc) noexcept { return id | (uint32_t(cc) << Support::ConstCTZ::value); } - static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, a32::DataType dt, arm::CondCode cc = arm::CondCode::kAL) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, a32::DataType dt, arm::CondCode cc = arm::CondCode::kAL) noexcept { return id | (uint32_t(dt) << Support::ConstCTZ::value) | (uint32_t(cc) << Support::ConstCTZ::value); } - static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, a32::DataType dt, a32::DataType dt2, arm::CondCode cc = arm::CondCode::kAL) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, a32::DataType dt, a32::DataType dt2, arm::CondCode cc = arm::CondCode::kAL) noexcept { return id | (uint32_t(dt) << Support::ConstCTZ::value) | (uint32_t(dt2) << Support::ConstCTZ::value) | (uint32_t(cc) << Support::ConstCTZ::value); } - static ASMJIT_INLINE_NODEBUG constexpr InstId extractRealId(uint32_t id) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR InstId extractRealId(uint32_t id) noexcept { return id & uint32_t(InstIdParts::kRealId); } - static ASMJIT_INLINE_NODEBUG constexpr arm::CondCode extractARMCondCode(uint32_t id) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR arm::CondCode extractARMCondCode(uint32_t id) noexcept { return (arm::CondCode)((uint32_t(id) & uint32_t(InstIdParts::kARM_Cond)) >> Support::ConstCTZ::value); } @@ -543,39 +583,56 @@ struct OpRWInfo { //! \{ //! Returns operand flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG OpRWFlags opFlags() const noexcept { return _opFlags; } + //! Tests whether operand flags contain the given `flag`. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasOpFlag(OpRWFlags flag) const noexcept { return Support::test(_opFlags, flag); } //! Adds the given `flags` to operand flags. ASMJIT_INLINE_NODEBUG void addOpFlags(OpRWFlags flags) noexcept { _opFlags |= flags; } + //! Removes the given `flags` from operand flags. ASMJIT_INLINE_NODEBUG void clearOpFlags(OpRWFlags flags) noexcept { _opFlags &= ~flags; } //! Tests whether this operand is read from. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasOpFlag(OpRWFlags::kRead); } + //! Tests whether this operand is written to. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasOpFlag(OpRWFlags::kWrite); } + //! Tests whether this operand is both read and write. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRW; } + //! Tests whether this operand is read only. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRead; } + //! Tests whether this operand is write only. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kWrite; } //! Returns the type of a lead register, which is followed by consecutive registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t consecutiveLeadCount() const noexcept { return _consecutiveLeadCount; } //! Tests whether this operand is Reg/Mem //! //! Reg/Mem operands can use either register or memory. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isRm() const noexcept { return hasOpFlag(OpRWFlags::kRegMem); } //! Tests whether the operand will be zero extended. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isZExt() const noexcept { return hasOpFlag(OpRWFlags::kZExt); } //! Tests whether the operand must have allocated a unique physical id that cannot be shared with other register //! operands. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasOpFlag(OpRWFlags::kUnique); } //! \} @@ -585,37 +642,63 @@ struct OpRWInfo { //! Tests whether this is a fake memory operand, which is only used, because of encoding. Fake memory operands do //! not access any memory, they are only used to encode registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemFake() const noexcept { return hasOpFlag(OpRWFlags::kMemFake); } //! Tests whether the instruction's memory BASE register is used. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemBaseUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRW); } + //! Tests whether the instruction reads from its BASE registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemBaseRead() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRead); } + //! Tests whether the instruction writes to its BASE registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemBaseWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseWrite); } + //! Tests whether the instruction reads and writes from/to its BASE registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemBaseReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRW; } + //! Tests whether the instruction only reads from its BASE registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemBaseReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRead; } + //! Tests whether the instruction only writes to its BASE registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemBaseWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseWrite; } //! Tests whether the instruction modifies the BASE register before it uses it to calculate the target address. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemBasePreModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePreModify); } + //! 
Tests whether the instruction modifies the BASE register after it uses it to calculate the target address. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemBasePostModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePostModify); } //! Tests whether the instruction's memory INDEX register is used. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemIndexUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRW); } + //! Tests whether the instruction reads the INDEX registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemIndexRead() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRead); } + //! Tests whether the instruction writes to its INDEX registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemIndexWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexWrite); } + //! Tests whether the instruction reads and writes from/to its INDEX registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemIndexReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRW; } + //! Tests whether the instruction only reads from its INDEX registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemIndexReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRead; } + //! Tests whether the instruction only writes to its INDEX registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMemIndexWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexWrite; } //! \} @@ -626,9 +709,13 @@ struct OpRWInfo { //! Returns a physical id of the register that is fixed for this operand. //! //! Returns \ref BaseReg::kIdBad if any register can be used. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t physId() const noexcept { return _physId; } + //! Tests whether \ref physId() would return a valid physical register id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasPhysId() const noexcept { return _physId != BaseReg::kIdBad; } + //! 
Sets physical register id, which would be fixed for this operand. ASMJIT_INLINE_NODEBUG void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); } @@ -638,7 +725,9 @@ struct OpRWInfo { //! \{ //! Returns Reg/Mem size of the operand. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; } + //! Sets Reg/Mem size of the operand. ASMJIT_INLINE_NODEBUG void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); } @@ -648,16 +737,23 @@ struct OpRWInfo { //! \{ //! Returns read mask. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t readByteMask() const noexcept { return _readByteMask; } - //! Returns write mask. - ASMJIT_INLINE_NODEBUG uint64_t writeByteMask() const noexcept { return _writeByteMask; } - //! Returns extend mask. - ASMJIT_INLINE_NODEBUG uint64_t extendByteMask() const noexcept { return _extendByteMask; } //! Sets read mask. ASMJIT_INLINE_NODEBUG void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; } + + //! Returns write mask. + [[nodiscard]] + ASMJIT_INLINE_NODEBUG uint64_t writeByteMask() const noexcept { return _writeByteMask; } + //! Sets write mask. ASMJIT_INLINE_NODEBUG void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; } + + //! Returns extend mask. + [[nodiscard]] + ASMJIT_INLINE_NODEBUG uint64_t extendByteMask() const noexcept { return _extendByteMask; } + //! Sets extend mask. ASMJIT_INLINE_NODEBUG void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; } @@ -712,12 +808,15 @@ struct InstRWInfo { //! \{ //! Returns flags associated with the instruction, see \ref InstRWFlags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstRWFlags instFlags() const noexcept { return _instFlags; } //! Tests whether the instruction flags contain `flag`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasInstFlag(InstRWFlags flag) const noexcept { return Support::test(_instFlags, flag); } //! Tests whether the instruction flags contain \ref InstRWFlags::kMovOp. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMovOp() const noexcept { return hasInstFlag(InstRWFlags::kMovOp); } //! \} @@ -726,8 +825,11 @@ struct InstRWInfo { //! \{ //! Returns a mask of CPU flags read. + [[nodiscard]] ASMJIT_INLINE_NODEBUG CpuRWFlags readFlags() const noexcept { return _readFlags; } + //! Returns a mask of CPU flags written. + [[nodiscard]] ASMJIT_INLINE_NODEBUG CpuRWFlags writeFlags() const noexcept { return _writeFlags; } //! \} @@ -745,6 +847,7 @@ struct InstRWInfo { //! Some AVX+ instructions may require extra features for replacing registers with memory operands, for example //! VPSLLDQ instruction only supports `vpslldq reg, reg, imm` combination on AVX/AVX2 capable CPUs and requires //! AVX-512 for `vpslldq reg, mem, imm` combination. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t rmFeature() const noexcept { return _rmFeature; } //! \} @@ -753,18 +856,22 @@ struct InstRWInfo { //! \{ //! Returns RW information of extra register operand (extraReg). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const OpRWInfo& extraReg() const noexcept { return _extraReg; } //! Returns RW information of all instruction's operands. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const OpRWInfo* operands() const noexcept { return _operands; } //! Returns RW information of the operand at the given `index`. + [[nodiscard]] inline const OpRWInfo& operand(size_t index) const noexcept { ASMJIT_ASSERT(index < Globals::kMaxOpCount); return _operands[index]; } //! Returns the number of operands this instruction has. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _opCount; } //! \} @@ -789,7 +896,7 @@ namespace InstAPI { //! If `instOptions` is zero then only raw instruction name (without any additional text) will be appended. 
ASMJIT_API Error instIdToString(Arch arch, InstId instId, InstStringifyOptions options, String& output) noexcept; -ASMJIT_DEPRECATED("Use `instIdToString()` with `InstStringifyOptions` parameter") +[[deprecated("Use `instIdToString()` with `InstStringifyOptions` parameter")]] static inline Error instIdToString(Arch arch, InstId instId, String& output) noexcept { return instIdToString(arch, instId, InstStringifyOptions::kNone, output); } @@ -798,11 +905,13 @@ static inline Error instIdToString(Arch arch, InstId instId, String& output) noe //! `SIZE_MAX` if `s` is known to be null terminated. //! //! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such instruction exists. +[[nodiscard]] ASMJIT_API InstId stringToInstId(Arch arch, const char* s, size_t len) noexcept; #endif // !ASMJIT_NO_TEXT #ifndef ASMJIT_NO_VALIDATION //! Validates the given instruction considering the given `validationFlags`. +[[nodiscard]] ASMJIT_API Error validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags = ValidationFlags::kNone) noexcept; #endif // !ASMJIT_NO_VALIDATION diff --git a/src/asmjit/core/instdb.cpp b/src/asmjit/core/instdb.cpp index 5fae708..a7cfcea 100644 --- a/src/asmjit/core/instdb.cpp +++ b/src/asmjit/core/instdb.cpp @@ -12,12 +12,12 @@ namespace InstNameUtils { static constexpr uint32_t kBufferSize = 32; -static ASMJIT_FORCE_INLINE char decode5BitChar(uint32_t c) noexcept { +static ASMJIT_INLINE_CONSTEXPR char decode5BitChar(uint32_t c) noexcept { uint32_t base = c <= 26 ? 
uint32_t('a') - 1u : uint32_t('0') - 27u; return char(base + c); } -static ASMJIT_FORCE_INLINE size_t decodeToBuffer(char nameOut[kBufferSize], uint32_t nameValue, InstStringifyOptions options, const char* stringTable) noexcept { +static ASMJIT_INLINE size_t decodeToBuffer(char nameOut[kBufferSize], uint32_t nameValue, InstStringifyOptions options, const char* stringTable) noexcept { size_t i; if (nameValue & 0x80000000u) { diff --git a/src/asmjit/core/jitallocator.cpp b/src/asmjit/core/jitallocator.cpp index 2256fb5..7a107b4 100644 --- a/src/asmjit/core/jitallocator.cpp +++ b/src/asmjit/core/jitallocator.cpp @@ -57,22 +57,22 @@ public: size_t _end; T _bitWord; - enum : uint32_t { kBitWordSize = Support::bitSizeOf() }; - enum : T { kXorMask = B == 0 ? Support::allOnes() : T(0) }; + static inline constexpr uint32_t kBitWordSize = Support::bitSizeOf(); + static inline constexpr T kXorMask = B == 0 ? Support::allOnes() : T(0); - ASMJIT_FORCE_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords) noexcept { + ASMJIT_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords) noexcept { init(data, numBitWords); } - ASMJIT_FORCE_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords, size_t start, size_t end) noexcept { + ASMJIT_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords, size_t start, size_t end) noexcept { init(data, numBitWords, start, end); } - ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords) noexcept { + ASMJIT_INLINE void init(const T* data, size_t numBitWords) noexcept { init(data, numBitWords, 0, numBitWords * kBitWordSize); } - ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords, size_t start, size_t end) noexcept { + ASMJIT_INLINE void init(const T* data, size_t numBitWords, size_t start, size_t end) noexcept { ASMJIT_ASSERT(numBitWords >= (end + kBitWordSize - 1) / kBitWordSize); DebugUtils::unused(numBitWords); @@ -80,8 +80,9 @@ public: const T* ptr = data + (idx / kBitWordSize); T 
bitWord = 0; - if (idx < end) + if (idx < end) { bitWord = (*ptr ^ kXorMask) & (Support::allOnes() << (start % kBitWordSize)); + } _ptr = ptr; _idx = idx; @@ -89,12 +90,13 @@ public: _bitWord = bitWord; } - ASMJIT_FORCE_INLINE bool nextRange(size_t* rangeStart, size_t* rangeEnd, size_t rangeHint = std::numeric_limits::max()) noexcept { + ASMJIT_INLINE bool nextRange(size_t* rangeStart, size_t* rangeEnd, size_t rangeHint = std::numeric_limits::max()) noexcept { // Skip all empty BitWords. while (_bitWord == 0) { _idx += kBitWordSize; - if (_idx >= _end) + if (_idx >= _end) { return false; + } _bitWord = (*++_ptr) ^ kXorMask; } @@ -107,8 +109,9 @@ public: *rangeEnd = Support::min(_idx + kBitWordSize, _end); while (*rangeEnd - *rangeStart < rangeHint) { _idx += kBitWordSize; - if (_idx >= _end) + if (_idx >= _end) { break; + } _bitWord = (*++_ptr) ^ kXorMask; if (_bitWord != Support::allOnes()) { @@ -248,7 +251,8 @@ public: uint32_t blockFlags, Support::BitWord* usedBitVector, Support::BitWord* stopBitVector, - uint32_t areaSize) noexcept + uint32_t areaSize + ) noexcept : ZoneTreeNodeT(), _pool(pool), _mapping(mapping), @@ -265,29 +269,51 @@ public: clearBlock(); } + [[nodiscard]] inline JitAllocatorPool* pool() const noexcept { return _pool; } + [[nodiscard]] inline uint8_t* rxPtr() const noexcept { return static_cast(_mapping.rx); } + + [[nodiscard]] inline uint8_t* rwPtr() const noexcept { return static_cast(_mapping.rw); } + [[nodiscard]] inline bool hasFlag(uint32_t f) const noexcept { return (_flags & f) != 0; } + inline void addFlags(uint32_t f) noexcept { _flags |= f; } inline void clearFlags(uint32_t f) noexcept { _flags &= ~f; } + [[nodiscard]] inline bool empty() const noexcept { return hasFlag(kFlagEmpty); } + + [[nodiscard]] inline bool isDirty() const noexcept { return hasFlag(kFlagDirty); } + inline void makeDirty() noexcept { addFlags(kFlagDirty); } + [[nodiscard]] inline bool hasLargePages() const noexcept { return hasFlag(kFlagLargePages); } + + 
[[nodiscard]] inline bool hasInitialPadding() const noexcept { return hasFlag(kFlagInitialPadding); } + [[nodiscard]] inline uint32_t initialAreaStart() const noexcept { return initialAreaStartByFlags(_flags); } + [[nodiscard]] inline size_t blockSize() const noexcept { return _blockSize; } + [[nodiscard]] inline uint32_t areaSize() const noexcept { return _areaSize; } + + [[nodiscard]] inline uint32_t areaUsed() const noexcept { return _areaUsed; } + + [[nodiscard]] inline uint32_t areaAvailable() const noexcept { return _areaSize - _areaUsed; } + + [[nodiscard]] inline uint32_t largestUnusedArea() const noexcept { return _largestUnusedArea; } inline void decreaseUsedArea(uint32_t value) noexcept { @@ -334,10 +360,13 @@ public: clearFlags(kFlagDirty | kFlagEmpty); } else { - if (_searchStart == allocatedAreaStart) + if (_searchStart == allocatedAreaStart) { _searchStart = allocatedAreaEnd; - if (_searchEnd == allocatedAreaEnd) + } + + if (_searchEnd == allocatedAreaEnd) { _searchEnd = allocatedAreaStart; + } addFlags(kFlagDirty); clearFlags(kFlagEmpty); @@ -447,33 +476,40 @@ static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator:: // Setup pool count to [1..3]. size_t poolCount = 1; - if (Support::test(options, JitAllocatorOptions::kUseMultiplePools)) + if (Support::test(options, JitAllocatorOptions::kUseMultiplePools)) { poolCount = kJitAllocatorMultiPoolCount; + } // Setup block size [64kB..256MB]. - if (blockSize < 64 * 1024 || blockSize > 256 * 1024 * 1024 || !Support::isPowerOf2(blockSize)) + if (blockSize < 64 * 1024 || blockSize > 256 * 1024 * 1024 || !Support::isPowerOf2(blockSize)) { blockSize = vmInfo.pageGranularity; + } // Setup granularity [64..256]. - if (granularity < 64 || granularity > 256 || !Support::isPowerOf2(granularity)) + if (granularity < 64 || granularity > 256 || !Support::isPowerOf2(granularity)) { granularity = kJitAllocatorBaseGranularity; + } // Setup fill-pattern. 
- if (uint32_t(options & JitAllocatorOptions::kCustomFillPattern) == 0) + if (uint32_t(options & JitAllocatorOptions::kCustomFillPattern) == 0) { fillPattern = JitAllocator_defaultFillPattern(); + } size_t size = sizeof(JitAllocatorPrivateImpl) + sizeof(JitAllocatorPool) * poolCount; void* p = ::malloc(size); - if (ASMJIT_UNLIKELY(!p)) + + if (ASMJIT_UNLIKELY(!p)) { return nullptr; + } VirtMem::HardenedRuntimeInfo hardenedRtInfo = VirtMem::hardenedRuntimeInfo(); if (Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kEnabled)) { // If we are running within a hardened environment (mapping RWX is not allowed) then we have to use dual mapping // or other runtime capabilities like Apple specific MAP_JIT. There is no point in not enabling these as otherwise // the allocation would fail and JitAllocator would not be able to allocate memory. - if (!Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kMapJit)) + if (!Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kMapJit)) { options |= JitAllocatorOptions::kUseDualMapping; + } } JitAllocatorPool* pools = reinterpret_cast((uint8_t*)p + sizeof(JitAllocatorPrivateImpl)); @@ -485,8 +521,9 @@ static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator:: impl->fillPattern = fillPattern; impl->pageSize = vmInfo.pageSize; - for (size_t poolId = 0; poolId < poolCount; poolId++) + for (size_t poolId = 0; poolId < poolCount; poolId++) { new(Support::PlacementNew{&pools[poolId]}) JitAllocatorPool(granularity << poolId); + } return impl; } @@ -501,8 +538,9 @@ static inline size_t JitAllocatorImpl_sizeToPoolId(const JitAllocatorPrivateImpl size_t granularity = size_t(impl->granularity) << poolId; while (poolId) { - if (Support::alignUp(size, granularity) == size) + if (Support::alignUp(size, granularity) == size) { break; + } poolId--; granularity >>= 1; } @@ -522,18 +560,21 @@ static inline size_t JitAllocatorImpl_calculateIdealBlockSize(JitAllocatorPrivat // We have 
to increase the allocationSize if we know that the block must provide padding. if (!Support::test(impl->options, JitAllocatorOptions::kDisableInitialPadding)) { size_t granularity = pool->granularity; - if (SIZE_MAX - allocationSize < granularity) + if (SIZE_MAX - allocationSize < granularity) { return 0; // Overflown + } allocationSize += granularity; } - if (blockSize < kJitAllocatorMaxBlockSize) + if (blockSize < kJitAllocatorMaxBlockSize) { blockSize *= 2u; + } if (allocationSize > blockSize) { blockSize = Support::alignUp(allocationSize, impl->blockSize); - if (ASMJIT_UNLIKELY(blockSize < allocationSize)) + if (ASMJIT_UNLIKELY(blockSize < allocationSize)) { return 0; // Overflown. + } } return blockSize; @@ -562,8 +603,9 @@ ASMJIT_FAVOR_SPEED static void JitAllocatorImpl_fillPattern(void* mem, uint32_t uint32_t* mem32 = static_cast(mem); size_t n = byteSize / 4u; - for (size_t i = 0; i < n; i++) + for (size_t i = 0; i < n; i++) { mem32[i] = pattern; + } } // Allocate a new `JitAllocatorBlock` for the given `blockSize`. @@ -619,10 +661,12 @@ static Error JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocat // Out of memory... 
if (ASMJIT_UNLIKELY(blockPtr == nullptr)) { - if (Support::test(impl->options, JitAllocatorOptions::kUseDualMapping)) - VirtMem::releaseDualMapping(&virtMem, blockSize); - else - VirtMem::release(virtMem.rx, blockSize); + if (Support::test(impl->options, JitAllocatorOptions::kUseDualMapping)) { + (void)VirtMem::releaseDualMapping(&virtMem, blockSize); + } + else { + (void)VirtMem::release(virtMem.rx, blockSize); + } return DebugUtils::errored(kErrorOutOfMemory); } @@ -640,10 +684,12 @@ static Error JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocat static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { DebugUtils::unused(impl); - if (block->hasFlag(JitAllocatorBlock::kFlagDualMapped)) - VirtMem::releaseDualMapping(&block->_mapping, block->blockSize()); - else - VirtMem::release(block->rxPtr(), block->blockSize()); + if (block->hasFlag(JitAllocatorBlock::kFlagDualMapped)) { + (void)VirtMem::releaseDualMapping(&block->_mapping, block->blockSize()); + } + else { + (void)VirtMem::release(block->rxPtr(), block->blockSize()); + } ::free(block); } @@ -651,8 +697,9 @@ static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAlloc static void JitAllocatorImpl_insertBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { JitAllocatorPool* pool = block->pool(); - if (!pool->cursor) + if (!pool->cursor) { pool->cursor = block; + } // Add to RBTree and List. impl->tree.insert(block); @@ -670,8 +717,9 @@ static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAlloc JitAllocatorPool* pool = block->pool(); // Remove from RBTree and List. - if (pool->cursor == block) + if (pool->cursor == block) { pool->cursor = block->hasPrev() ? 
block->prev() : block->next(); + } impl->tree.remove(block); pool->blocks.unlink(block); @@ -685,8 +733,9 @@ static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAlloc } static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { - if (block->hasFlag(JitAllocatorBlock::kFlagEmpty)) + if (block->hasFlag(JitAllocatorBlock::kFlagEmpty)) { return; + } JitAllocatorPool* pool = block->pool(); if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) { @@ -717,13 +766,15 @@ static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllo JitAllocator::JitAllocator(const CreateParams* params) noexcept { _impl = JitAllocatorImpl_new(params); - if (ASMJIT_UNLIKELY(!_impl)) + if (ASMJIT_UNLIKELY(!_impl)) { _impl = const_cast(&JitAllocatorImpl_none); + } } JitAllocator::~JitAllocator() noexcept { - if (_impl == &JitAllocatorImpl_none) + if (_impl == &JitAllocatorImpl_none) { return; + } reset(ResetPolicy::kHard); JitAllocatorImpl_destroy(static_cast(_impl)); @@ -733,8 +784,9 @@ JitAllocator::~JitAllocator() noexcept { // ==================== void JitAllocator::reset(ResetPolicy resetPolicy) noexcept { - if (_impl == &JitAllocatorImpl_none) + if (_impl == &JitAllocatorImpl_none) { return; + } JitAllocatorPrivateImpl* impl = static_cast(_impl); impl->tree.reset(); @@ -802,19 +854,22 @@ JitAllocator::Statistics JitAllocator::statistics() const noexcept { Error JitAllocator::alloc(Span& out, size_t size) noexcept { out = Span{}; - if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) + if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) { return DebugUtils::errored(kErrorNotInitialized); + } JitAllocatorPrivateImpl* impl = static_cast(_impl); constexpr uint32_t kNoIndex = std::numeric_limits::max(); // Align to the minimum granularity by default. 
size = Support::alignUp(size, impl->granularity); - if (ASMJIT_UNLIKELY(size == 0)) + if (ASMJIT_UNLIKELY(size == 0)) { return DebugUtils::errored(kErrorInvalidArgument); + } - if (ASMJIT_UNLIKELY(size > std::numeric_limits::max() / 2)) + if (ASMJIT_UNLIKELY(size > std::numeric_limits::max() / 2)) { return DebugUtils::errored(kErrorTooLarge); + } LockGuard guard(impl->lock); JitAllocatorPool* pool = &impl->pools[JitAllocatorImpl_sizeToPoolId(impl, size)]; @@ -849,8 +904,9 @@ Error JitAllocator::alloc(Span& out, size_t size) noexcept { largestArea = Support::max(largestArea, rangeSize); } - if (areaIndex != kNoIndex) + if (areaIndex != kNoIndex) { break; + } if (searchStart != SIZE_MAX) { // Because we have iterated over the entire block, we can now mark the @@ -872,8 +928,9 @@ Error JitAllocator::alloc(Span& out, size_t size) noexcept { // Allocate a new block if there is no region of a required size. if (areaIndex == kNoIndex) { size_t blockSize = JitAllocatorImpl_calculateIdealBlockSize(impl, pool, size); - if (ASMJIT_UNLIKELY(!blockSize)) + if (ASMJIT_UNLIKELY(!blockSize)) { return DebugUtils::errored(kErrorOutOfMemory); + } ASMJIT_PROPAGATE(JitAllocatorImpl_newBlock(impl, &block, pool, blockSize)); areaIndex = block->initialAreaStart(); @@ -904,18 +961,21 @@ Error JitAllocator::alloc(Span& out, size_t size) noexcept { } Error JitAllocator::release(void* rx) noexcept { - if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) + if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) { return DebugUtils::errored(kErrorNotInitialized); + } - if (ASMJIT_UNLIKELY(!rx)) + if (ASMJIT_UNLIKELY(!rx)) { return DebugUtils::errored(kErrorInvalidArgument); + } JitAllocatorPrivateImpl* impl = static_cast(_impl); LockGuard guard(impl->lock); JitAllocatorBlock* block = impl->tree.get(static_cast(rx)); - if (ASMJIT_UNLIKELY(!block)) + if (ASMJIT_UNLIKELY(!block)) { return DebugUtils::errored(kErrorInvalidState); + } // Offset relative to the start of the block. 
JitAllocatorPool* pool = block->pool(); @@ -954,8 +1014,9 @@ Error JitAllocator::release(void* rx) noexcept { static Error JitAllocatorImpl_shrink(JitAllocatorPrivateImpl* impl, JitAllocator::Span& span, size_t newSize, bool alreadyUnderWriteScope) noexcept { JitAllocatorBlock* block = static_cast(span._block); - if (ASMJIT_UNLIKELY(!block)) + if (ASMJIT_UNLIKELY(!block)) { return DebugUtils::errored(kErrorInvalidArgument); + } LockGuard guard(impl->lock); @@ -968,16 +1029,18 @@ static Error JitAllocatorImpl_shrink(JitAllocatorPrivateImpl* impl, JitAllocator // Don't trust `span.size()` - if it has been already truncated we would be off... bool isUsed = Support::bitVectorGetBit(block->_usedBitVector, areaStart); - if (ASMJIT_UNLIKELY(!isUsed)) + if (ASMJIT_UNLIKELY(!isUsed)) { return DebugUtils::errored(kErrorInvalidArgument); + } uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1; uint32_t areaPrevSize = areaEnd - areaStart; uint32_t spanPrevSize = areaPrevSize * pool->granularity; uint32_t areaShrunkSize = pool->areaSizeFromByteSize(newSize); - if (ASMJIT_UNLIKELY(areaShrunkSize > areaPrevSize)) + if (ASMJIT_UNLIKELY(areaShrunkSize > areaPrevSize)) { return DebugUtils::errored(kErrorInvalidArgument); + } uint32_t areaDiff = areaPrevSize - areaShrunkSize; if (areaDiff) { @@ -1003,11 +1066,13 @@ static Error JitAllocatorImpl_shrink(JitAllocatorPrivateImpl* impl, JitAllocator } Error JitAllocator::shrink(Span& span, size_t newSize) noexcept { - if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) + if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) { return DebugUtils::errored(kErrorNotInitialized); + } - if (ASMJIT_UNLIKELY(!span.rx())) + if (ASMJIT_UNLIKELY(!span.rx())) { return DebugUtils::errored(kErrorInvalidArgument); + } if (ASMJIT_UNLIKELY(newSize == 0)) { Error err = release(span.rx()); @@ -1021,15 +1086,17 @@ Error JitAllocator::shrink(Span& span, size_t newSize) noexcept { Error JitAllocator::query(Span& 
out, void* rx) const noexcept { out = Span{}; - if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) + if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) { return DebugUtils::errored(kErrorNotInitialized); + } JitAllocatorPrivateImpl* impl = static_cast(_impl); LockGuard guard(impl->lock); JitAllocatorBlock* block = impl->tree.get(static_cast(rx)); - if (ASMJIT_UNLIKELY(!block)) + if (ASMJIT_UNLIKELY(!block)) { return DebugUtils::errored(kErrorInvalidArgument); + } // Offset relative to the start of the block. JitAllocatorPool* pool = block->pool(); @@ -1039,8 +1106,9 @@ Error JitAllocator::query(Span& out, void* rx) const noexcept { uint32_t areaStart = uint32_t(offset >> pool->granularityLog2); bool isUsed = Support::bitVectorGetBit(block->_usedBitVector, areaStart); - if (ASMJIT_UNLIKELY(!isUsed)) + if (ASMJIT_UNLIKELY(!isUsed)) { return DebugUtils::errored(kErrorInvalidArgument); + } uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1; size_t byteOffset = pool->byteSizeFromAreaSize(areaStart); @@ -1057,22 +1125,27 @@ Error JitAllocator::query(Span& out, void* rx) const noexcept { // JitAllocator - Write // ==================== -static ASMJIT_FORCE_INLINE VirtMem::CachePolicy JitAllocator_defaultPolicyForSpan(const JitAllocator::Span& span) noexcept { - if (Support::test(span.flags(), JitAllocator::Span::Flags::kInstructionCacheClean)) +static ASMJIT_INLINE VirtMem::CachePolicy JitAllocator_defaultPolicyForSpan(const JitAllocator::Span& span) noexcept { + if (Support::test(span.flags(), JitAllocator::Span::Flags::kInstructionCacheClean)) { return VirtMem::CachePolicy::kNeverFlush; - else + } + else { return VirtMem::CachePolicy::kFlushAfterWrite; + } } Error JitAllocator::write(Span& span, size_t offset, const void* src, size_t size, VirtMem::CachePolicy policy) noexcept { - if (ASMJIT_UNLIKELY(span._block == nullptr || offset > span.size() || span.size() - offset < size)) + if (ASMJIT_UNLIKELY(span._block == 
nullptr || offset > span.size() || span.size() - offset < size)) { return DebugUtils::errored(kErrorInvalidArgument); + } - if (ASMJIT_UNLIKELY(size == 0)) + if (ASMJIT_UNLIKELY(size == 0)) { return kErrorOk; + } - if (policy == VirtMem::CachePolicy::kDefault) + if (policy == VirtMem::CachePolicy::kDefault) { policy = JitAllocator_defaultPolicyForSpan(span); + } VirtMem::ProtectJitReadWriteScope writeScope(span.rx(), span.size(), policy); memcpy(static_cast(span.rw()) + offset, src, size); @@ -1080,15 +1153,18 @@ Error JitAllocator::write(Span& span, size_t offset, const void* src, size_t siz } Error JitAllocator::write(Span& span, WriteFunc writeFunc, void* userData, VirtMem::CachePolicy policy) noexcept { - if (ASMJIT_UNLIKELY(span._block == nullptr) || span.size() == 0) + if (ASMJIT_UNLIKELY(span._block == nullptr) || span.size() == 0) { return DebugUtils::errored(kErrorInvalidArgument); + } size_t size = span.size(); - if (ASMJIT_UNLIKELY(size == 0)) + if (ASMJIT_UNLIKELY(size == 0)) { return kErrorOk; + } - if (policy == VirtMem::CachePolicy::kDefault) + if (policy == VirtMem::CachePolicy::kDefault) { policy = JitAllocator_defaultPolicyForSpan(span); + } VirtMem::ProtectJitReadWriteScope writeScope(span.rx(), span.size(), policy); ASMJIT_PROPAGATE(writeFunc(span, userData)); @@ -1113,30 +1189,34 @@ Error JitAllocator::beginWriteScope(WriteScopeData& scope, VirtMem::CachePolicy } Error JitAllocator::endWriteScope(WriteScopeData& scope) noexcept { - if (ASMJIT_UNLIKELY(!scope._allocator)) + if (ASMJIT_UNLIKELY(!scope._allocator)) { return DebugUtils::errored(kErrorInvalidArgument); + } return kErrorOk; } Error JitAllocator::flushWriteScope(WriteScopeData& scope) noexcept { - if (ASMJIT_UNLIKELY(!scope._allocator)) + if (ASMJIT_UNLIKELY(!scope._allocator)) { return DebugUtils::errored(kErrorInvalidArgument); + } return kErrorOk; } Error JitAllocator::scopedWrite(WriteScopeData& scope, Span& span, size_t offset, const void* src, size_t size) noexcept { - if 
(ASMJIT_UNLIKELY(!scope._allocator)) + if (ASMJIT_UNLIKELY(!scope._allocator)) { return DebugUtils::errored(kErrorInvalidArgument); + } VirtMem::CachePolicy policy = VirtMem::CachePolicy(scope._data[0]); return scope._allocator->write(span, offset, src, size, policy); } Error JitAllocator::scopedWrite(WriteScopeData& scope, Span& span, WriteFunc writeFunc, void* userData) noexcept { - if (ASMJIT_UNLIKELY(!scope._allocator)) + if (ASMJIT_UNLIKELY(!scope._allocator)) { return DebugUtils::errored(kErrorInvalidArgument); + } VirtMem::CachePolicy policy = VirtMem::CachePolicy(scope._data[0]); return scope._allocator->write(span, writeFunc, userData, policy); @@ -1334,8 +1414,9 @@ public: Record* record = _records.get(static_cast(p)); EXPECT_NOT_NULL(record); - if (!newSize) + if (!newSize) { return release(p); + } JitAllocator::Span span; EXPECT_EQ(_allocator.query(span, p), kErrorOk); @@ -1375,10 +1456,12 @@ static void BitVectorRangeIterator_testRandom(Random& rnd, size_t count) noexcep BitVectorRangeIterator it(in, kPatternSize); size_t rangeStart, rangeEnd; while (it.nextRange(&rangeStart, &rangeEnd)) { - if (Bit) + if (Bit) { Support::bitVectorFill(out, rangeStart, rangeEnd - rangeStart); - else + } + else { Support::bitVectorClear(out, rangeStart, rangeEnd - rangeStart); + } } } @@ -1463,80 +1546,94 @@ static void test_jit_allocator_alloc_release() noexcept { // Random blocks tests... 
INFO(" Allocating random blocks..."); - for (i = 0; i < kCount; i++) + for (i = 0; i < kCount; i++) { ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Releasing all allocated blocks from the beginning..."); - for (i = 0; i < kCount; i++) + for (i = 0; i < kCount; i++) { wrapper.release(ptrArray[i]); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Allocating random blocks again...", kCount); - for (i = 0; i < kCount; i++) + for (i = 0; i < kCount; i++) { ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Shuffling allocated blocks..."); JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng); INFO(" Releasing 50%% of allocated blocks..."); - for (i = 0; i < kCount / 2; i++) + for (i = 0; i < kCount / 2; i++) { wrapper.release(ptrArray[i]); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Allocating 50%% more blocks again..."); - for (i = 0; i < kCount / 2; i++) + for (i = 0; i < kCount / 2; i++) { ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Releasing all allocated blocks from the end..."); - for (i = 0; i < kCount; i++) + for (i = 0; i < kCount; i++) { wrapper.release(ptrArray[kCount - i - 1]); + } JitAllocatorTest_usage(wrapper._allocator); // Fixed blocks tests... 
INFO(" Allocating %zuB blocks...", fixedBlockSize); - for (i = 0; i < kCount / 2; i++) + for (i = 0; i < kCount / 2; i++) { ptrArray[i] = wrapper.alloc(fixedBlockSize); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Shrinking each %zuB block to 1 byte", fixedBlockSize); - for (i = 0; i < kCount / 2; i++) + for (i = 0; i < kCount / 2; i++) { wrapper.shrink(ptrArray[i], 1); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Allocating more 64B blocks...", 64); - for (i = kCount / 2; i < kCount; i++) + for (i = kCount / 2; i < kCount; i++) { ptrArray[i] = wrapper.alloc(64); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Releasing all blocks from the beginning..."); - for (i = 0; i < kCount; i++) + for (i = 0; i < kCount; i++) { wrapper.release(ptrArray[i]); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Allocating %zuB blocks...", fixedBlockSize); - for (i = 0; i < kCount; i++) + for (i = 0; i < kCount; i++) { ptrArray[i] = wrapper.alloc(fixedBlockSize); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Shuffling allocated blocks..."); JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng); INFO(" Releasing 50%% of allocated blocks..."); - for (i = 0; i < kCount / 2; i++) + for (i = 0; i < kCount / 2; i++) { wrapper.release(ptrArray[i]); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Allocating 50%% more %zuB blocks again...", fixedBlockSize); - for (i = 0; i < kCount / 2; i++) + for (i = 0; i < kCount / 2; i++) { ptrArray[i] = wrapper.alloc(fixedBlockSize); + } JitAllocatorTest_usage(wrapper._allocator); INFO(" Releasing all allocated blocks from the end..."); - for (i = 0; i < kCount; i++) + for (i = 0; i < kCount; i++) { wrapper.release(ptrArray[kCount - i - 1]); + } JitAllocatorTest_usage(wrapper._allocator); ::free(ptrArray); diff --git a/src/asmjit/core/jitallocator.h b/src/asmjit/core/jitallocator.h index ed0b76b..28ec4d5 100644 --- a/src/asmjit/core/jitallocator.h +++ b/src/asmjit/core/jitallocator.h @@ -182,6 
+182,7 @@ public: //! Destroys the `JitAllocator` instance and release all blocks held. ASMJIT_API ~JitAllocator() noexcept; + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _impl->blockSize == 0; } //! Free all allocated memory - makes all pointers returned by `alloc()` invalid. @@ -196,15 +197,23 @@ public: //! \{ //! Returns allocator options, see `Flags`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG JitAllocatorOptions options() const noexcept { return _impl->options; } + //! Tests whether the allocator has the given `option` set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasOption(JitAllocatorOptions option) const noexcept { return uint32_t(_impl->options & option) != 0; } //! Returns a base block size (a minimum size of block that the allocator would allocate). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t blockSize() const noexcept { return _impl->blockSize; } + //! Returns granularity of the allocator. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t granularity() const noexcept { return _impl->granularity; } + //! Returns pattern that is used to fill unused memory if `kFlagUseFillPattern` is set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t fillPattern() const noexcept { return _impl->fillPattern; } //! \} @@ -265,6 +274,7 @@ public: //! Returns a pointer having Read & Execute permissions (references executable memory). //! //! This pointer is never NULL if the allocation succeeded, it points to an executable memory. + [[nodiscard]] ASMJIT_INLINE_NODEBUG void* rx() const noexcept { return _rx; } //! Returns a pointer having Read & Write permissions (references writable memory). @@ -284,12 +294,15 @@ public: //! //! If \ref VirtMem::ProtectJitReadWriteScope is not used it's important to clear the instruction cache via //! \ref VirtMem::flushInstructionCache() after the write is done. + [[nodiscard]] ASMJIT_INLINE_NODEBUG void* rw() const noexcept { return _rw; } //! 
Returns size of this span, aligned to the allocator granularity. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; } //! Returns span flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Flags flags() const noexcept { return _flags; } //! Shrinks this span to `newSize`. @@ -300,12 +313,14 @@ public: ASMJIT_INLINE_NODEBUG void shrink(size_t newSize) noexcept { _size = Support::min(_size, newSize); } //! Returns whether \ref rw() returns a non-null pointer. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isDirectlyWritable() const noexcept { return _rw != nullptr; } //! \} }; //! Allocates a new memory span of the requested `size`. + [[nodiscard]] ASMJIT_API Error alloc(Span& out, size_t size) noexcept; //! Releases a memory block returned by `alloc()`. @@ -321,6 +336,7 @@ public: //! Queries information about an allocated memory block that contains the given `rx`, and writes it to `out`. //! //! If the pointer is matched, the function returns `kErrorOk` and fills `out` with the corresponding span. + [[nodiscard]] ASMJIT_API Error query(Span& out, void* rx) const noexcept; //! \} @@ -328,7 +344,7 @@ public: //! \name Write Operations //! \{ - typedef Error (ASMJIT_CDECL* WriteFunc)(Span& span, void* userData) ASMJIT_NOEXCEPT_TYPE; + using WriteFunc = Error (ASMJIT_CDECL*)(Span& span, void* userData) noexcept; ASMJIT_API Error write( Span& span, @@ -344,7 +360,7 @@ public: VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept; template - ASMJIT_FORCE_INLINE Error write( + ASMJIT_INLINE Error write( Span& span, Lambda&& lambdaFunc, VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept { @@ -445,7 +461,12 @@ public: //! \name Accessors //! \{ + //! Returns \ref JitAllocator associated with this write scope. + [[nodiscard]] ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return _allocator; } + + //! Returns cache policy this write scope is using. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG VirtMem::CachePolicy policy() const noexcept { return _policy; } //! \} @@ -499,27 +520,40 @@ public: ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Statistics{}; } //! Returns count of blocks managed by `JitAllocator` at the moment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t blockCount() const noexcept { return _blockCount; } + //! Returns the number of active allocations. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t allocationCount() const noexcept { return _allocationCount; } //! Returns how many bytes are currently used. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t usedSize() const noexcept { return _usedSize; } + //! Returns the number of bytes unused by the allocator at the moment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t unusedSize() const noexcept { return _reservedSize - _usedSize; } + //! Returns the total number of bytes reserved by the allocator (sum of sizes of all blocks). + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t reservedSize() const noexcept { return _reservedSize; } + //! Returns the number of bytes the allocator needs to manage the allocated memory. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t overheadSize() const noexcept { return _overheadSize; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG double usedSizeAsPercent() const noexcept { return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG double unusedSizeAsPercent() const noexcept { return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG double overheadSizeAsPercent() const noexcept { return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0; } @@ -528,6 +562,7 @@ public: //! Returns JIT allocator statistics. //! //! \remarks This function is thread-safe. + [[nodiscard]] ASMJIT_API Statistics statistics() const noexcept; //! 
\} diff --git a/src/asmjit/core/jitruntime.cpp b/src/asmjit/core/jitruntime.cpp index 0cc0269..c117c75 100644 --- a/src/asmjit/core/jitruntime.cpp +++ b/src/asmjit/core/jitruntime.cpp @@ -27,8 +27,9 @@ Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept { ASMJIT_PROPAGATE(code->resolveUnresolvedLinks()); size_t estimatedCodeSize = code->codeSize(); - if (ASMJIT_UNLIKELY(estimatedCodeSize == 0)) + if (ASMJIT_UNLIKELY(estimatedCodeSize == 0)) { return DebugUtils::errored(kErrorNoCodeGenerated); + } JitAllocator::Span span; ASMJIT_PROPAGATE(_allocator.alloc(span, estimatedCodeSize)); diff --git a/src/asmjit/core/jitruntime.h b/src/asmjit/core/jitruntime.h index 717a6b5..acea4a3 100644 --- a/src/asmjit/core/jitruntime.h +++ b/src/asmjit/core/jitruntime.h @@ -59,6 +59,7 @@ public: //! \{ //! Returns the associated `JitAllocator`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return const_cast(&_allocator); } //! \} diff --git a/src/asmjit/core/logger.cpp b/src/asmjit/core/logger.cpp index 9bc14ba..2b9dd9b 100644 --- a/src/asmjit/core/logger.cpp +++ b/src/asmjit/core/logger.cpp @@ -52,11 +52,13 @@ FileLogger::FileLogger(FILE* file) noexcept FileLogger::~FileLogger() noexcept {} Error FileLogger::_log(const char* data, size_t size) noexcept { - if (!_file) + if (!_file) { return kErrorOk; + } - if (size == SIZE_MAX) + if (size == SIZE_MAX) { size = strlen(data); + } fwrite(data, 1, size, _file); return kErrorOk; diff --git a/src/asmjit/core/logger.h b/src/asmjit/core/logger.h index 54c169f..1d8337e 100644 --- a/src/asmjit/core/logger.h +++ b/src/asmjit/core/logger.h @@ -47,36 +47,53 @@ public: //! \{ //! Returns \ref FormatOptions of this logger. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FormatOptions& options() noexcept { return _options; } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const FormatOptions& options() const noexcept { return _options; } + //! Sets formatting options of this Logger to `options`. 
ASMJIT_INLINE_NODEBUG void setOptions(const FormatOptions& options) noexcept { _options = options; } + //! Resets formatting options of this Logger to defaults. ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options.reset(); } //! Returns formatting flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _options.flags(); } + //! Tests whether the logger has the given `flag` enabled. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return _options.hasFlag(flag); } + //! Sets formatting flags to `flags`. ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _options.setFlags(flags); } + //! Enables the given formatting `flags`. ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _options.addFlags(flags); } + //! Disables the given formatting `flags`. ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _options.clearFlags(flags); } //! Returns indentation of a given indentation `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t indentation(FormatIndentationGroup type) const noexcept { return _options.indentation(type); } + //! Sets indentation of the given indentation `group` to `n` spaces. ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup type, uint32_t n) noexcept { _options.setIndentation(type, n); } + //! Resets indentation of the given indentation `group` to 0 spaces. ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup type) noexcept { _options.resetIndentation(type); } //! Returns padding of a given padding `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup type) const noexcept { return _options.padding(type); } + //! Sets padding of a given padding `group` to `n`. ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup type, uint32_t n) noexcept { _options.setPadding(type, n); } + //! Resets padding of a given padding `group` to 0, which means that a default will be used. 
ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup type) noexcept { _options.resetPadding(type); } @@ -127,6 +144,7 @@ public: //! \{ //! Returns the logging output stream or null if the logger has no output stream. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FILE* file() const noexcept { return _file; } //! Sets the logging output stream to `stream` or null. @@ -165,15 +183,21 @@ public: //! Returns the content of the logger as \ref String. //! //! It can be moved, if desired. + [[nodiscard]] ASMJIT_INLINE_NODEBUG String& content() noexcept { return _content; } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const String& content() const noexcept { return _content; } //! Returns aggregated logger data as `char*` pointer. //! //! The pointer is owned by `StringLogger`, it can't be modified or freed. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return _content.data(); } + //! Returns size of the data returned by `data()`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t dataSize() const noexcept { return _content.size(); } //! \} diff --git a/src/asmjit/core/operand.h b/src/asmjit/core/operand.h index 01b1cb9..8a90084 100644 --- a/src/asmjit/core/operand.h +++ b/src/asmjit/core/operand.h @@ -40,7 +40,7 @@ static_assert(uint32_t(OperandType::kMem) == uint32_t(OperandType::kReg) + 1, //! Register mask is a convenience typedef that describes a mask where each bit describes a physical register id //! in the same \ref RegGroup. At the moment 32 bits are enough as AsmJit doesn't support any architecture that //! would provide more than 32 registers for a register group. -typedef uint32_t RegMask; +using RegMask = uint32_t; //! Register type. //! @@ -216,7 +216,7 @@ enum class RegGroup : uint8_t { }; ASMJIT_DEFINE_ENUM_COMPARE(RegGroup) -typedef Support::EnumValues RegGroupVirtValues; +using RegGroupVirtValues = Support::EnumValues; //! Operand signature is a 32-bit number describing \ref Operand and some of its payload. //! 
@@ -228,57 +228,55 @@ struct OperandSignature { //! \name Constants //! \{ - enum : uint32_t { - // Operand type (3 least significant bits). - // |........|........|........|.....XXX| - kOpTypeShift = 0, - kOpTypeMask = 0x07u << kOpTypeShift, + // Operand type (3 least significant bits). + // |........|........|........|.....XXX| + static inline constexpr uint32_t kOpTypeShift = 0; + static inline constexpr uint32_t kOpTypeMask = 0x07u << kOpTypeShift; - // Register type (5 bits). - // |........|........|........|XXXXX...| - kRegTypeShift = 3, - kRegTypeMask = 0x1Fu << kRegTypeShift, + // Register type (5 bits). + // |........|........|........|XXXXX...| + static inline constexpr uint32_t kRegTypeShift = 3; + static inline constexpr uint32_t kRegTypeMask = 0x1Fu << kRegTypeShift; - // Register group (4 bits). - // |........|........|....XXXX|........| - kRegGroupShift = 8, - kRegGroupMask = 0x0Fu << kRegGroupShift, + // Register group (4 bits). + // |........|........|....XXXX|........| + static inline constexpr uint32_t kRegGroupShift = 8; + static inline constexpr uint32_t kRegGroupMask = 0x0Fu << kRegGroupShift; - // Memory base type (5 bits). - // |........|........|........|XXXXX...| - kMemBaseTypeShift = 3, - kMemBaseTypeMask = 0x1Fu << kMemBaseTypeShift, + // Memory base type (5 bits). + // |........|........|........|XXXXX...| + static inline constexpr uint32_t kMemBaseTypeShift = 3; + static inline constexpr uint32_t kMemBaseTypeMask = 0x1Fu << kMemBaseTypeShift; - // Memory index type (5 bits). - // |........|........|...XXXXX|........| - kMemIndexTypeShift = 8, - kMemIndexTypeMask = 0x1Fu << kMemIndexTypeShift, + // Memory index type (5 bits). + // |........|........|...XXXXX|........| + static inline constexpr uint32_t kMemIndexTypeShift = 8; + static inline constexpr uint32_t kMemIndexTypeMask = 0x1Fu << kMemIndexTypeShift; - // Memory base+index combined (10 bits). 
- // |........|........|...XXXXX|XXXXX...| - kMemBaseIndexShift = 3, - kMemBaseIndexMask = 0x3FFu << kMemBaseIndexShift, + // Memory base+index combined (10 bits). + // |........|........|...XXXXX|XXXXX...| + static inline constexpr uint32_t kMemBaseIndexShift = 3; + static inline constexpr uint32_t kMemBaseIndexMask = 0x3FFu << kMemBaseIndexShift; - // This memory operand represents a home-slot or stack (Compiler) (1 bit). - // |........|........|..X.....|........| - kMemRegHomeShift = 13, - kMemRegHomeFlag = 0x01u << kMemRegHomeShift, + // This memory operand represents a home-slot or stack (Compiler) (1 bit). + // |........|........|..X.....|........| + static inline constexpr uint32_t kMemRegHomeShift = 13; + static inline constexpr uint32_t kMemRegHomeFlag = 0x01u << kMemRegHomeShift; - // Immediate type (1 bit). - // |........|........|........|....X...| - kImmTypeShift = 3, - kImmTypeMask = 0x01u << kImmTypeShift, + // Immediate type (1 bit). + // |........|........|........|....X...| + static inline constexpr uint32_t kImmTypeShift = 3; + static inline constexpr uint32_t kImmTypeMask = 0x01u << kImmTypeShift; - // Predicate used by either registers or immediate values (4 bits). - // |........|XXXX....|........|........| - kPredicateShift = 20, - kPredicateMask = 0x0Fu << kPredicateShift, + // Predicate used by either registers or immediate values (4 bits). + // |........|XXXX....|........|........| + static inline constexpr uint32_t kPredicateShift = 20; + static inline constexpr uint32_t kPredicateMask = 0x0Fu << kPredicateShift; - // Operand size (8 most significant bits). - // |XXXXXXXX|........|........|........| - kSizeShift = 24, - kSizeMask = 0xFFu << kSizeShift - }; + // Operand size (8 most significant bits). + // |XXXXXXXX|........|........|........| + static inline constexpr uint32_t kSizeShift = 24; + static inline constexpr uint32_t kSizeMask = 0xFFu << kSizeShift; //! \} @@ -295,150 +293,204 @@ struct OperandSignature { //! //! 
\{ - ASMJIT_INLINE_NODEBUG constexpr bool operator!() const noexcept { return _bits == 0; } - ASMJIT_INLINE_NODEBUG constexpr explicit operator bool() const noexcept { return _bits != 0; } + ASMJIT_INLINE_CONSTEXPR bool operator!() const noexcept { return _bits == 0; } + ASMJIT_INLINE_CONSTEXPR explicit operator bool() const noexcept { return _bits != 0; } - ASMJIT_INLINE_NODEBUG OperandSignature& operator|=(uint32_t x) noexcept { _bits |= x; return *this; } - ASMJIT_INLINE_NODEBUG OperandSignature& operator&=(uint32_t x) noexcept { _bits &= x; return *this; } - ASMJIT_INLINE_NODEBUG OperandSignature& operator^=(uint32_t x) noexcept { _bits ^= x; return *this; } + ASMJIT_INLINE_CONSTEXPR OperandSignature& operator|=(uint32_t x) noexcept { _bits |= x; return *this; } + ASMJIT_INLINE_CONSTEXPR OperandSignature& operator&=(uint32_t x) noexcept { _bits &= x; return *this; } + ASMJIT_INLINE_CONSTEXPR OperandSignature& operator^=(uint32_t x) noexcept { _bits ^= x; return *this; } - ASMJIT_INLINE_NODEBUG OperandSignature& operator|=(const OperandSignature& other) noexcept { return operator|=(other._bits); } - ASMJIT_INLINE_NODEBUG OperandSignature& operator&=(const OperandSignature& other) noexcept { return operator&=(other._bits); } - ASMJIT_INLINE_NODEBUG OperandSignature& operator^=(const OperandSignature& other) noexcept { return operator^=(other._bits); } + ASMJIT_INLINE_CONSTEXPR OperandSignature& operator|=(const OperandSignature& other) noexcept { return operator|=(other._bits); } + ASMJIT_INLINE_CONSTEXPR OperandSignature& operator&=(const OperandSignature& other) noexcept { return operator&=(other._bits); } + ASMJIT_INLINE_CONSTEXPR OperandSignature& operator^=(const OperandSignature& other) noexcept { return operator^=(other._bits); } - ASMJIT_INLINE_NODEBUG constexpr OperandSignature operator~() const noexcept { return OperandSignature{~_bits}; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature operator~() const noexcept { return 
OperandSignature{~_bits}; } - ASMJIT_INLINE_NODEBUG constexpr OperandSignature operator|(uint32_t x) const noexcept { return OperandSignature{_bits | x}; } - ASMJIT_INLINE_NODEBUG constexpr OperandSignature operator&(uint32_t x) const noexcept { return OperandSignature{_bits & x}; } - ASMJIT_INLINE_NODEBUG constexpr OperandSignature operator^(uint32_t x) const noexcept { return OperandSignature{_bits ^ x}; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature operator|(uint32_t x) const noexcept { return OperandSignature{_bits | x}; } - ASMJIT_INLINE_NODEBUG constexpr OperandSignature operator|(const OperandSignature& other) const noexcept { return OperandSignature{_bits | other._bits}; } - ASMJIT_INLINE_NODEBUG constexpr OperandSignature operator&(const OperandSignature& other) const noexcept { return OperandSignature{_bits & other._bits}; } - ASMJIT_INLINE_NODEBUG constexpr OperandSignature operator^(const OperandSignature& other) const noexcept { return OperandSignature{_bits ^ other._bits}; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature operator&(uint32_t x) const noexcept { return OperandSignature{_bits & x}; } - ASMJIT_INLINE_NODEBUG constexpr bool operator==(uint32_t x) const noexcept { return _bits == x; } - ASMJIT_INLINE_NODEBUG constexpr bool operator!=(uint32_t x) const noexcept { return _bits != x; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature operator^(uint32_t x) const noexcept { return OperandSignature{_bits ^ x}; } - ASMJIT_INLINE_NODEBUG constexpr bool operator==(const OperandSignature& other) const noexcept { return _bits == other._bits; } - ASMJIT_INLINE_NODEBUG constexpr bool operator!=(const OperandSignature& other) const noexcept { return _bits != other._bits; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature operator|(const OperandSignature& other) const noexcept { return OperandSignature{_bits | other._bits}; } + + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature operator&(const 
OperandSignature& other) const noexcept { return OperandSignature{_bits & other._bits}; } + + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature operator^(const OperandSignature& other) const noexcept { return OperandSignature{_bits ^ other._bits}; } + + ASMJIT_INLINE_CONSTEXPR bool operator==(uint32_t x) const noexcept { return _bits == x; } + ASMJIT_INLINE_CONSTEXPR bool operator!=(uint32_t x) const noexcept { return _bits != x; } + + ASMJIT_INLINE_CONSTEXPR bool operator==(const OperandSignature& other) const noexcept { return _bits == other._bits; } + ASMJIT_INLINE_CONSTEXPR bool operator!=(const OperandSignature& other) const noexcept { return _bits != other._bits; } //! \} //! \name Accessors //! \{ - ASMJIT_INLINE_NODEBUG void reset() noexcept { _bits = 0; } + ASMJIT_INLINE_CONSTEXPR void reset() noexcept { _bits = 0; } - ASMJIT_INLINE_NODEBUG constexpr uint32_t bits() const noexcept { return _bits; } - ASMJIT_INLINE_NODEBUG void setBits(uint32_t bits) noexcept { _bits = bits; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t bits() const noexcept { return _bits; } + + ASMJIT_INLINE_CONSTEXPR void setBits(uint32_t bits) noexcept { _bits = bits; } template - ASMJIT_INLINE_NODEBUG constexpr bool hasField() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasField() const noexcept { return (_bits & kFieldMask) != 0; } template - ASMJIT_INLINE_NODEBUG constexpr bool hasField(uint32_t value) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasField(uint32_t value) const noexcept { return (_bits & kFieldMask) != value << Support::ConstCTZ::value; } template - ASMJIT_INLINE_NODEBUG constexpr uint32_t getField() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t getField() const noexcept { return (_bits >> Support::ConstCTZ::value) & (kFieldMask >> Support::ConstCTZ::value); } template - ASMJIT_INLINE_NODEBUG void setField(uint32_t value) noexcept { + ASMJIT_INLINE_CONSTEXPR void setField(uint32_t value) 
noexcept { ASMJIT_ASSERT(((value << Support::ConstCTZ::value) & ~kFieldMask) == 0); _bits = (_bits & ~kFieldMask) | (value << Support::ConstCTZ::value); } - ASMJIT_INLINE_NODEBUG constexpr OperandSignature subset(uint32_t mask) const noexcept { return OperandSignature{_bits & mask}; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature subset(uint32_t mask) const noexcept { return OperandSignature{_bits & mask}; } template::value> - ASMJIT_INLINE_NODEBUG constexpr OperandSignature replacedValue(uint32_t value) const noexcept { return OperandSignature{(_bits & ~kFieldMask) | (value << kFieldShift)}; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature replacedValue(uint32_t value) const noexcept { return OperandSignature{(_bits & ~kFieldMask) | (value << kFieldShift)}; } template - ASMJIT_INLINE_NODEBUG constexpr bool matchesSignature(const OperandSignature& signature) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool matchesSignature(const OperandSignature& signature) const noexcept { return (_bits & kFieldMask) == signature._bits; } template - ASMJIT_INLINE_NODEBUG constexpr bool matchesFields(uint32_t bits) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool matchesFields(uint32_t bits) const noexcept { return (_bits & kFieldMask) == bits; } template - ASMJIT_INLINE_NODEBUG constexpr bool matchesFields(const OperandSignature& fields) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool matchesFields(const OperandSignature& fields) const noexcept { return (_bits & kFieldMask) == fields._bits; } - ASMJIT_INLINE_NODEBUG constexpr bool isValid() const noexcept { return _bits != 0; } + //! Tests whether the operand signature is valid (describes a valid operand, and not \ref OperandType::kNone. + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isValid() const noexcept { return _bits != 0; } - ASMJIT_INLINE_NODEBUG constexpr OperandType opType() const noexcept { return (OperandType)getField(); } + //! 
Returns operand type this operand signature describes. + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandType opType() const noexcept { return (OperandType)getField(); } - ASMJIT_INLINE_NODEBUG constexpr RegType regType() const noexcept { return (RegType)getField(); } - ASMJIT_INLINE_NODEBUG constexpr RegGroup regGroup() const noexcept { return (RegGroup)getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegType regType() const noexcept { return (RegType)getField(); } - ASMJIT_INLINE_NODEBUG constexpr RegType memBaseType() const noexcept { return (RegType)getField(); } - ASMJIT_INLINE_NODEBUG constexpr RegType memIndexType() const noexcept { return (RegType)getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegGroup regGroup() const noexcept { return (RegGroup)getField(); } - ASMJIT_INLINE_NODEBUG constexpr uint32_t predicate() const noexcept { return getField(); } - ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegType memBaseType() const noexcept { return (RegType)getField(); } - ASMJIT_INLINE_NODEBUG void setOpType(OperandType opType) noexcept { setField(uint32_t(opType)); } - ASMJIT_INLINE_NODEBUG void setRegType(RegType regType) noexcept { setField(uint32_t(regType)); } - ASMJIT_INLINE_NODEBUG void setRegGroup(RegGroup regGroup) noexcept { setField(uint32_t(regGroup)); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegType memIndexType() const noexcept { return (RegType)getField(); } - ASMJIT_INLINE_NODEBUG void setMemBaseType(RegType baseType) noexcept { setField(uint32_t(baseType)); } - ASMJIT_INLINE_NODEBUG void setMemIndexType(RegType indexType) noexcept { setField(uint32_t(indexType)); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t predicate() const noexcept { return getField(); } - ASMJIT_INLINE_NODEBUG void setPredicate(uint32_t predicate) noexcept { setField(predicate); } - ASMJIT_INLINE_NODEBUG void setSize(uint32_t size) noexcept { setField(size); } + 
[[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t size() const noexcept { return getField(); } + + ASMJIT_INLINE_CONSTEXPR void setOpType(OperandType opType) noexcept { setField(uint32_t(opType)); } + ASMJIT_INLINE_CONSTEXPR void setRegType(RegType regType) noexcept { setField(uint32_t(regType)); } + ASMJIT_INLINE_CONSTEXPR void setRegGroup(RegGroup regGroup) noexcept { setField(uint32_t(regGroup)); } + + ASMJIT_INLINE_CONSTEXPR void setMemBaseType(RegType baseType) noexcept { setField(uint32_t(baseType)); } + ASMJIT_INLINE_CONSTEXPR void setMemIndexType(RegType indexType) noexcept { setField(uint32_t(indexType)); } + + ASMJIT_INLINE_CONSTEXPR void setPredicate(uint32_t predicate) noexcept { setField(predicate); } + ASMJIT_INLINE_CONSTEXPR void setSize(uint32_t size) noexcept { setField(size); } //! \} //! \name Static Constructors //! \{ - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature fromBits(uint32_t bits) noexcept { + //! Constructs operand signature from the given `bits`. + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature fromBits(uint32_t bits) noexcept { return OperandSignature{bits}; } + //! Constructs operand signature from the given `value`, use `kFieldMask` to describe where the value is in the signature. template - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature fromValue(const T& value) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature fromValue(const T& value) noexcept { return OperandSignature{uint32_t(value) << Support::ConstCTZ::value}; } - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature fromOpType(OperandType opType) noexcept { + //! Constructs operand signature describing the given operand type `opType`. + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature fromOpType(OperandType opType) noexcept { return OperandSignature{uint32_t(opType) << kOpTypeShift}; } - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature fromRegType(RegType regType) noexcept { + //! 
Constructs operand signature describing the given register type `regType`. + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature fromRegType(RegType regType) noexcept { return OperandSignature{uint32_t(regType) << kRegTypeShift}; } - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature fromRegGroup(RegGroup regGroup) noexcept { + //! Constructs operand signature describing the given register group `regGroup`. + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature fromRegGroup(RegGroup regGroup) noexcept { return OperandSignature{uint32_t(regGroup) << kRegGroupShift}; } - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature fromRegTypeAndGroup(RegType regType, RegGroup regGroup) noexcept { + //! Constructs operand signature describing both register type `regType` and register group `regGroup`. + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature fromRegTypeAndGroup(RegType regType, RegGroup regGroup) noexcept { return fromRegType(regType) | fromRegGroup(regGroup); } - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature fromMemBaseType(RegType baseType) noexcept { + //! Constructs operand signature describing a memory base type `baseType`. + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature fromMemBaseType(RegType baseType) noexcept { return OperandSignature{uint32_t(baseType) << kMemBaseTypeShift}; } - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature fromMemIndexType(RegType indexType) noexcept { + //! Constructs operand signature describing a memory index type `indexType`. + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature fromMemIndexType(RegType indexType) noexcept { return OperandSignature{uint32_t(indexType) << kMemIndexTypeShift}; } - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature fromPredicate(uint32_t predicate) noexcept { + //! Constructs operand signature describing a `predicate`. 
+ [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature fromPredicate(uint32_t predicate) noexcept { return OperandSignature{predicate << kPredicateShift}; } - static ASMJIT_INLINE_NODEBUG constexpr OperandSignature fromSize(uint32_t size) noexcept { + //! Constructs operand signature describing a `size`. + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR OperandSignature fromSize(uint32_t size) noexcept { return OperandSignature{size << kSizeShift}; } @@ -462,7 +514,7 @@ struct Operand_ { //! \name Types //! \{ - typedef OperandSignature Signature; + using Signature = OperandSignature; //! \} @@ -510,18 +562,23 @@ struct Operand_ { //! Tests whether the given `id` is a valid virtual register id. Since AsmJit supports both physical and virtual //! registers it must be able to distinguish between these two. The idea is that physical registers are always //! limited in size, so virtual identifiers start from `kVirtIdMin` and end at `kVirtIdMax`. - static ASMJIT_INLINE_NODEBUG bool isVirtId(uint32_t id) noexcept { return id - kVirtIdMin < uint32_t(kVirtIdCount); } + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR bool isVirtId(uint32_t id) noexcept { return id - kVirtIdMin < uint32_t(kVirtIdCount); } + //! Converts a real-id into a packed-id that can be stored in Operand. - static ASMJIT_INLINE_NODEBUG uint32_t indexToVirtId(uint32_t id) noexcept { return id + kVirtIdMin; } + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR uint32_t indexToVirtId(uint32_t id) noexcept { return id + kVirtIdMin; } + //! Converts a packed-id back to real-id. - static ASMJIT_INLINE_NODEBUG uint32_t virtIdToIndex(uint32_t id) noexcept { return id - kVirtIdMin; } + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR uint32_t virtIdToIndex(uint32_t id) noexcept { return id - kVirtIdMin; } //! \name Construction & Destruction //! \{ //! \cond INTERNAL //! Initializes a `BaseReg` operand from `signature` and register `id`. 
- ASMJIT_INLINE_NODEBUG void _initReg(const Signature& signature, uint32_t id) noexcept { + ASMJIT_INLINE_CONSTEXPR void _initReg(const Signature& signature, uint32_t id) noexcept { _signature = signature; _baseId = id; _data[0] = 0; @@ -530,7 +587,7 @@ struct Operand_ { //! \endcond //! Initializes the operand from `other` operand (used by operator overloads). - ASMJIT_INLINE_NODEBUG void copyFrom(const Operand_& other) noexcept { + ASMJIT_INLINE_CONSTEXPR void copyFrom(const Operand_& other) noexcept { _signature._bits = other._signature._bits; _baseId = other._baseId; _data[0] = other._data[0]; @@ -564,7 +621,7 @@ struct Operand_ { //! memset(&b, 0, sizeof(Operand)); //! assert(a == b); //! ``` - ASMJIT_INLINE_NODEBUG void reset() noexcept { + ASMJIT_INLINE_CONSTEXPR void reset() noexcept { _signature.reset(); _baseId = 0; _data[0] = 0; @@ -577,9 +634,12 @@ struct Operand_ { //! \{ //! Tests whether this operand is the same as `other`. - ASMJIT_INLINE_NODEBUG constexpr bool operator==(const Operand_& other) const noexcept { return equals(other); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool operator==(const Operand_& other) const noexcept { return equals(other); } + //! Tests whether this operand is not the same as `other`. - ASMJIT_INLINE_NODEBUG constexpr bool operator!=(const Operand_& other) const noexcept { return !equals(other); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool operator!=(const Operand_& other) const noexcept { return !equals(other); } //! \} @@ -588,11 +648,13 @@ struct Operand_ { //! Casts this operand to `T` type. template - ASMJIT_INLINE_NODEBUG T& as() noexcept { return static_cast(*this); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR T& as() noexcept { return static_cast(*this); } //! Casts this operand to `T` type (const). template - ASMJIT_INLINE_NODEBUG const T& as() const noexcept { return static_cast(*this); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR const T& as() const noexcept { return static_cast(*this); } //! 
\} @@ -603,7 +665,8 @@ struct Operand_ { //! //! \note This basically performs a binary comparison, if aby bit is //! different the operands are not equal. - ASMJIT_INLINE_NODEBUG constexpr bool equals(const Operand_& other) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool equals(const Operand_& other) const noexcept { return bool(unsigned(_signature == other._signature) & unsigned(_baseId == other._baseId ) & unsigned(_data[0] == other._data[0] ) & @@ -616,44 +679,65 @@ struct Operand_ { //! \{ //! Tests whether the operand's signature matches the signature of the `other` operand. - ASMJIT_INLINE_NODEBUG constexpr bool hasSignature(const Operand_& other) const noexcept { return _signature == other._signature; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasSignature(const Operand_& other) const noexcept { return _signature == other._signature; } + //! Tests whether the operand's signature matches the given signature `sign`. - ASMJIT_INLINE_NODEBUG constexpr bool hasSignature(const Signature& other) const noexcept { return _signature == other; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasSignature(const Signature& other) const noexcept { return _signature == other; } //! Returns operand signature as unsigned 32-bit integer. //! //! Signature is first 4 bytes of the operand data. It's used mostly for operand checking as it's //! much faster to check packed 4 bytes at once than having to check these bytes individually. - ASMJIT_INLINE_NODEBUG constexpr Signature signature() const noexcept { return _signature; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Signature signature() const noexcept { return _signature; } //! Sets the operand signature, see `signature()`. //! //! \note Improper use of `setSignature()` can lead to hard-to-debug errors. 
- ASMJIT_INLINE_NODEBUG void setSignature(const Signature& signature) noexcept { _signature = signature; } + ASMJIT_INLINE_CONSTEXPR void setSignature(const Signature& signature) noexcept { _signature = signature; } + //! \overload - ASMJIT_INLINE_NODEBUG void setSignature(uint32_t signature) noexcept { _signature._bits = signature; } + ASMJIT_INLINE_CONSTEXPR void setSignature(uint32_t signature) noexcept { _signature._bits = signature; } //! Returns the type of the operand, see `OpType`. - ASMJIT_INLINE_NODEBUG constexpr OperandType opType() const noexcept { return _signature.opType(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandType opType() const noexcept { return _signature.opType(); } + //! Tests whether the operand is none (`OperandType::kNone`). - ASMJIT_INLINE_NODEBUG constexpr bool isNone() const noexcept { return _signature == Signature::fromBits(0); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isNone() const noexcept { return _signature == Signature::fromBits(0); } + //! Tests whether the operand is a register (`OperandType::kReg`). - ASMJIT_INLINE_NODEBUG constexpr bool isReg() const noexcept { return opType() == OperandType::kReg; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isReg() const noexcept { return opType() == OperandType::kReg; } + //! Tests whether the operand is a register-list. //! //! \note Register-list is currently only used by 32-bit ARM architecture. - ASMJIT_INLINE_NODEBUG constexpr bool isRegList() const noexcept { return opType() == OperandType::kRegList; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isRegList() const noexcept { return opType() == OperandType::kRegList; } + //! Tests whether the operand is a memory location (`OperandType::kMem`). - ASMJIT_INLINE_NODEBUG constexpr bool isMem() const noexcept { return opType() == OperandType::kMem; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isMem() const noexcept { return opType() == OperandType::kMem; } + //! 
Tests whether the operand is an immediate (`OperandType::kImm`). - ASMJIT_INLINE_NODEBUG constexpr bool isImm() const noexcept { return opType() == OperandType::kImm; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isImm() const noexcept { return opType() == OperandType::kImm; } + //! Tests whether the operand is a label (`OperandType::kLabel`). - ASMJIT_INLINE_NODEBUG constexpr bool isLabel() const noexcept { return opType() == OperandType::kLabel; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isLabel() const noexcept { return opType() == OperandType::kLabel; } //! Tests whether the operand is a physical register. - ASMJIT_INLINE_NODEBUG constexpr bool isPhysReg() const noexcept { return isReg() && _baseId < 0xFFu; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isPhysReg() const noexcept { return isReg() && _baseId < 0xFFu; } + //! Tests whether the operand is a virtual register. - ASMJIT_INLINE_NODEBUG constexpr bool isVirtReg() const noexcept { return isReg() && _baseId > 0xFFu; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVirtReg() const noexcept { return isReg() && _baseId > 0xFFu; } //! Returns the operand id. //! @@ -664,52 +748,80 @@ struct Operand_ { //! * Imm - Should be `0`. //! * Label - Label id if it was created by using `newLabel()` or `Globals::kInvalidId` if the label is invalid or //! not initialized. - ASMJIT_INLINE_NODEBUG constexpr uint32_t id() const noexcept { return _baseId; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t id() const noexcept { return _baseId; } //! Tests whether the operand is a register matching the given register `type`. - ASMJIT_INLINE_NODEBUG constexpr bool isReg(RegType type) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isReg(RegType type) const noexcept { return _signature.subset(Signature::kOpTypeMask | Signature::kRegTypeMask) == (Signature::fromOpType(OperandType::kReg) | Signature::fromRegType(type)); } //! 
Tests whether the operand is a register of the provided register group `regGroup`. - ASMJIT_INLINE_NODEBUG constexpr bool isReg(RegGroup regGroup) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isReg(RegGroup regGroup) const noexcept { return _signature.subset(Signature::kOpTypeMask | Signature::kRegGroupMask) == (Signature::fromOpType(OperandType::kReg) | Signature::fromRegGroup(regGroup)); } //! Tests whether the operand is register and of register type `regType` and `regId`. - ASMJIT_INLINE_NODEBUG constexpr bool isReg(RegType regType, uint32_t regId) const noexcept { return isReg(regType) && _baseId == regId; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isReg(RegType regType, uint32_t regId) const noexcept { return isReg(regType) && _baseId == regId; } + //! Tests whether the operand is register and of register group `regGroup` and `regId`. - ASMJIT_INLINE_NODEBUG constexpr bool isReg(RegGroup regGroup, uint32_t regId) const noexcept { return isReg(regGroup) && _baseId == regId; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isReg(RegGroup regGroup, uint32_t regId) const noexcept { return isReg(regGroup) && _baseId == regId; } //! Tests whether the register is a general purpose register (any size). - ASMJIT_INLINE_NODEBUG constexpr bool isGp() const noexcept { return isReg(RegGroup::kGp); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGp() const noexcept { return isReg(RegGroup::kGp); } + //! Tests whether the register is a 32-bit general purpose register. - ASMJIT_INLINE_NODEBUG constexpr bool isGp32() const noexcept { return isReg(RegType::kGp32); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGp32() const noexcept { return isReg(RegType::kGp32); } + //! Tests whether the register is a 64-bit general purpose register. - ASMJIT_INLINE_NODEBUG constexpr bool isGp64() const noexcept { return isReg(RegType::kGp64); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGp64() const noexcept { return isReg(RegType::kGp64); } //! 
Tests whether the register is a vector register of any size. - ASMJIT_INLINE_NODEBUG constexpr bool isVec() const noexcept { return isReg(RegGroup::kVec); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec() const noexcept { return isReg(RegGroup::kVec); } + //! Tests whether the register is an 8-bit vector register or view (AArch64). - ASMJIT_INLINE_NODEBUG constexpr bool isVec8() const noexcept { return isReg(RegType::kVec8); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec8() const noexcept { return isReg(RegType::kVec8); } + //! Tests whether the register is a 16-bit vector register or view (AArch64). - ASMJIT_INLINE_NODEBUG constexpr bool isVec16() const noexcept { return isReg(RegType::kVec16); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec16() const noexcept { return isReg(RegType::kVec16); } + //! Tests whether the register is a 32-bit vector register or view (AArch32, AArch64). - ASMJIT_INLINE_NODEBUG constexpr bool isVec32() const noexcept { return isReg(RegType::kVec32); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec32() const noexcept { return isReg(RegType::kVec32); } + //! Tests whether the register is a 64-bit vector register or view (AArch32, AArch64). - ASMJIT_INLINE_NODEBUG constexpr bool isVec64() const noexcept { return isReg(RegType::kVec64); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec64() const noexcept { return isReg(RegType::kVec64); } + //! Tests whether the register is a 128-bit vector register or view (AArch32, AArch64, X86, X86_64). - ASMJIT_INLINE_NODEBUG constexpr bool isVec128() const noexcept { return isReg(RegType::kVec128); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec128() const noexcept { return isReg(RegType::kVec128); } + //! Tests whether the register is a 256-bit vector register or view (X86, X86_64). 
- ASMJIT_INLINE_NODEBUG constexpr bool isVec256() const noexcept { return isReg(RegType::kVec256); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec256() const noexcept { return isReg(RegType::kVec256); } + //! Tests whether the register is a 512-bit vector register or view (X86, X86_64). - ASMJIT_INLINE_NODEBUG constexpr bool isVec512() const noexcept { return isReg(RegType::kVec512); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec512() const noexcept { return isReg(RegType::kVec512); } //! Tests whether the register is a mask register of any size. - ASMJIT_INLINE_NODEBUG constexpr bool isMask() const noexcept { return isReg(RegGroup::kMask); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isMask() const noexcept { return isReg(RegGroup::kMask); } //! Tests whether the operand is a register matching the given register `type`. - ASMJIT_INLINE_NODEBUG constexpr bool isRegList(RegType type) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isRegList(RegType type) const noexcept { return _signature.subset(Signature::kOpTypeMask | Signature::kRegTypeMask) == (Signature::fromOpType(OperandType::kRegList) | Signature::fromRegType(type)); } @@ -718,7 +830,8 @@ struct Operand_ { //! \note This is useful on X86 and X86_64 architectures as many instructions support Reg/Mem operand combination. //! So if the user code works with just \ref Operand, it's possible to check whether the operand is either a register //! or memory location with a single check. - ASMJIT_INLINE_NODEBUG constexpr bool isRegOrMem() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isRegOrMem() const noexcept { return Support::isBetween(uint32_t(opType()), uint32_t(OperandType::kReg), uint32_t(OperandType::kMem)); } @@ -727,7 +840,8 @@ struct Operand_ { //! \note This is useful on 32-bit ARM architecture to check whether an operand references a register. It can be //! 
used in other architectures too, but it would work identically to \ref isRegOrMem() as other architectures //! don't provide register lists. - ASMJIT_INLINE_NODEBUG constexpr bool isRegOrRegListOrMem() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isRegOrRegListOrMem() const noexcept { return Support::isBetween(uint32_t(opType()), uint32_t(OperandType::kReg), uint32_t(OperandType::kRegList)); } @@ -742,7 +856,8 @@ struct Operand_ { //! that represent size as an additional payload. This means that memory size is architecture specific and should //! be accessed via \ref x86::Mem::size(). Sometimes when the user knows that the operand is either a register or //! memory operand this function can be helpful as it avoids casting, but it only works when it targets X86 and X86_64. - ASMJIT_INLINE_NODEBUG constexpr uint32_t x86RmSize() const noexcept { return _signature.size(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t x86RmSize() const noexcept { return _signature.size(); } //! \} }; @@ -754,18 +869,18 @@ public: //! \{ //! Creates `kOpNone` operand having all members initialized to zero. - ASMJIT_INLINE_NODEBUG constexpr Operand() noexcept + ASMJIT_INLINE_CONSTEXPR Operand() noexcept : Operand_{ Signature::fromOpType(OperandType::kNone), 0u, { 0u, 0u }} {} //! Creates a cloned `other` operand. - ASMJIT_INLINE_NODEBUG constexpr Operand(const Operand& other) noexcept = default; + ASMJIT_INLINE_CONSTEXPR Operand(const Operand& other) noexcept = default; //! Creates a cloned `other` operand. - ASMJIT_INLINE_NODEBUG constexpr explicit Operand(const Operand_& other) + ASMJIT_INLINE_CONSTEXPR explicit Operand(const Operand_& other) : Operand_(other) {} //! Creates an operand initialized to raw `[u0, u1, u2, u3]` values. 
- ASMJIT_INLINE_NODEBUG constexpr Operand(Globals::Init_, const Signature& u0, uint32_t u1, uint32_t u2, uint32_t u3) noexcept + ASMJIT_INLINE_CONSTEXPR Operand(Globals::Init_, const Signature& u0, uint32_t u1, uint32_t u2, uint32_t u3) noexcept : Operand_{{u0._bits}, u1, {u2, u3}} {} //! Creates an uninitialized operand (dangerous). @@ -776,8 +891,17 @@ public: //! \name Overloaded Operators //! \{ - ASMJIT_INLINE_NODEBUG Operand& operator=(const Operand& other) noexcept = default; - ASMJIT_INLINE_NODEBUG Operand& operator=(const Operand_& other) noexcept { return operator=(static_cast(other)); } + ASMJIT_INLINE_CONSTEXPR Operand& operator=(const Operand& other) noexcept { + // Defaulted copy operator cannot be marked as constexpr in C++17, thus we have to implement it. + copyFrom(other); + return *this; + } + + ASMJIT_INLINE_CONSTEXPR Operand& operator=(const Operand_& other) noexcept { + // Defaulted copy operator cannot be marked as constexpr in C++17, thus we have to implement it. + copyFrom(other); + return *this; + } //! \} @@ -785,7 +909,8 @@ public: //! \{ //! Clones this operand and returns its copy. - ASMJIT_INLINE_NODEBUG constexpr Operand clone() const noexcept { return Operand(*this); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Operand clone() const noexcept { return Operand(*this); } //! \} }; @@ -822,22 +947,22 @@ public: //! \{ //! Creates a label operand without ID (you must set the ID to make it valid). - ASMJIT_INLINE_NODEBUG constexpr Label() noexcept + ASMJIT_INLINE_CONSTEXPR Label() noexcept : Operand(Globals::Init, Signature::fromOpType(OperandType::kLabel), Globals::kInvalidId, 0, 0) {} //! Creates a cloned label operand of `other`. - ASMJIT_INLINE_NODEBUG constexpr Label(const Label& other) noexcept + ASMJIT_INLINE_CONSTEXPR Label(const Label& other) noexcept : Operand(other) {} //! Creates a label operand of the given `id`. 
- ASMJIT_INLINE_NODEBUG constexpr explicit Label(uint32_t id) noexcept + ASMJIT_INLINE_CONSTEXPR explicit Label(uint32_t id) noexcept : Operand(Globals::Init, Signature::fromOpType(OperandType::kLabel), id, 0, 0) {} ASMJIT_INLINE_NODEBUG explicit Label(Globals::NoInit_) noexcept : Operand(Globals::NoInit) {} //! Resets the label, will reset all properties and set its ID to `Globals::kInvalidId`. - ASMJIT_INLINE_NODEBUG void reset() noexcept { + ASMJIT_INLINE_CONSTEXPR void reset() noexcept { _signature = Signature::fromOpType(OperandType::kLabel); _baseId = Globals::kInvalidId; _data[0] = 0; @@ -849,7 +974,10 @@ public: //! \name Overloaded Operators //! \{ - ASMJIT_INLINE_NODEBUG Label& operator=(const Label& other) noexcept = default; + ASMJIT_INLINE_CONSTEXPR Label& operator=(const Label& other) noexcept { + copyFrom(other); + return *this; + } //! \} @@ -857,9 +985,11 @@ public: //! \{ //! Tests whether the label was created by CodeHolder and/or an attached emitter. - ASMJIT_INLINE_NODEBUG constexpr bool isValid() const noexcept { return _baseId != Globals::kInvalidId; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isValid() const noexcept { return _baseId != Globals::kInvalidId; } + //! Sets the label `id`. - ASMJIT_INLINE_NODEBUG void setId(uint32_t id) noexcept { _baseId = id; } + ASMJIT_INLINE_CONSTEXPR void setId(uint32_t id) noexcept { _baseId = id; } //! \} }; @@ -867,22 +997,20 @@ public: //! \cond INTERNAL //! Default register traits. struct BaseRegTraits { - enum : uint32_t { - //! \ref TypeId representing this register type, could be \ref TypeId::kVoid if such type doesn't exist. - kTypeId = uint32_t(TypeId::kVoid), - //! RegType is not valid by default. - kValid = 0, + //! \ref TypeId representing this register type, could be \ref TypeId::kVoid if such type doesn't exist. + static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kVoid); + //! RegType is not valid by default. + static inline constexpr uint32_t kValid = 0; - //! 
Zero type by default (defaults to None). - kType = uint32_t(RegType::kNone), - //! Zero group by default (defaults to GP). - kGroup = uint32_t(RegGroup::kGp), - //! No size by default. - kSize = 0, + //! Zero type by default (defaults to None). + static inline constexpr uint32_t kType = uint32_t(RegType::kNone); + //! Zero group by default (defaults to GP). + static inline constexpr uint32_t kGroup = uint32_t(RegGroup::kGp); + //! No size by default. + static inline constexpr uint32_t kSize = 0u; - //! Empty signature by default (not even having operand type set to register). - kSignature = 0 - }; + //! Empty signature by default (not even having operand type set to register). + static inline constexpr uint32_t kSignature = 0; }; //! \endcond @@ -892,19 +1020,17 @@ public: //! \name Constants //! \{ - enum : uint32_t { - //! None or any register (mostly internal). - kIdBad = 0xFFu, + //! None or any register (mostly internal). + static inline constexpr uint32_t kIdBad = 0xFFu; - kBaseSignatureMask = - Signature::kOpTypeMask | - Signature::kRegTypeMask | - Signature::kRegGroupMask | - Signature::kSizeMask, + static inline constexpr uint32_t kBaseSignatureMask = + Signature::kOpTypeMask | + Signature::kRegTypeMask | + Signature::kRegGroupMask | + Signature::kSizeMask; - kTypeNone = uint32_t(RegType::kNone), - kSignature = Signature::fromOpType(OperandType::kReg).bits() - }; + static inline constexpr uint32_t kTypeNone = uint32_t(RegType::kNone); + static inline constexpr uint32_t kSignature = Signature::fromOpType(OperandType::kReg).bits(); //! \} @@ -912,19 +1038,19 @@ public: //! \{ //! Creates a dummy register operand. - ASMJIT_INLINE_NODEBUG constexpr BaseReg() noexcept + ASMJIT_INLINE_CONSTEXPR BaseReg() noexcept : Operand(Globals::Init, Signature::fromOpType(OperandType::kReg), kIdBad, 0, 0) {} //! Creates a new register operand which is the same as `other` . 
- ASMJIT_INLINE_NODEBUG constexpr BaseReg(const BaseReg& other) noexcept + ASMJIT_INLINE_CONSTEXPR BaseReg(const BaseReg& other) noexcept : Operand(other) {} //! Creates a new register operand compatible with `other`, but with a different `id`. - ASMJIT_INLINE_NODEBUG constexpr BaseReg(const BaseReg& other, uint32_t id) noexcept + ASMJIT_INLINE_CONSTEXPR BaseReg(const BaseReg& other, uint32_t id) noexcept : Operand(Globals::Init, other._signature, id, 0, 0) {} //! Creates a register initialized to the given `signature` and `id`. - ASMJIT_INLINE_NODEBUG constexpr BaseReg(const Signature& signature, uint32_t id) noexcept + ASMJIT_INLINE_CONSTEXPR BaseReg(const Signature& signature, uint32_t id) noexcept : Operand(Globals::Init, signature, id, 0, 0) {} ASMJIT_INLINE_NODEBUG explicit BaseReg(Globals::NoInit_) noexcept @@ -935,7 +1061,10 @@ public: //! \name Overloaded Operators //! \{ - ASMJIT_INLINE_NODEBUG BaseReg& operator=(const BaseReg& other) noexcept = default; + ASMJIT_INLINE_CONSTEXPR BaseReg& operator=(const BaseReg& other) noexcept { + copyFrom(other); + return *this; + } //! \} @@ -947,14 +1076,20 @@ public: //! Base signature only contains the operand type, register type, register group, and register size. It doesn't //! contain element type, predicate, or other architecture-specific data. Base signature is a signature that is //! provided by architecture-specific `RegTraits`, like \ref x86::RegTraits. - ASMJIT_INLINE_NODEBUG constexpr OperandSignature baseSignature() const noexcept { return _signature & kBaseSignatureMask; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature baseSignature() const noexcept { return _signature & kBaseSignatureMask; } //! Tests whether the operand's base signature matches the given signature `sign`. 
- ASMJIT_INLINE_NODEBUG constexpr bool hasBaseSignature(uint32_t signature) const noexcept { return baseSignature() == signature; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasBaseSignature(uint32_t signature) const noexcept { return baseSignature() == signature; } + //! Tests whether the operand's base signature matches the given signature `sign`. - ASMJIT_INLINE_NODEBUG constexpr bool hasBaseSignature(const OperandSignature& signature) const noexcept { return baseSignature() == signature; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasBaseSignature(const OperandSignature& signature) const noexcept { return baseSignature() == signature; } + //! Tests whether the operand's base signature matches the base signature of the `other` operand. - ASMJIT_INLINE_NODEBUG constexpr bool hasBaseSignature(const BaseReg& other) const noexcept { return baseSignature() == other.baseSignature(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasBaseSignature(const BaseReg& other) const noexcept { return baseSignature() == other.baseSignature(); } //! Tests whether this register is the same as `other`. //! @@ -963,90 +1098,117 @@ public: //! both \ref equals() and \ref isSame() should give the same answer, however, if any of these two contains garbage //! or other metadata in the upper 8 bytes then \ref isSame() may return `true` in cases in which \ref equals() //! returns false. - ASMJIT_INLINE_NODEBUG constexpr bool isSame(const BaseReg& other) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isSame(const BaseReg& other) const noexcept { return (_signature == other._signature) & (_baseId == other._baseId); } //! Tests whether the register is valid (either virtual or physical). 
- ASMJIT_INLINE_NODEBUG constexpr bool isValid() const noexcept { return bool(unsigned(_signature != 0) & unsigned(_baseId != kIdBad)); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isValid() const noexcept { return bool(unsigned(_signature != 0) & unsigned(_baseId != kIdBad)); } //! Tests whether this is a physical register. - ASMJIT_INLINE_NODEBUG constexpr bool isPhysReg() const noexcept { return _baseId < kIdBad; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isPhysReg() const noexcept { return _baseId < kIdBad; } + //! Tests whether this is a virtual register. - ASMJIT_INLINE_NODEBUG constexpr bool isVirtReg() const noexcept { return _baseId > kIdBad; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVirtReg() const noexcept { return _baseId > kIdBad; } //! Tests whether the register type matches `type` - same as `isReg(type)`, provided for convenience. - ASMJIT_INLINE_NODEBUG constexpr bool isType(RegType type) const noexcept { return _signature.subset(Signature::kRegTypeMask) == Signature::fromRegType(type); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isType(RegType type) const noexcept { return _signature.subset(Signature::kRegTypeMask) == Signature::fromRegType(type); } + //! Tests whether the register group matches `group`. - ASMJIT_INLINE_NODEBUG constexpr bool isGroup(RegGroup group) const noexcept { return _signature.subset(Signature::kRegGroupMask) == Signature::fromRegGroup(group); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGroup(RegGroup group) const noexcept { return _signature.subset(Signature::kRegGroupMask) == Signature::fromRegGroup(group); } //! Tests whether the register is a general purpose register (any size). - ASMJIT_INLINE_NODEBUG constexpr bool isGp() const noexcept { return isGroup(RegGroup::kGp); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGp() const noexcept { return isGroup(RegGroup::kGp); } + //! Tests whether the register is a vector register of any size. 
- ASMJIT_INLINE_NODEBUG constexpr bool isVec() const noexcept { return isGroup(RegGroup::kVec); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec() const noexcept { return isGroup(RegGroup::kVec); } + //! Tests whether the register is a mask register of any size. - ASMJIT_INLINE_NODEBUG constexpr bool isMask() const noexcept { return isGroup(RegGroup::kMask); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isMask() const noexcept { return isGroup(RegGroup::kMask); } using Operand_::isReg; //! Same as `isType()`, provided for convenience. - ASMJIT_INLINE_NODEBUG constexpr bool isReg(RegType rType) const noexcept { return isType(rType); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isReg(RegType rType) const noexcept { return isType(rType); } + //! Tests whether the register type matches `type` and register id matches `id`. - ASMJIT_INLINE_NODEBUG constexpr bool isReg(RegType rType, uint32_t id) const noexcept { return isType(rType) && this->id() == id; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isReg(RegType rType, uint32_t id) const noexcept { return isType(rType) && this->id() == id; } //! Returns the register type. - ASMJIT_INLINE_NODEBUG constexpr RegType type() const noexcept { return _signature.regType(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegType type() const noexcept { return _signature.regType(); } + //! Returns the register group. - ASMJIT_INLINE_NODEBUG constexpr RegGroup group() const noexcept { return _signature.regGroup(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegGroup group() const noexcept { return _signature.regGroup(); } //! Tests whether the register specifies a size (i.e. the size is not zero). - ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return _signature.hasField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasSize() const noexcept { return _signature.hasField(); } + //! Tests whether the register size matches size `s`. 
- ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasSize(uint32_t s) const noexcept { return size() == s; } //! Returns the size of the register in bytes. If the register size depends on architecture (like `x86::CReg` and //! `x86::DReg`) the size returned should be the greatest possible (so it should return 64-bit size in such case). - ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t size() const noexcept { return _signature.getField(); } //! Returns operation predicate of the register (ARM/AArch64). //! //! The meaning depends on architecture, for example on ARM hardware this describes \ref arm::ShiftOp //! of the register. - ASMJIT_INLINE_NODEBUG constexpr uint32_t predicate() const noexcept { return _signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t predicate() const noexcept { return _signature.getField(); } //! Sets operation predicate of the register to `predicate` (ARM/AArch64). //! //! The meaning depends on architecture, for example on ARM hardware this describes \ref arm::ShiftOp //! of the register. - ASMJIT_INLINE_NODEBUG void setPredicate(uint32_t predicate) noexcept { _signature.setField(predicate); } + ASMJIT_INLINE_CONSTEXPR void setPredicate(uint32_t predicate) noexcept { _signature.setField(predicate); } //! Resets shift operation type of the register to the default value (ARM/AArch64). - ASMJIT_INLINE_NODEBUG void resetPredicate() noexcept { _signature.setField(0); } + ASMJIT_INLINE_CONSTEXPR void resetPredicate() noexcept { _signature.setField(0); } //! Clones the register operand. - ASMJIT_INLINE_NODEBUG constexpr BaseReg clone() const noexcept { return BaseReg(*this); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR BaseReg clone() const noexcept { return BaseReg(*this); } //! 
Casts this register to `RegT` by also changing its signature. //! //! \note Improper use of `cloneAs()` can lead to hard-to-debug errors. template - ASMJIT_INLINE_NODEBUG constexpr RegT cloneAs() const noexcept { return RegT(Signature(RegT::kSignature), id()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegT cloneAs() const noexcept { return RegT(Signature(RegT::kSignature), id()); } //! Casts this register to `other` by also changing its signature. //! //! \note Improper use of `cloneAs()` can lead to hard-to-debug errors. template - ASMJIT_INLINE_NODEBUG constexpr RegT cloneAs(const RegT& other) const noexcept { return RegT(other.signature(), id()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegT cloneAs(const RegT& other) const noexcept { return RegT(other.signature(), id()); } //! Sets the register id to `id`. - ASMJIT_INLINE_NODEBUG void setId(uint32_t id) noexcept { _baseId = id; } + ASMJIT_INLINE_CONSTEXPR void setId(uint32_t id) noexcept { _baseId = id; } //! Sets a 32-bit operand signature based on traits of `RegT`. template - ASMJIT_INLINE_NODEBUG void setSignatureT() noexcept { _signature = RegT::kSignature; } + ASMJIT_INLINE_CONSTEXPR void setSignatureT() noexcept { _signature = RegT::kSignature; } //! Sets the register `signature` and `id`. - ASMJIT_INLINE_NODEBUG void setSignatureAndId(const OperandSignature& signature, uint32_t id) noexcept { + ASMJIT_INLINE_CONSTEXPR void setSignatureAndId(const OperandSignature& signature, uint32_t id) noexcept { _signature = signature; _baseId = id; } @@ -1057,21 +1219,26 @@ public: //! \{ //! Tests whether the `op` operand is a general purpose register. - static ASMJIT_INLINE_NODEBUG bool isGp(const Operand_& op) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR bool isGp(const Operand_& op) noexcept { // Check operand type and register group. Not interested in register type and size. 
return op.signature().subset(Signature::kOpTypeMask | Signature::kRegGroupMask) == (Signature::fromOpType(OperandType::kReg) | Signature::fromRegGroup(RegGroup::kGp)); } //! Tests whether the `op` operand is a vector register. - static ASMJIT_INLINE_NODEBUG bool isVec(const Operand_& op) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR bool isVec(const Operand_& op) noexcept { // Check operand type and register group. Not interested in register type and size. return op.signature().subset(Signature::kOpTypeMask | Signature::kRegGroupMask) == (Signature::fromOpType(OperandType::kReg) | Signature::fromRegGroup(RegGroup::kVec)); } //! Tests whether the `op` is a general purpose register of the given `id`. - static ASMJIT_INLINE_NODEBUG bool isGp(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isGp(op)) & unsigned(op.id() == id)); } + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR bool isGp(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isGp(op)) & unsigned(op.id() == id)); } + //! Tests whether the `op` is a vector register of the given `id`. - static ASMJIT_INLINE_NODEBUG bool isVec(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVec(op)) & unsigned(op.id() == id)); } + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR bool isVec(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVec(op)) & unsigned(op.id() == id)); } //! \} }; @@ -1083,7 +1250,7 @@ struct RegOnly { //! \name Types //! \{ - typedef OperandSignature Signature; + using Signature = OperandSignature; //! \} @@ -1096,16 +1263,16 @@ struct RegOnly { //! \{ //! Initializes the `RegOnly` instance to hold register `signature` and `id`. 
- ASMJIT_INLINE_NODEBUG void init(const OperandSignature& signature, uint32_t id) noexcept { + ASMJIT_INLINE_CONSTEXPR void init(const OperandSignature& signature, uint32_t id) noexcept { _signature = signature; _id = id; } - ASMJIT_INLINE_NODEBUG void init(const BaseReg& reg) noexcept { init(reg.signature(), reg.id()); } - ASMJIT_INLINE_NODEBUG void init(const RegOnly& reg) noexcept { init(reg.signature(), reg.id()); } + ASMJIT_INLINE_CONSTEXPR void init(const BaseReg& reg) noexcept { init(reg.signature(), reg.id()); } + ASMJIT_INLINE_CONSTEXPR void init(const RegOnly& reg) noexcept { init(reg.signature(), reg.id()); } //! Resets the `RegOnly` members to zeros (none). - ASMJIT_INLINE_NODEBUG void reset() noexcept { init(Signature::fromBits(0), 0); } + ASMJIT_INLINE_CONSTEXPR void reset() noexcept { init(Signature::fromBits(0), 0); } //! \} @@ -1113,30 +1280,42 @@ struct RegOnly { //! \{ //! Tests whether this ExtraReg is none (same as calling `Operand_::isNone()`). - ASMJIT_INLINE_NODEBUG constexpr bool isNone() const noexcept { return _signature == 0; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isNone() const noexcept { return _signature == 0; } + //! Tests whether the register is valid (either virtual or physical). - ASMJIT_INLINE_NODEBUG constexpr bool isReg() const noexcept { return _signature != 0; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isReg() const noexcept { return _signature != 0; } //! Tests whether this is a physical register. - ASMJIT_INLINE_NODEBUG constexpr bool isPhysReg() const noexcept { return _id < BaseReg::kIdBad; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isPhysReg() const noexcept { return _id < BaseReg::kIdBad; } + //! Tests whether this is a virtual register (used by `BaseCompiler`). - ASMJIT_INLINE_NODEBUG constexpr bool isVirtReg() const noexcept { return _id > BaseReg::kIdBad; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVirtReg() const noexcept { return _id > BaseReg::kIdBad; } //! 
Returns the register signature or 0 if no register is assigned. - ASMJIT_INLINE_NODEBUG constexpr OperandSignature signature() const noexcept { return _signature; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR OperandSignature signature() const noexcept { return _signature; } + //! Returns the register id. //! //! \note Always check whether the register is assigned before using the returned identifier as //! non-assigned `RegOnly` instance would return zero id, which is still a valid register id. - ASMJIT_INLINE_NODEBUG constexpr uint32_t id() const noexcept { return _id; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t id() const noexcept { return _id; } //! Sets the register id. - ASMJIT_INLINE_NODEBUG void setId(uint32_t id) noexcept { _id = id; } + ASMJIT_INLINE_CONSTEXPR void setId(uint32_t id) noexcept { _id = id; } //! Returns the register type. - ASMJIT_INLINE_NODEBUG constexpr RegType type() const noexcept { return _signature.regType(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegType type() const noexcept { return _signature.regType(); } + //! Returns the register group. - ASMJIT_INLINE_NODEBUG constexpr RegGroup group() const noexcept { return _signature.regGroup(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegGroup group() const noexcept { return _signature.regGroup(); } //! \} @@ -1145,7 +1324,8 @@ struct RegOnly { //! Converts this ExtraReg to a real `RegT` operand. template - ASMJIT_INLINE_NODEBUG constexpr RegT toReg() const noexcept { return RegT(_signature, _id); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegT toReg() const noexcept { return RegT(_signature, _id); } //! 
\} }; @@ -1155,13 +1335,13 @@ struct RegOnly { #define ASMJIT_DEFINE_REG_TRAITS(REG_TYPE, GROUP, SIZE, TYPE_ID) \ template<> \ struct RegTraits { \ - static constexpr uint32_t kValid = 1; \ - static constexpr RegType kType = REG_TYPE; \ - static constexpr RegGroup kGroup = GROUP; \ - static constexpr uint32_t kSize = SIZE; \ - static constexpr TypeId kTypeId = TYPE_ID; \ + static inline constexpr uint32_t kValid = 1; \ + static inline constexpr RegType kType = REG_TYPE; \ + static inline constexpr RegGroup kGroup = GROUP; \ + static inline constexpr uint32_t kSize = SIZE; \ + static inline constexpr TypeId kTypeId = TYPE_ID; \ \ - static constexpr uint32_t kSignature = \ + static inline constexpr uint32_t kSignature = \ (OperandSignature::fromOpType(OperandType::kReg) | \ OperandSignature::fromRegType(kType) | \ OperandSignature::fromRegGroup(kGroup) | \ @@ -1174,19 +1354,19 @@ struct RegTraits { #define ASMJIT_DEFINE_ABSTRACT_REG(REG, BASE) \ public: \ /*! Default constructor that only setups basics. */ \ - ASMJIT_INLINE_NODEBUG constexpr REG() noexcept \ + ASMJIT_INLINE_CONSTEXPR REG() noexcept \ : BASE(Signature{kSignature}, kIdBad) {} \ \ /*! Makes a copy of the `other` register operand. */ \ - ASMJIT_INLINE_NODEBUG constexpr REG(const REG& other) noexcept \ + ASMJIT_INLINE_CONSTEXPR REG(const REG& other) noexcept \ : BASE(other) {} \ \ /*! Makes a copy of the `other` register having id set to `id` */ \ - ASMJIT_INLINE_NODEBUG constexpr REG(const BaseReg& other, uint32_t id) noexcept \ + ASMJIT_INLINE_CONSTEXPR REG(const BaseReg& other, uint32_t id) noexcept \ : BASE(other, id) {} \ \ /*! Creates a register based on `signature` and `id`. */ \ - ASMJIT_INLINE_NODEBUG constexpr REG(const OperandSignature& sgn, uint32_t id) noexcept \ + ASMJIT_INLINE_CONSTEXPR REG(const OperandSignature& sgn, uint32_t id) noexcept \ : BASE(sgn, id) {} \ \ /*! Creates a completely uninitialized REG register operand (garbage). */ \ @@ -1199,23 +1379,27 @@ public: } \ \ /*! 
Clones the register operand. */ \ - ASMJIT_INLINE_NODEBUG constexpr REG clone() const noexcept { return REG(*this); } \ + [[nodiscard]] \ + ASMJIT_INLINE_CONSTEXPR REG clone() const noexcept { return REG(*this); } \ \ - ASMJIT_INLINE_NODEBUG REG& operator=(const REG& other) noexcept = default; + ASMJIT_INLINE_CONSTEXPR REG& operator=(const REG& other) noexcept { \ + copyFrom(other); \ + return *this; \ + } //! Adds constructors and member functions to a class that implements final register. Final registers MUST HAVE a valid //! signature. #define ASMJIT_DEFINE_FINAL_REG(REG, BASE, TRAITS) \ public: \ - static constexpr RegType kThisType = TRAITS::kType; \ - static constexpr RegGroup kThisGroup = TRAITS::kGroup; \ - static constexpr uint32_t kThisSize = TRAITS::kSize; \ - static constexpr uint32_t kSignature = TRAITS::kSignature; \ + static inline constexpr RegType kThisType = TRAITS::kType; \ + static inline constexpr RegGroup kThisGroup = TRAITS::kGroup; \ + static inline constexpr uint32_t kThisSize = TRAITS::kSize; \ + static inline constexpr uint32_t kSignature = TRAITS::kSignature; \ \ ASMJIT_DEFINE_ABSTRACT_REG(REG, BASE) \ \ /*! Creates a register operand having its id set to `id`. */ \ - ASMJIT_INLINE_NODEBUG constexpr explicit REG(uint32_t id) noexcept \ + ASMJIT_INLINE_CONSTEXPR explicit REG(uint32_t id) noexcept \ : BASE(Signature{kSignature}, id) {} //! \endcond @@ -1227,9 +1411,7 @@ public: //! \name Constants //! \{ - enum : uint32_t { - kSignature = Signature::fromOpType(OperandType::kRegList).bits() - }; + static inline constexpr uint32_t kSignature = Signature::fromOpType(OperandType::kRegList).bits(); //! \} @@ -1237,19 +1419,19 @@ public: //! \{ //! Creates a dummy register operand. - ASMJIT_INLINE_NODEBUG constexpr BaseRegList() noexcept + ASMJIT_INLINE_CONSTEXPR BaseRegList() noexcept : Operand(Globals::Init, Signature::fromOpType(OperandType::kRegList), 0, 0, 0) {} //! Creates a new register operand which is the same as `other` . 
- ASMJIT_INLINE_NODEBUG constexpr BaseRegList(const BaseRegList& other) noexcept + ASMJIT_INLINE_CONSTEXPR BaseRegList(const BaseRegList& other) noexcept : Operand(other) {} //! Creates a new register operand compatible with `other`, but with a different `id`. - ASMJIT_INLINE_NODEBUG constexpr BaseRegList(const BaseRegList& other, RegMask regMask) noexcept + ASMJIT_INLINE_CONSTEXPR BaseRegList(const BaseRegList& other, RegMask regMask) noexcept : Operand(Globals::Init, other._signature, regMask, 0, 0) {} //! Creates a register initialized to the given `signature` and `id`. - ASMJIT_INLINE_NODEBUG constexpr BaseRegList(const Signature& signature, RegMask regMask) noexcept + ASMJIT_INLINE_CONSTEXPR BaseRegList(const Signature& signature, RegMask regMask) noexcept : Operand(Globals::Init, signature, regMask, 0, 0) {} ASMJIT_INLINE_NODEBUG explicit BaseRegList(Globals::NoInit_) noexcept @@ -1260,7 +1442,10 @@ public: //! \name Overloaded Operators //! \{ - ASMJIT_INLINE_NODEBUG BaseRegList& operator=(const BaseRegList& other) noexcept = default; + ASMJIT_INLINE_CONSTEXPR BaseRegList& operator=(const BaseRegList& other) noexcept { + copyFrom(other); + return *this; + } //! \} @@ -1268,62 +1453,86 @@ public: //! \{ //! Tests whether the register-list is valid, which means it has a type and at least a single register in the list. - ASMJIT_INLINE_NODEBUG constexpr bool isValid() const noexcept { return bool(unsigned(_signature != 0u) & unsigned(_baseId != 0u)); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isValid() const noexcept { return bool(unsigned(_signature != 0u) & unsigned(_baseId != 0u)); } //! Tests whether the register type matches `type` - same as `isReg(type)`, provided for convenience. 
- ASMJIT_INLINE_NODEBUG constexpr bool isType(RegType type) const noexcept { return _signature.subset(Signature::kRegTypeMask) == Signature::fromRegType(type); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isType(RegType type) const noexcept { return _signature.subset(Signature::kRegTypeMask) == Signature::fromRegType(type); } + //! Tests whether the register group matches `group`. - ASMJIT_INLINE_NODEBUG constexpr bool isGroup(RegGroup group) const noexcept { return _signature.subset(Signature::kRegGroupMask) == Signature::fromRegGroup(group); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGroup(RegGroup group) const noexcept { return _signature.subset(Signature::kRegGroupMask) == Signature::fromRegGroup(group); } //! Tests whether the register is a general purpose register (any size). - ASMJIT_INLINE_NODEBUG constexpr bool isGp() const noexcept { return isGroup(RegGroup::kGp); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGp() const noexcept { return isGroup(RegGroup::kGp); } + //! Tests whether the register is a vector register. - ASMJIT_INLINE_NODEBUG constexpr bool isVec() const noexcept { return isGroup(RegGroup::kVec); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec() const noexcept { return isGroup(RegGroup::kVec); } //! Returns the register type. - ASMJIT_INLINE_NODEBUG constexpr RegType type() const noexcept { return _signature.regType(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegType type() const noexcept { return _signature.regType(); } + //! Returns the register group. - ASMJIT_INLINE_NODEBUG constexpr RegGroup group() const noexcept { return _signature.regGroup(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegGroup group() const noexcept { return _signature.regGroup(); } + //! Returns the size of a single register in this register-list or 0 if unspecified. 
- ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t size() const noexcept { return _signature.getField(); } //! Returns the register list as a mask, where each bit represents one physical register. - ASMJIT_INLINE_NODEBUG constexpr RegMask list() const noexcept { return _baseId; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegMask list() const noexcept { return _baseId; } + //! Sets the register list to `mask`. - ASMJIT_INLINE_NODEBUG void setList(RegMask mask) noexcept { _baseId = mask; } + ASMJIT_INLINE_CONSTEXPR void setList(RegMask mask) noexcept { _baseId = mask; } + //! Remoes all registers from the register-list by making the underlying register-mask zero. - ASMJIT_INLINE_NODEBUG void resetList() noexcept { _baseId = 0; } + ASMJIT_INLINE_CONSTEXPR void resetList() noexcept { _baseId = 0; } //! Adds registers passed by a register `mask` to the register-list. - ASMJIT_INLINE_NODEBUG void addList(RegMask mask) noexcept { _baseId |= mask; } + ASMJIT_INLINE_CONSTEXPR void addList(RegMask mask) noexcept { _baseId |= mask; } + //! Removes registers passed by a register `mask` to the register-list. - ASMJIT_INLINE_NODEBUG void clearList(RegMask mask) noexcept { _baseId &= ~mask; } + ASMJIT_INLINE_CONSTEXPR void clearList(RegMask mask) noexcept { _baseId &= ~mask; } + //! Uses AND operator to combine the current register-list with other register `mask`. - ASMJIT_INLINE_NODEBUG void andList(RegMask mask) noexcept { _baseId &= mask; } + ASMJIT_INLINE_CONSTEXPR void andList(RegMask mask) noexcept { _baseId &= mask; } + //! Uses XOR operator to combine the current register-list with other register `mask`. - ASMJIT_INLINE_NODEBUG void xorList(RegMask mask) noexcept { _baseId ^= mask; } + ASMJIT_INLINE_CONSTEXPR void xorList(RegMask mask) noexcept { _baseId ^= mask; } //! Checks whether a physical register `physId` is in the register-list. 
- ASMJIT_INLINE_NODEBUG bool hasReg(uint32_t physId) const noexcept { return physId < 32u ? (_baseId & (1u << physId)) != 0 : false; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasReg(uint32_t physId) const noexcept { return physId < 32u ? (_baseId & (1u << physId)) != 0 : false; } + //! Adds a physical register `physId` to the register-list. - ASMJIT_INLINE_NODEBUG void addReg(uint32_t physId) noexcept { addList(1u << physId); } + ASMJIT_INLINE_CONSTEXPR void addReg(uint32_t physId) noexcept { addList(1u << physId); } + //! Removes a physical register `physId` from the register-list. - ASMJIT_INLINE_NODEBUG void clearReg(uint32_t physId) noexcept { clearList(1u << physId); } + ASMJIT_INLINE_CONSTEXPR void clearReg(uint32_t physId) noexcept { clearList(1u << physId); } //! Clones the register-list operand. - ASMJIT_INLINE_NODEBUG constexpr BaseRegList clone() const noexcept { return BaseRegList(*this); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR BaseRegList clone() const noexcept { return BaseRegList(*this); } //! Casts this register to `RegT` by also changing its signature. //! //! \note Improper use of `cloneAs()` can lead to hard-to-debug errors. template - ASMJIT_INLINE_NODEBUG constexpr RegListT cloneAs() const noexcept { return RegListT(Signature(RegListT::kSignature), list()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegListT cloneAs() const noexcept { return RegListT(Signature(RegListT::kSignature), list()); } //! Casts this register to `other` by also changing its signature. //! //! \note Improper use of `cloneAs()` can lead to hard-to-debug errors. template - ASMJIT_INLINE_NODEBUG constexpr RegListT cloneAs(const RegListT& other) const noexcept { return RegListT(other.signature(), list()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegListT cloneAs(const RegListT& other) const noexcept { return RegListT(other.signature(), list()); } //! \} }; @@ -1335,19 +1544,19 @@ public: //! \{ //! Creates a dummy register operand. 
- ASMJIT_INLINE_NODEBUG constexpr RegListT() noexcept + ASMJIT_INLINE_CONSTEXPR RegListT() noexcept : BaseRegList() {} //! Creates a new register operand which is the same as `other` . - ASMJIT_INLINE_NODEBUG constexpr RegListT(const RegListT& other) noexcept + ASMJIT_INLINE_CONSTEXPR RegListT(const RegListT& other) noexcept : BaseRegList(other) {} //! Creates a new register operand compatible with `other`, but with a different `id`. - ASMJIT_INLINE_NODEBUG constexpr RegListT(const RegListT& other, RegMask regMask) noexcept + ASMJIT_INLINE_CONSTEXPR RegListT(const RegListT& other, RegMask regMask) noexcept : BaseRegList(other, regMask) {} //! Creates a register initialized to the given `signature` and `id`. - ASMJIT_INLINE_NODEBUG constexpr RegListT(const Signature& signature, RegMask regMask) noexcept + ASMJIT_INLINE_CONSTEXPR RegListT(const Signature& signature, RegMask regMask) noexcept : BaseRegList(signature, regMask) {} //! Creates a register initialized to the given `signature` and `regs`. @@ -1362,7 +1571,10 @@ public: //! \name Overloaded Operators //! \{ - ASMJIT_INLINE_NODEBUG RegListT& operator=(const RegListT& other) noexcept = default; + ASMJIT_INLINE_CONSTEXPR RegListT& operator=(const RegListT& other) noexcept { + copyFrom(other); + return *this; + } //! \} @@ -1375,35 +1587,42 @@ public: using BaseRegList::xorList; //! Adds registers to this register-list as provided by `other` register-list. - ASMJIT_INLINE_NODEBUG void addList(const RegListT& other) noexcept { addList(other.list()); } + ASMJIT_INLINE_CONSTEXPR void addList(const RegListT& other) noexcept { addList(other.list()); } + //! Removes registers contained in `other` register-list. - ASMJIT_INLINE_NODEBUG void clearList(const RegListT& other) noexcept { clearList(other.list()); } + ASMJIT_INLINE_CONSTEXPR void clearList(const RegListT& other) noexcept { clearList(other.list()); } + //! Uses AND operator to combine the current register-list with `other` register-list. 
- ASMJIT_INLINE_NODEBUG void andList(const RegListT& other) noexcept { andList(other.list()); } + ASMJIT_INLINE_CONSTEXPR void andList(const RegListT& other) noexcept { andList(other.list()); } + //! Uses XOR operator to combine the current register-list with `other` register-list. - ASMJIT_INLINE_NODEBUG void xorList(const RegListT& other) noexcept { xorList(other.list()); } + ASMJIT_INLINE_CONSTEXPR void xorList(const RegListT& other) noexcept { xorList(other.list()); } using BaseRegList::addReg; using BaseRegList::clearReg; - ASMJIT_INLINE_NODEBUG void addReg(const RegT& reg) noexcept { - if (reg.id() < 32u) + ASMJIT_INLINE_CONSTEXPR void addReg(const RegT& reg) noexcept { + if (reg.id() < 32u) { addReg(reg.id()); + } } - ASMJIT_INLINE_NODEBUG void addRegs(std::initializer_list regs) noexcept { - for (const RegT& reg : regs) + ASMJIT_INLINE_CONSTEXPR void addRegs(std::initializer_list regs) noexcept { + for (const RegT& reg : regs) { addReg(reg); + } } - ASMJIT_INLINE_NODEBUG void clearReg(const RegT& reg) noexcept { - if (reg.id() < 32u) + ASMJIT_INLINE_CONSTEXPR void clearReg(const RegT& reg) noexcept { + if (reg.id() < 32u) { clearReg(reg.id()); + } } - ASMJIT_INLINE_NODEBUG void clearRegs(std::initializer_list regs) noexcept { - for (const RegT& reg : regs) + ASMJIT_INLINE_CONSTEXPR void clearRegs(std::initializer_list regs) noexcept { + for (const RegT& reg : regs) { clearReg(reg); + } } //! \} @@ -1434,18 +1653,18 @@ public: //! \{ //! Creates a default `BaseMem` operand, that points to [0]. - ASMJIT_INLINE_NODEBUG constexpr BaseMem() noexcept + ASMJIT_INLINE_CONSTEXPR BaseMem() noexcept : Operand(Globals::Init, Signature::fromOpType(OperandType::kMem), 0, 0, 0) {} //! Creates a `BaseMem` operand that is a clone of `other`. - ASMJIT_INLINE_NODEBUG constexpr BaseMem(const BaseMem& other) noexcept + ASMJIT_INLINE_CONSTEXPR BaseMem(const BaseMem& other) noexcept : Operand(other) {} //! Creates a `BaseMem` operand from `baseReg` and `offset`. //! //! 
\note This is an architecture independent constructor that can be used to create an architecture //! independent memory operand to be used in portable code that can handle multiple architectures. - ASMJIT_INLINE_NODEBUG constexpr explicit BaseMem(const BaseReg& baseReg, int32_t offset = 0) noexcept + ASMJIT_INLINE_CONSTEXPR explicit BaseMem(const BaseReg& baseReg, int32_t offset = 0) noexcept : Operand(Globals::Init, Signature::fromOpType(OperandType::kMem) | Signature::fromMemBaseType(baseReg.type()), baseReg.id(), @@ -1454,7 +1673,7 @@ public: //! \cond INTERNAL //! Creates a `BaseMem` operand from 4 integers as used by `Operand_` struct. - ASMJIT_INLINE_NODEBUG constexpr BaseMem(const OperandSignature& u0, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept + ASMJIT_INLINE_CONSTEXPR BaseMem(const OperandSignature& u0, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept : Operand(Globals::Init, u0, baseId, indexId, uint32_t(offset)) {} //! \endcond @@ -1463,7 +1682,7 @@ public: : Operand(Globals::NoInit) {} //! Resets the memory operand - after the reset the memory points to [0]. - ASMJIT_INLINE_NODEBUG void reset() noexcept { + ASMJIT_INLINE_CONSTEXPR void reset() noexcept { _signature = Signature::fromOpType(OperandType::kMem); _baseId = 0; _data[0] = 0; @@ -1475,7 +1694,10 @@ public: //! \name Overloaded Operators //! \{ - ASMJIT_INLINE_NODEBUG BaseMem& operator=(const BaseMem& other) noexcept { copyFrom(other); return *this; } + ASMJIT_INLINE_CONSTEXPR BaseMem& operator=(const BaseMem& other) noexcept { + copyFrom(other); + return *this; + } //! \} @@ -1483,54 +1705,66 @@ public: //! \{ //! Clones the memory operand. - ASMJIT_INLINE_NODEBUG constexpr BaseMem clone() const noexcept { return BaseMem(*this); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR BaseMem clone() const noexcept { return BaseMem(*this); } //! Creates a new copy of this memory operand adjusted by `off`. 
- ASMJIT_INLINE_NODEBUG BaseMem cloneAdjusted(int64_t off) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR BaseMem cloneAdjusted(int64_t off) const noexcept { BaseMem result(*this); result.addOffset(off); return result; } //! Tests whether this memory operand is a register home (only used by \ref asmjit_compiler) - ASMJIT_INLINE_NODEBUG constexpr bool isRegHome() const noexcept { return _signature.hasField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isRegHome() const noexcept { return _signature.hasField(); } + //! Mark this memory operand as register home (only used by \ref asmjit_compiler). - ASMJIT_INLINE_NODEBUG void setRegHome() noexcept { _signature |= Signature::kMemRegHomeFlag; } + ASMJIT_INLINE_CONSTEXPR void setRegHome() noexcept { _signature |= Signature::kMemRegHomeFlag; } + //! Marks this operand to not be a register home (only used by \ref asmjit_compiler). - ASMJIT_INLINE_NODEBUG void clearRegHome() noexcept { _signature &= ~Signature::kMemRegHomeFlag; } + ASMJIT_INLINE_CONSTEXPR void clearRegHome() noexcept { _signature &= ~Signature::kMemRegHomeFlag; } //! Tests whether the memory operand has a BASE register or label specified. - ASMJIT_INLINE_NODEBUG constexpr bool hasBase() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasBase() const noexcept { return (_signature & Signature::kMemBaseTypeMask) != 0; } //! Tests whether the memory operand has an INDEX register specified. - ASMJIT_INLINE_NODEBUG constexpr bool hasIndex() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasIndex() const noexcept { return (_signature & Signature::kMemIndexTypeMask) != 0; } //! Tests whether the memory operand has BASE or INDEX register. - ASMJIT_INLINE_NODEBUG constexpr bool hasBaseOrIndex() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasBaseOrIndex() const noexcept { return (_signature & Signature::kMemBaseIndexMask) != 0; } //! Tests whether the memory operand has BASE and INDEX register. 
- ASMJIT_INLINE_NODEBUG constexpr bool hasBaseAndIndex() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasBaseAndIndex() const noexcept { return (_signature & Signature::kMemBaseTypeMask) != 0 && (_signature & Signature::kMemIndexTypeMask) != 0; } //! Tests whether the BASE operand is a label. - ASMJIT_INLINE_NODEBUG constexpr bool hasBaseLabel() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasBaseLabel() const noexcept { return _signature.subset(Signature::kMemBaseTypeMask) == Signature::fromMemBaseType(RegType::kLabelTag); } //! Tests whether the BASE operand is a register (registers start after `RegType::kLabelTag`). - ASMJIT_INLINE_NODEBUG constexpr bool hasBaseReg() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasBaseReg() const noexcept { return _signature.subset(Signature::kMemBaseTypeMask).bits() > Signature::fromMemBaseType(RegType::kLabelTag).bits(); } //! Tests whether the INDEX operand is a register (registers start after `RegType::kLabelTag`). - ASMJIT_INLINE_NODEBUG constexpr bool hasIndexReg() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasIndexReg() const noexcept { return _signature.subset(Signature::kMemIndexTypeMask).bits() > Signature::fromMemIndexType(RegType::kLabelTag).bits(); } @@ -1538,85 +1772,99 @@ public: //! //! \note If the returned type is one (a value never associated to a register type) the BASE is not register, but it //! is a label. One equals to `kLabelTag`. You should always check `hasBaseLabel()` before using `baseId()` result. - ASMJIT_INLINE_NODEBUG constexpr RegType baseType() const noexcept { return _signature.memBaseType(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegType baseType() const noexcept { return _signature.memBaseType(); } //! Returns the type of an INDEX register (0 if this memory operand doesn't //! use the INDEX register). 
- ASMJIT_INLINE_NODEBUG constexpr RegType indexType() const noexcept { return _signature.memIndexType(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR RegType indexType() const noexcept { return _signature.memIndexType(); } //! This is used internally for BASE+INDEX validation. - ASMJIT_INLINE_NODEBUG constexpr uint32_t baseAndIndexTypes() const noexcept { return _signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t baseAndIndexTypes() const noexcept { return _signature.getField(); } //! Returns both BASE (4:0 bits) and INDEX (9:5 bits) types combined into a single value. //! //! \remarks Returns id of the BASE register or label (if the BASE was specified as label). - ASMJIT_INLINE_NODEBUG constexpr uint32_t baseId() const noexcept { return _baseId; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t baseId() const noexcept { return _baseId; } //! Returns the id of the INDEX register. - ASMJIT_INLINE_NODEBUG constexpr uint32_t indexId() const noexcept { return _data[kDataMemIndexId]; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t indexId() const noexcept { return _data[kDataMemIndexId]; } //! Sets the id of the BASE register (without modifying its type). - ASMJIT_INLINE_NODEBUG void setBaseId(uint32_t id) noexcept { _baseId = id; } + ASMJIT_INLINE_CONSTEXPR void setBaseId(uint32_t id) noexcept { _baseId = id; } + //! Sets the register type of the BASE register (without modifying its id). - ASMJIT_INLINE_NODEBUG void setBaseType(RegType regType) noexcept { _signature.setMemBaseType(regType); } + ASMJIT_INLINE_CONSTEXPR void setBaseType(RegType regType) noexcept { _signature.setMemBaseType(regType); } //! Sets the id of the INDEX register (without modifying its type). - ASMJIT_INLINE_NODEBUG void setIndexId(uint32_t id) noexcept { _data[kDataMemIndexId] = id; } + ASMJIT_INLINE_CONSTEXPR void setIndexId(uint32_t id) noexcept { _data[kDataMemIndexId] = id; } + //! Sets the register type of the INDEX register (without modifying its id). 
- ASMJIT_INLINE_NODEBUG void setIndexType(RegType regType) noexcept { _signature.setMemIndexType(regType); } + ASMJIT_INLINE_CONSTEXPR void setIndexType(RegType regType) noexcept { _signature.setMemIndexType(regType); } //! Sets the base register to type and id of the given `base` operand. - ASMJIT_INLINE_NODEBUG void setBase(const BaseReg& base) noexcept { return _setBase(base.type(), base.id()); } + ASMJIT_INLINE_CONSTEXPR void setBase(const BaseReg& base) noexcept { return _setBase(base.type(), base.id()); } + //! Sets the index register to type and id of the given `index` operand. - ASMJIT_INLINE_NODEBUG void setIndex(const BaseReg& index) noexcept { return _setIndex(index.type(), index.id()); } + ASMJIT_INLINE_CONSTEXPR void setIndex(const BaseReg& index) noexcept { return _setIndex(index.type(), index.id()); } //! \cond INTERNAL - ASMJIT_INLINE_NODEBUG void _setBase(RegType type, uint32_t id) noexcept { + ASMJIT_INLINE_CONSTEXPR void _setBase(RegType type, uint32_t id) noexcept { _signature.setField(uint32_t(type)); _baseId = id; } - ASMJIT_INLINE_NODEBUG void _setIndex(RegType type, uint32_t id) noexcept { + ASMJIT_INLINE_CONSTEXPR void _setIndex(RegType type, uint32_t id) noexcept { _signature.setField(uint32_t(type)); _data[kDataMemIndexId] = id; } //! \endcond //! Resets the memory operand's BASE register or label. - ASMJIT_INLINE_NODEBUG void resetBase() noexcept { _setBase(RegType::kNone, 0); } + ASMJIT_INLINE_CONSTEXPR void resetBase() noexcept { _setBase(RegType::kNone, 0); } //! Resets the memory operand's INDEX register. - ASMJIT_INLINE_NODEBUG void resetIndex() noexcept { _setIndex(RegType::kNone, 0); } + ASMJIT_INLINE_CONSTEXPR void resetIndex() noexcept { _setIndex(RegType::kNone, 0); } //! Tests whether the memory operand has a 64-bit offset or absolute address. //! //! If this is true then `hasBase()` must always report false. 
- ASMJIT_INLINE_NODEBUG constexpr bool isOffset64Bit() const noexcept { return baseType() == RegType::kNone; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isOffset64Bit() const noexcept { return baseType() == RegType::kNone; } //! Tests whether the memory operand has a non-zero offset or absolute address. - ASMJIT_INLINE_NODEBUG constexpr bool hasOffset() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasOffset() const noexcept { return (_data[kDataMemOffsetLo] | uint32_t(_baseId & Support::bitMaskFromBool(isOffset64Bit()))) != 0; } //! Returns either relative offset or absolute address as 64-bit integer. - ASMJIT_INLINE_NODEBUG constexpr int64_t offset() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR int64_t offset() const noexcept { return isOffset64Bit() ? int64_t(uint64_t(_data[kDataMemOffsetLo]) | (uint64_t(_baseId) << 32)) : int64_t(int32_t(_data[kDataMemOffsetLo])); // Sign extend 32-bit offset. } //! Returns a 32-bit low part of a 64-bit offset or absolute address. - ASMJIT_INLINE_NODEBUG constexpr int32_t offsetLo32() const noexcept { return int32_t(_data[kDataMemOffsetLo]); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR int32_t offsetLo32() const noexcept { return int32_t(_data[kDataMemOffsetLo]); } + //! Returns a 32-but high part of a 64-bit offset or absolute address. //! //! \note This function is UNSAFE and returns garbage if `isOffset64Bit()` //! returns false. Never use it blindly without checking it first. - ASMJIT_INLINE_NODEBUG constexpr int32_t offsetHi32() const noexcept { return int32_t(_baseId); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR int32_t offsetHi32() const noexcept { return int32_t(_baseId); } //! Sets a 64-bit offset or an absolute address to `offset`. //! //! \note This functions attempts to set both high and low parts of a 64-bit offset, however, if the operand has //! a BASE register it will store only the low 32 bits of the offset / address as there is no way to store both //! 
BASE and 64-bit offset, and there is currently no architecture that has such capability targeted by AsmJit. - inline void setOffset(int64_t offset) noexcept { + ASMJIT_INLINE_CONSTEXPR void setOffset(int64_t offset) noexcept { uint32_t lo = uint32_t(uint64_t(offset) & 0xFFFFFFFFu); uint32_t hi = uint32_t(uint64_t(offset) >> 32); uint32_t hiMsk = Support::bitMaskFromBool(isOffset64Bit()); @@ -1624,8 +1872,9 @@ public: _data[kDataMemOffsetLo] = lo; _baseId = (hi & hiMsk) | (_baseId & ~hiMsk); } + //! Sets a low 32-bit offset to `offset` (don't use without knowing how BaseMem works). - inline void setOffsetLo32(int32_t offset) noexcept { _data[kDataMemOffsetLo] = uint32_t(offset); } + ASMJIT_INLINE_CONSTEXPR void setOffsetLo32(int32_t offset) noexcept { _data[kDataMemOffsetLo] = uint32_t(offset); } //! Adjusts the offset by `offset`. //! @@ -1633,7 +1882,7 @@ public: //! there is a BASE register and the offset is only 32 bits anyway. //! Adjusts the memory operand offset by a `offset`. - inline void addOffset(int64_t offset) noexcept { + ASMJIT_INLINE_CONSTEXPR void addOffset(int64_t offset) noexcept { if (isOffset64Bit()) { int64_t result = offset + int64_t(uint64_t(_data[kDataMemOffsetLo]) | (uint64_t(_baseId) << 32)); _data[kDataMemOffsetLo] = uint32_t(uint64_t(result) & 0xFFFFFFFFu); @@ -1645,13 +1894,13 @@ public: } //! Adds `offset` to a low 32-bit offset part (don't use without knowing how BaseMem works). - ASMJIT_INLINE_NODEBUG void addOffsetLo32(int32_t offset) noexcept { _data[kDataMemOffsetLo] += uint32_t(offset); } + ASMJIT_INLINE_CONSTEXPR void addOffsetLo32(int32_t offset) noexcept { _data[kDataMemOffsetLo] += uint32_t(offset); } //! Resets the memory offset to zero. - ASMJIT_INLINE_NODEBUG void resetOffset() noexcept { setOffset(0); } + ASMJIT_INLINE_CONSTEXPR void resetOffset() noexcept { setOffset(0); } //! Resets the lo part of the memory offset to zero (don't use without knowing how BaseMem works). 
- ASMJIT_INLINE_NODEBUG void resetOffsetLo32() noexcept { setOffsetLo32(0); } + ASMJIT_INLINE_CONSTEXPR void resetOffsetLo32() noexcept { setOffsetLo32(0); } //! \} }; @@ -1670,30 +1919,26 @@ public: //! \cond INTERNAL template struct IsConstexprConstructibleAsImmType - : public std::integral_constant::value || - std::is_pointer::value || - std::is_integral::value || - std::is_function::value> {}; + : public std::integral_constant || std::is_pointer_v || std::is_integral_v || std::is_function_v> {}; template struct IsConvertibleToImmType - : public std::integral_constant::value || - std::is_floating_point::value> {}; + : public std::integral_constant::value || std::is_floating_point_v> {}; //! \endcond //! \name Construction & Destruction //! \{ //! Creates a new immediate value (initial value is 0). - ASMJIT_INLINE_NODEBUG constexpr Imm() noexcept + ASMJIT_INLINE_CONSTEXPR Imm() noexcept : Operand(Globals::Init, Signature::fromOpType(OperandType::kImm), 0, 0, 0) {} //! Creates a new immediate value from `other`. - ASMJIT_INLINE_NODEBUG constexpr Imm(const Imm& other) noexcept + ASMJIT_INLINE_CONSTEXPR Imm(const Imm& other) noexcept : Operand(other) {} //! Creates a new immediate value from ARM/AArch64 specific `shift`. - ASMJIT_INLINE_NODEBUG constexpr Imm(const arm::Shift& shift) noexcept + ASMJIT_INLINE_CONSTEXPR Imm(const arm::Shift& shift) noexcept : Operand(Globals::Init, Signature::fromOpType(OperandType::kImm) | Signature::fromPredicate(uint32_t(shift.op())), 0, @@ -1704,8 +1949,8 @@ public: //! to `predicate`. //! //! \note Predicate is currently only used by ARM architectures. - template::type>::value>::type> - ASMJIT_INLINE_NODEBUG constexpr Imm(const T& val, const uint32_t predicate = 0) noexcept + template>::value>::type> + ASMJIT_INLINE_CONSTEXPR Imm(const T& val, const uint32_t predicate = 0) noexcept : Operand(Globals::Init, Signature::fromOpType(OperandType::kImm) | Signature::fromPredicate(predicate), 0, @@ -1735,7 +1980,10 @@ public: //! \{ //! 
Assigns the value of the `other` operand to this immediate. - ASMJIT_INLINE_NODEBUG Imm& operator=(const Imm& other) noexcept { copyFrom(other); return *this; } + ASMJIT_INLINE_CONSTEXPR Imm& operator=(const Imm& other) noexcept { + copyFrom(other); + return *this; + } //! \} @@ -1743,73 +1991,100 @@ public: //! \{ //! Returns immediate type. - ASMJIT_INLINE_NODEBUG constexpr ImmType type() const noexcept { return (ImmType)_signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR ImmType type() const noexcept { return (ImmType)_signature.getField(); } + //! Sets the immediate type to `type`. - ASMJIT_INLINE_NODEBUG void setType(ImmType type) noexcept { _signature.setField(uint32_t(type)); } + ASMJIT_INLINE_CONSTEXPR void setType(ImmType type) noexcept { _signature.setField(uint32_t(type)); } + //! Resets immediate type to \ref ImmType::kInt. - ASMJIT_INLINE_NODEBUG void resetType() noexcept { setType(ImmType::kInt); } + ASMJIT_INLINE_CONSTEXPR void resetType() noexcept { setType(ImmType::kInt); } //! Returns operation predicate of the immediate. //! //! The meaning depends on architecture, for example on ARM hardware this describes \ref arm::ShiftOp //! of the immediate. - ASMJIT_INLINE_NODEBUG constexpr uint32_t predicate() const noexcept { return _signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t predicate() const noexcept { return _signature.getField(); } //! Sets operation predicate of the immediate to `predicate`. //! //! The meaning depends on architecture, for example on ARM hardware this describes \ref arm::ShiftOp //! of the immediate. - ASMJIT_INLINE_NODEBUG void setPredicate(uint32_t predicate) noexcept { _signature.setField(predicate); } + ASMJIT_INLINE_CONSTEXPR void setPredicate(uint32_t predicate) noexcept { _signature.setField(predicate); } //! Resets the shift operation type of the immediate to the default value (no operation). 
- ASMJIT_INLINE_NODEBUG void resetPredicate() noexcept { _signature.setField(0); } + ASMJIT_INLINE_CONSTEXPR void resetPredicate() noexcept { _signature.setField(0); } //! Returns the immediate value as `int64_t`, which is the internal format Imm uses. - ASMJIT_INLINE_NODEBUG constexpr int64_t value() const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR int64_t value() const noexcept { return int64_t((uint64_t(_data[kDataImmValueHi]) << 32) | _data[kDataImmValueLo]); } //! Tests whether this immediate value is integer of any size. - ASMJIT_INLINE_NODEBUG constexpr uint32_t isInt() const noexcept { return type() == ImmType::kInt; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t isInt() const noexcept { return type() == ImmType::kInt; } + //! Tests whether this immediate value is a double precision floating point value. - ASMJIT_INLINE_NODEBUG constexpr uint32_t isDouble() const noexcept { return type() == ImmType::kDouble; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t isDouble() const noexcept { return type() == ImmType::kDouble; } //! Tests whether the immediate can be casted to 8-bit signed integer. - ASMJIT_INLINE_NODEBUG constexpr bool isInt8() const noexcept { return type() == ImmType::kInt && Support::isInt8(value()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isInt8() const noexcept { return type() == ImmType::kInt && Support::isInt8(value()); } + //! Tests whether the immediate can be casted to 8-bit unsigned integer. - ASMJIT_INLINE_NODEBUG constexpr bool isUInt8() const noexcept { return type() == ImmType::kInt && Support::isUInt8(value()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isUInt8() const noexcept { return type() == ImmType::kInt && Support::isUInt8(value()); } + //! Tests whether the immediate can be casted to 16-bit signed integer. 
- ASMJIT_INLINE_NODEBUG constexpr bool isInt16() const noexcept { return type() == ImmType::kInt && Support::isInt16(value()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isInt16() const noexcept { return type() == ImmType::kInt && Support::isInt16(value()); } + //! Tests whether the immediate can be casted to 16-bit unsigned integer. - ASMJIT_INLINE_NODEBUG constexpr bool isUInt16() const noexcept { return type() == ImmType::kInt && Support::isUInt16(value()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isUInt16() const noexcept { return type() == ImmType::kInt && Support::isUInt16(value()); } + //! Tests whether the immediate can be casted to 32-bit signed integer. - ASMJIT_INLINE_NODEBUG constexpr bool isInt32() const noexcept { return type() == ImmType::kInt && Support::isInt32(value()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isInt32() const noexcept { return type() == ImmType::kInt && Support::isInt32(value()); } + //! Tests whether the immediate can be casted to 32-bit unsigned integer. - ASMJIT_INLINE_NODEBUG constexpr bool isUInt32() const noexcept { return type() == ImmType::kInt && _data[kDataImmValueHi] == 0; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isUInt32() const noexcept { return type() == ImmType::kInt && _data[kDataImmValueHi] == 0; } //! Returns the immediate value casted to `T`. //! //! The value is masked before it's casted to `T` so the returned value is simply the representation of `T` //! considering the original value's lowest bits. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T valueAs() const noexcept { return Support::immediateToT(value()); } //! Returns low 32-bit signed integer. - ASMJIT_INLINE_NODEBUG constexpr int32_t int32Lo() const noexcept { return int32_t(_data[kDataImmValueLo]); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR int32_t int32Lo() const noexcept { return int32_t(_data[kDataImmValueLo]); } + //! Returns high 32-bit signed integer. 
- ASMJIT_INLINE_NODEBUG constexpr int32_t int32Hi() const noexcept { return int32_t(_data[kDataImmValueHi]); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR int32_t int32Hi() const noexcept { return int32_t(_data[kDataImmValueHi]); } + //! Returns low 32-bit signed integer. - ASMJIT_INLINE_NODEBUG constexpr uint32_t uint32Lo() const noexcept { return _data[kDataImmValueLo]; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t uint32Lo() const noexcept { return _data[kDataImmValueLo]; } + //! Returns high 32-bit signed integer. - ASMJIT_INLINE_NODEBUG constexpr uint32_t uint32Hi() const noexcept { return _data[kDataImmValueHi]; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t uint32Hi() const noexcept { return _data[kDataImmValueHi]; } //! Sets immediate value to `val`, the value is casted to a signed 64-bit integer. template ASMJIT_INLINE_NODEBUG void setValue(const T& val) noexcept { - _setValueInternal(Support::immediateFromT(val), std::is_floating_point::value ? ImmType::kDouble : ImmType::kInt); + _setValueInternal(Support::immediateFromT(val), std::is_floating_point_v ? ImmType::kDouble : ImmType::kInt); } - ASMJIT_INLINE_NODEBUG void _setValueInternal(int64_t val, ImmType type) noexcept { + ASMJIT_INLINE_CONSTEXPR void _setValueInternal(int64_t val, ImmType type) noexcept { setType(type); _data[kDataImmValueHi] = uint32_t(uint64_t(val) >> 32); _data[kDataImmValueLo] = uint32_t(uint64_t(val) & 0xFFFFFFFFu); @@ -1821,14 +2096,21 @@ public: //! \{ //! Clones the immediate operand. - ASMJIT_INLINE_NODEBUG constexpr Imm clone() const noexcept { return Imm(*this); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Imm clone() const noexcept { return Imm(*this); } + //! Sign extend the integer immediate value from 8-bit signed integer to 64 bits. ASMJIT_INLINE_NODEBUG void signExtend8Bits() noexcept { setValue(int64_t(valueAs())); } + //! Sign extend the integer immediate value from 16-bit signed integer to 64 bits. 
ASMJIT_INLINE_NODEBUG void signExtend16Bits() noexcept { setValue(int64_t(valueAs())); } + //! Sign extend the integer immediate value from 32-bit signed integer to 64 bits. ASMJIT_INLINE_NODEBUG void signExtend32Bits() noexcept { setValue(int64_t(valueAs())); } + //! Zero extend the integer immediate value from 8-bit unsigned integer to 64 bits. ASMJIT_INLINE_NODEBUG void zeroExtend8Bits() noexcept { setValue(valueAs()); } + //! Zero extend the integer immediate value from 16-bit unsigned integer to 64 bits. ASMJIT_INLINE_NODEBUG void zeroExtend16Bits() noexcept { setValue(valueAs()); } + //! Zero extend the integer immediate value from 32-bit unsigned integer to 64 bits. ASMJIT_INLINE_NODEBUG void zeroExtend32Bits() noexcept { _data[kDataImmValueHi] = 0u; } //! \} @@ -1836,7 +2118,8 @@ public: //! Creates a new immediate operand. template -static ASMJIT_INLINE_NODEBUG constexpr Imm imm(const T& val) noexcept { return Imm(val); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Imm imm(const T& val) noexcept { return Imm(val); } //! \} @@ -1852,18 +2135,20 @@ namespace Support { template struct ForwardOpImpl { + [[nodiscard]] static ASMJIT_INLINE_NODEBUG const T& forward(const T& value) noexcept { return value; } }; template struct ForwardOpImpl { + [[nodiscard]] static ASMJIT_INLINE_NODEBUG Imm forward(const T& value) noexcept { return Imm(value); } }; //! Either forwards operand T or returns a new operand that wraps it if T is a type convertible to operand. -//! At the moment this is only used to convert integers, floats, and enumarations to \ref Imm operands. +//! At the moment this is only used to convert integers, floats, and enumerations to \ref Imm operands. template -struct ForwardOp : public ForwardOpImpl::type>::value> {}; +struct ForwardOp : public ForwardOpImpl>::value> {}; } // {Support} //! 
\endcond diff --git a/src/asmjit/core/osutils.cpp b/src/asmjit/core/osutils.cpp index 2d39069..63b7497 100644 --- a/src/asmjit/core/osutils.cpp +++ b/src/asmjit/core/osutils.cpp @@ -17,8 +17,9 @@ ASMJIT_BEGIN_NAMESPACE #if !defined(_WIN32) Error OSUtils::readFile(const char* name, String& dst, size_t maxSize) noexcept { char* buffer = dst.prepare(String::ModifyOp::kAssign, maxSize); - if (ASMJIT_UNLIKELY(!buffer)) + if (ASMJIT_UNLIKELY(!buffer)) { return DebugUtils::errored(kErrorOutOfMemory); + } int fd = ASMJIT_FILE64_API(::open)(name, O_RDONLY); if (fd < 0) { diff --git a/src/asmjit/core/osutils.h b/src/asmjit/core/osutils.h index c658837..2a35dac 100644 --- a/src/asmjit/core/osutils.h +++ b/src/asmjit/core/osutils.h @@ -35,7 +35,7 @@ public: Handle _handle; #pragma pack(pop) #elif !defined(__EMSCRIPTEN__) - typedef pthread_mutex_t Handle; + using Handle = pthread_mutex_t; Handle _handle; #endif diff --git a/src/asmjit/core/raassignment_p.h b/src/asmjit/core/raassignment_p.h index 0865ece..198b78b 100644 --- a/src/asmjit/core/raassignment_p.h +++ b/src/asmjit/core/raassignment_p.h @@ -66,6 +66,7 @@ public: //! PhysReg to WorkReg mapping. uint32_t workIds[1 /* ... */]; + [[nodiscard]] static ASMJIT_INLINE_NODEBUG size_t sizeOf(size_t count) noexcept { return sizeof(PhysToWorkMap) - sizeof(uint32_t) + count * sizeof(uint32_t); } @@ -74,8 +75,9 @@ public: assigned.reset(); dirty.reset(); - for (size_t i = 0; i < count; i++) + for (size_t i = 0; i < count; i++) { workIds[i] = kWorkNone; + } } inline void copyFrom(const PhysToWorkMap* other, size_t count) noexcept { @@ -94,19 +96,22 @@ public: //! WorkReg to PhysReg mapping uint8_t physIds[1 /* ... 
*/]; + [[nodiscard]] static inline size_t sizeOf(size_t count) noexcept { return size_t(count) * sizeof(uint8_t); } inline void reset(size_t count) noexcept { - for (size_t i = 0; i < count; i++) + for (size_t i = 0; i < count; i++) { physIds[i] = kPhysNone; + } } inline void copyFrom(const WorkToPhysMap* other, size_t count) noexcept { size_t size = sizeOf(count); - if (ASMJIT_LIKELY(size)) + if (ASMJIT_LIKELY(size)) { memcpy(this, other, size); + } } }; @@ -132,7 +137,7 @@ public: resetMaps(); } - ASMJIT_FORCE_INLINE void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept { + ASMJIT_INLINE void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept { // Layout must be initialized before data. ASMJIT_ASSERT(_physToWorkMap == nullptr); ASMJIT_ASSERT(_workToPhysMap == nullptr); @@ -145,14 +150,15 @@ public: _layout.workRegs = &workRegs; } - ASMJIT_FORCE_INLINE void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept { + ASMJIT_INLINE void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept { _physToWorkMap = physToWorkMap; _workToPhysMap = workToPhysMap; - for (RegGroup group : RegGroupVirtValues{}) + for (RegGroup group : RegGroupVirtValues{}) { _physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group); + } } - ASMJIT_FORCE_INLINE void resetMaps() noexcept { + ASMJIT_INLINE void resetMaps() noexcept { _physToWorkMap = nullptr; _workToPhysMap = nullptr; _physToWorkIds.fill(nullptr); @@ -163,17 +169,31 @@ public: //! \name Accessors //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RARegMask& assigned() noexcept { return _physToWorkMap->assigned; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t assigned(RegGroup group) const noexcept { return _physToWorkMap->assigned[group]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RARegMask& dirty() noexcept { return _physToWorkMap->dirty; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask dirty(RegGroup group) const noexcept { return _physToWorkMap->dirty[group]; } + [[nodiscard]] inline uint32_t workToPhysId(RegGroup group, uint32_t workId) const noexcept { DebugUtils::unused(group); ASMJIT_ASSERT(workId != kWorkNone); @@ -181,16 +201,19 @@ public: return _workToPhysMap->physIds[workId]; } + [[nodiscard]] inline uint32_t physToWorkId(RegGroup group, uint32_t physId) const noexcept { ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs); return _physToWorkIds[group][physId]; } + [[nodiscard]] inline bool isPhysAssigned(RegGroup group, uint32_t physId) const noexcept { ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs); return Support::bitTest(_physToWorkMap->assigned[group], physId); } + [[nodiscard]] inline bool isPhysDirty(RegGroup group, uint32_t physId) const noexcept { ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs); return Support::bitTest(_physToWorkMap->dirty[group], physId); @@ -304,7 +327,7 @@ public: //! \name Utilities //! 
\{ - ASMJIT_FORCE_INLINE void swap(RAAssignment& other) noexcept { + ASMJIT_INLINE void swap(RAAssignment& other) noexcept { std::swap(_workToPhysMap, other._workToPhysMap); std::swap(_physToWorkMap, other._physToWorkMap); _physToWorkIds.swap(other._physToWorkIds); @@ -342,6 +365,7 @@ public: } // Not really useful outside of debugging. + [[nodiscard]] bool equals(const RAAssignment& other) const noexcept { // Layout should always match. if (_layout.physIndex != other._layout.physIndex || @@ -357,15 +381,17 @@ public: for (uint32_t physId = 0; physId < physTotal; physId++) { uint32_t thisWorkId = _physToWorkMap->workIds[physId]; uint32_t otherWorkId = other._physToWorkMap->workIds[physId]; - if (thisWorkId != otherWorkId) + if (thisWorkId != otherWorkId) { return false; + } } for (uint32_t workId = 0; workId < workCount; workId++) { uint32_t thisPhysId = _workToPhysMap->physIds[workId]; uint32_t otherPhysId = other._workToPhysMap->physIds[workId]; - if (thisPhysId != otherPhysId) + if (thisPhysId != otherPhysId) { return false; + } } if (_physToWorkMap->assigned != other._physToWorkMap->assigned || diff --git a/src/asmjit/core/rabuilders_p.h b/src/asmjit/core/rabuilders_p.h index 4d131f4..93dde9e 100644 --- a/src/asmjit/core/rabuilders_p.h +++ b/src/asmjit/core/rabuilders_p.h @@ -21,15 +21,18 @@ ASMJIT_BEGIN_NAMESPACE template class RACFGBuilderT { public: - enum : uint32_t { - kRootIndentation = 2, - kCodeIndentation = 4, + //! \name Constants + //! \{ - // NOTE: This is a bit hacky. There are some nodes which are processed twice (see `onBeforeInvoke()` and - // `onBeforeRet()`) as they can insert some nodes around them. Since we don't have any flags to mark these - // we just use their position that is [at that time] unassigned. - kNodePositionDidOnBefore = 0xFFFFFFFFu - }; + static inline constexpr uint32_t kRootIndentation = 2; + static inline constexpr uint32_t kCodeIndentation = 4; + + // NOTE: This is a bit hacky. 
There are some nodes which are processed twice (see `onBeforeInvoke()` and + // `onBeforeRet()`) as they can insert some nodes around them. Since we don't have any flags to mark these + // we just use their position that is [at that time] unassigned. + static inline constexpr uint32_t kNodePositionDidOnBefore = 0xFFFFFFFFu; + + //! \} //! \name Members //! \{ @@ -60,17 +63,20 @@ public: _cc(pass->cc()) { #ifndef ASMJIT_NO_LOGGING _logger = _pass->hasDiagnosticOption(DiagnosticOptions::kRADebugCFG) ? _pass->logger() : nullptr; - if (_logger) + if (_logger) { _formatOptions = _logger->options(); + } #endif } + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return _cc; } //! \name Run //! \{ //! Called per function by an architecture-specific CFG builder. + [[nodiscard]] Error run() noexcept { log("[BuildCFG]\n"); ASMJIT_PROPAGATE(prepare()); @@ -289,8 +295,9 @@ public: _hasCode = false; _blockRegStats.reset(); - if (_curBlock->isConstructed()) + if (_curBlock->isConstructed()) { break; + } ASMJIT_PROPAGATE(_pass->addBlock(consecutiveBlock)); logBlock(_curBlock, kRootIndentation); @@ -417,16 +424,18 @@ public: if (node->type() == NodeType::kSentinel) { if (node == _funcNode->endNode()) { // Make sure we didn't flow here if this is the end of the function sentinel. - if (ASMJIT_UNLIKELY(_curBlock && _hasCode)) + if (ASMJIT_UNLIKELY(_curBlock && _hasCode)) { return DebugUtils::errored(kErrorInvalidState); + } break; } } else if (node->type() == NodeType::kFunc) { // RAPass can only compile a single function at a time. If we // encountered a function it must be the current one, bail if not. - if (ASMJIT_UNLIKELY(node != _funcNode)) + if (ASMJIT_UNLIKELY(node != _funcNode)) { return DebugUtils::errored(kErrorInvalidState); + } // PASS if this is the first node. } else { @@ -440,15 +449,18 @@ public: // NOTE: We cannot encounter a NULL node, because every function must be terminated by a sentinel (`stop`) // node. 
If we encountered a NULL node it means that something went wrong and this node list is corrupted; // bail in such case. - if (ASMJIT_UNLIKELY(!node)) + if (ASMJIT_UNLIKELY(!node)) { return DebugUtils::errored(kErrorInvalidState); + } } - if (_pass->hasDanglingBlocks()) + if (_pass->hasDanglingBlocks()) { return DebugUtils::errored(kErrorInvalidState); + } - for (RABlock* block : blocksWithUnknownJumps) - handleBlockWithUnknownJump(block); + for (RABlock* block : blocksWithUnknownJumps) { + ASMJIT_PROPAGATE(handleBlockWithUnknownJump(block)); + } return _pass->initSharedAssignments(_sharedAssignmentsMap); } @@ -459,6 +471,7 @@ public: //! \{ //! Prepares the CFG builder of the current function. + [[nodiscard]] Error prepare() noexcept { FuncNode* func = _pass->func(); BaseNode* node = nullptr; @@ -508,6 +521,7 @@ public: //! //! If we encounter such block we basically insert all existing blocks as successors except the function entry //! block and a natural successor, if such block exists. 
+ [[nodiscard]] Error handleBlockWithUnknownJump(RABlock* block) noexcept { RABlocks& blocks = _pass->blocks(); size_t blockCount = blocks.size(); @@ -517,40 +531,48 @@ public: RABlock* consecutive = block->consecutive(); for (size_t i = 1; i < blockCount; i++) { RABlock* candidate = blocks[i]; - if (candidate == consecutive || !candidate->isTargetable()) + if (candidate == consecutive || !candidate->isTargetable()) { continue; - block->appendSuccessor(candidate); + } + ASMJIT_PROPAGATE(block->appendSuccessor(candidate)); } return shareAssignmentAcrossSuccessors(block); } + [[nodiscard]] Error shareAssignmentAcrossSuccessors(RABlock* block) noexcept { - if (block->successors().size() <= 1) + if (block->successors().size() <= 1) { return kErrorOk; + } RABlock* consecutive = block->consecutive(); uint32_t sharedAssignmentId = Globals::kInvalidId; for (RABlock* successor : block->successors()) { - if (successor == consecutive) + if (successor == consecutive) { continue; + } if (successor->hasSharedAssignmentId()) { - if (sharedAssignmentId == Globals::kInvalidId) + if (sharedAssignmentId == Globals::kInvalidId) { sharedAssignmentId = successor->sharedAssignmentId(); - else + } + else { _sharedAssignmentsMap[successor->sharedAssignmentId()] = sharedAssignmentId; + } } else { - if (sharedAssignmentId == Globals::kInvalidId) + if (sharedAssignmentId == Globals::kInvalidId) { ASMJIT_PROPAGATE(newSharedAssignmentId(&sharedAssignmentId)); + } successor->setSharedAssignmentId(sharedAssignmentId); } } return kErrorOk; } + [[nodiscard]] Error newSharedAssignmentId(uint32_t* out) noexcept { uint32_t id = _sharedAssignmentsMap.size(); ASMJIT_PROPAGATE(_sharedAssignmentsMap.append(_pass->allocator(), id)); @@ -567,18 +589,21 @@ public: #ifndef ASMJIT_NO_LOGGING template inline void log(const char* fmt, Args&&... 
args) noexcept { - if (_logger) + if (_logger) { _logger->logf(fmt, std::forward(args)...); + } } inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept { - if (_logger) + if (_logger) { _logBlock(block, indentation); + } } inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept { - if (_logger) + if (_logger) { _logNode(node, indentation, action); + } } void _logBlock(RABlock* block, uint32_t indentation) noexcept { diff --git a/src/asmjit/core/radefs_p.h b/src/asmjit/core/radefs_p.h index 50e1eef..243f045 100644 --- a/src/asmjit/core/radefs_p.h +++ b/src/asmjit/core/radefs_p.h @@ -44,8 +44,8 @@ class RABlock; class BaseNode; struct RAStackSlot; -typedef ZoneVector RABlocks; -typedef ZoneVector RAWorkRegs; +using RABlocks = ZoneVector; +using RAWorkRegs = ZoneVector; //! Maximum number of consecutive registers aggregated from all supported backends. static constexpr uint32_t kMaxConsecutiveRegs = 4; @@ -60,6 +60,7 @@ public: //! 
\} + [[nodiscard]] ASMJIT_NOINLINE Error init(Arch arch) noexcept { switch (arch) { case Arch::kX86: @@ -85,6 +86,7 @@ public: } } + [[nodiscard]] inline RegMask availableRegs(RegGroup group) const noexcept { return _availableRegs[group]; } }; @@ -120,14 +122,23 @@ struct RAStrategy { _flags = RAStrategyFlags::kNone; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAStrategyType type() const noexcept { return _type; } + ASMJIT_INLINE_NODEBUG void setType(RAStrategyType type) noexcept { _type = type; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isSimple() const noexcept { return _type == RAStrategyType::kSimple; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isComplex() const noexcept { return _type >= RAStrategyType::kComplex; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAStrategyFlags flags() const noexcept { return _flags; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(RAStrategyFlags flag) const noexcept { return Support::test(_flags, flag); } + ASMJIT_INLINE_NODEBUG void addFlags(RAStrategyFlags flags) noexcept { _flags |= flags; } //! \} @@ -160,17 +171,22 @@ struct RARegCount { //! \name Overloaded Operators //! \{ + [[nodiscard]] inline uint8_t& operator[](RegGroup group) noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _regs[size_t(group)]; } + [[nodiscard]] inline const uint8_t& operator[](RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); return _regs[size_t(group)]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator==(const RARegCount& other) const noexcept { return _packed == other._packed; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator!=(const RARegCount& other) const noexcept { return _packed != other._packed; } //! \} @@ -179,6 +195,7 @@ struct RARegCount { //! \{ //! Returns the count of registers by the given register `group`. + [[nodiscard]] inline uint32_t get(RegGroup group) const noexcept { ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); @@ -210,7 +227,7 @@ struct RARegCount { //! 
Provides mapping that can be used to fast index architecture register groups. struct RARegIndex : public RARegCount { //! Build register indexes based on the given `count` of registers. - ASMJIT_FORCE_INLINE void buildIndexes(const RARegCount& count) noexcept { + ASMJIT_INLINE void buildIndexes(const RARegCount& count) noexcept { uint32_t x = uint32_t(count._regs[0]); uint32_t y = uint32_t(count._regs[1]) + x; uint32_t z = uint32_t(count._regs[2]) + y; @@ -253,13 +270,18 @@ struct RARegMask { //! \name Overloaded Operators //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator==(const RARegMask& other) const noexcept { return _masks == other._masks; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator!=(const RARegMask& other) const noexcept { return _masks != other._masks; } template + [[nodiscard]] inline uint32_t& operator[](const Index& index) noexcept { return _masks[index]; } template + [[nodiscard]] inline const uint32_t& operator[](const Index& index) const noexcept { return _masks[index]; } //! \} @@ -268,10 +290,12 @@ struct RARegMask { //! \{ //! Tests whether all register masks are zero (empty). 
+ [[nodiscard]] inline bool empty() const noexcept { return _masks.aggregate() == 0; } + [[nodiscard]] inline bool has(RegGroup group, RegMask mask = 0xFFFFFFFFu) const noexcept { return (_masks[group] & mask) != 0; } @@ -329,16 +353,28 @@ public: ASMJIT_INLINE_NODEBUG void reset() noexcept { _packed = 0; } ASMJIT_INLINE_NODEBUG void combineWith(const RARegsStats& other) noexcept { _packed |= other._packed; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasUsed() const noexcept { return (_packed & kMaskUsed) != 0u; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasUsed(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexUsed + uint32_t(group))) != 0u; } + ASMJIT_INLINE_NODEBUG void makeUsed(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexUsed + uint32_t(group)); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFixed() const noexcept { return (_packed & kMaskFixed) != 0u; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFixed(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexFixed + uint32_t(group))) != 0u; } + ASMJIT_INLINE_NODEBUG void makeFixed(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexFixed + uint32_t(group)); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasClobbered() const noexcept { return (_packed & kMaskClobbered) != 0u; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasClobbered(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexClobbered + uint32_t(group))) != 0u; } + ASMJIT_INLINE_NODEBUG void makeClobbered(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexClobbered + uint32_t(group)); } //! 
\} @@ -370,8 +406,11 @@ public: ASMJIT_INLINE_NODEBUG RALiveCount& operator=(const RALiveCount& other) noexcept = default; - inline uint32_t& operator[](RegGroup group) noexcept { return n[group]; } - inline const uint32_t& operator[](RegGroup group) const noexcept { return n[group]; } + [[nodiscard]] + ASMJIT_INLINE_NODEBUG uint32_t& operator[](RegGroup group) noexcept { return n[group]; } + + [[nodiscard]] + ASMJIT_INLINE_NODEBUG const uint32_t& operator[](RegGroup group) const noexcept { return n[group]; } //! \} @@ -388,10 +427,8 @@ struct RALiveInterval { //! \name Constants //! \{ - enum : uint32_t { - kNaN = 0, - kInf = 0xFFFFFFFFu - }; + static inline constexpr uint32_t kNaN = 0; + static inline constexpr uint32_t kInf = 0xFFFFFFFFu; //! \} @@ -428,7 +465,10 @@ struct RALiveInterval { //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isValid() const noexcept { return a < b; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t width() const noexcept { return b - a; } //! \} @@ -441,7 +481,7 @@ public: //! \name Types //! \{ - typedef T DataType; + using DataType = T; //! \} @@ -488,9 +528,20 @@ class RALiveSpans { public: ASMJIT_NONCOPYABLE(RALiveSpans) - typedef typename T::DataType DataType; + //! \name Types + //! \{ + + using DataType = typename T::DataType; + + //! \} + + //! \name Members + //! \{ + ZoneVector _data; + //! \} + //! \name Construction & Destruction //! \{ @@ -504,12 +555,19 @@ public: //! \name Accessors //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _data.empty(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _data.size(); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* data() noexcept { return _data.data(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return _data.data(); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isOpen() const noexcept { uint32_t size = _data.size(); return size > 0 && _data[size - 1].b == RALiveInterval::kInf; @@ -523,12 +581,12 @@ public: ASMJIT_INLINE_NODEBUG void swap(RALiveSpans& other) noexcept { _data.swap(other._data); } //! Open the current live span. - ASMJIT_FORCE_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end) noexcept { + ASMJIT_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end) noexcept { bool wasOpen; return openAt(allocator, start, end, wasOpen); } - ASMJIT_FORCE_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end, bool& wasOpen) noexcept { + ASMJIT_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end, bool& wasOpen) noexcept { uint32_t size = _data.size(); wasOpen = false; @@ -544,7 +602,7 @@ public: return _data.append(allocator, T(start, end)); } - ASMJIT_FORCE_INLINE void closeAt(uint32_t end) noexcept { + ASMJIT_INLINE void closeAt(uint32_t end) noexcept { ASMJIT_ASSERT(!empty()); uint32_t size = _data.size(); @@ -561,14 +619,19 @@ public: return width; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG T& operator[](uint32_t index) noexcept { return _data[index]; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const T& operator[](uint32_t index) const noexcept { return _data[index]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool intersects(const RALiveSpans& other) const noexcept { return intersects(*this, other); } - ASMJIT_FORCE_INLINE Error nonOverlappingUnionOf(ZoneAllocator* allocator, const RALiveSpans& x, const RALiveSpans& y, const DataType& 
yData) noexcept { + [[nodiscard]] + ASMJIT_INLINE Error nonOverlappingUnionOf(ZoneAllocator* allocator, const RALiveSpans& x, const RALiveSpans& y, const DataType& yData) noexcept { uint32_t finalSize = x.size() + y.size(); ASMJIT_PROPAGATE(_data.growingReserve(allocator, finalSize)); @@ -588,21 +651,24 @@ public: while (ySpan->b <= xa) { dstPtr->init(*ySpan, yData); dstPtr++; - if (++ySpan == yEnd) + if (++ySpan == yEnd) { goto Done; + } } ya = ySpan->a; while (xSpan->b <= ya) { *dstPtr++ = *xSpan; - if (++xSpan == xEnd) + if (++xSpan == xEnd) { goto Done; + } } // We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`. xa = xSpan->a; - if (ySpan->b > xa) + if (ySpan->b > xa) { return 0xFFFFFFFFu; + } } } @@ -621,7 +687,8 @@ public: return kErrorOk; } - static ASMJIT_FORCE_INLINE bool intersects(const RALiveSpans& x, const RALiveSpans& y) noexcept { + [[nodiscard]] + static ASMJIT_INLINE bool intersects(const RALiveSpans& x, const RALiveSpans& y) noexcept { const T* xSpan = x.data(); const T* ySpan = y.data(); @@ -630,26 +697,32 @@ public: // Loop until we have intersection or either `xSpan == xEnd` or `ySpan == yEnd`, which means that there is no // intersection. We advance either `xSpan` or `ySpan` depending on their end positions. - if (xSpan == xEnd || ySpan == yEnd) + if (xSpan == xEnd || ySpan == yEnd) { return false; + } uint32_t xa, ya; xa = xSpan->a; for (;;) { - while (ySpan->b <= xa) - if (++ySpan == yEnd) + while (ySpan->b <= xa) { + if (++ySpan == yEnd) { return false; + } + } ya = ySpan->a; - while (xSpan->b <= ya) - if (++xSpan == xEnd) + while (xSpan->b <= ya) { + if (++xSpan == xEnd) { return false; + } + } // We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`. xa = xSpan->a; - if (ySpan->b > xa) + if (ySpan->b > xa) { return true; + } } } @@ -666,8 +739,13 @@ public: //! \name Accessors //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t width() const noexcept { return _width; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG float freq() const noexcept { return _freq; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG float priority() const noexcept { return _priority; } //! \} @@ -681,12 +759,15 @@ struct LiveRegData { ASMJIT_INLINE_NODEBUG void init(const LiveRegData& other) noexcept { id = other.id; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator==(const LiveRegData& other) const noexcept { return id == other.id; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator!=(const LiveRegData& other) const noexcept { return id != other.id; } }; -typedef RALiveSpan LiveRegSpan; -typedef RALiveSpans LiveRegSpans; +using LiveRegSpan = RALiveSpan; +using LiveRegSpans = RALiveSpans; //! Flags used by \ref RATiedReg. //! @@ -875,53 +956,86 @@ struct RATiedReg { //! \{ //! Returns the associated WorkReg id. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t workId() const noexcept { return _workId; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasConsecutiveParent() const noexcept { return _consecutiveParent != Globals::kInvalidId; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t consecutiveParent() const noexcept { return _consecutiveParent; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t consecutiveData() const noexcept { return consecutiveDataFromFlags(_flags); } //! Returns TiedReg flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedFlags flags() const noexcept { return _flags; } + //! Checks if the given `flag` is set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(RATiedFlags flag) const noexcept { return Support::test(_flags, flag); } + //! Adds tied register flags. ASMJIT_INLINE_NODEBUG void addFlags(RATiedFlags flags) noexcept { _flags |= flags; } //! Tests whether the register is read (writes `true` also if it's Read/Write). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasFlag(RATiedFlags::kRead); } + //! 
Tests whether the register is written (writes `true` also if it's Read/Write). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasFlag(RATiedFlags::kWrite); } + //! Tests whether the register is read only. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kRead; } + //! Tests whether the register is write only. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kWrite; } + //! Tests whether the register is read and written. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kRW; } //! Tests whether the tied register has use operand (Read/ReadWrite). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isUse() const noexcept { return hasFlag(RATiedFlags::kUse); } + //! Tests whether the tied register has out operand (Write). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isOut() const noexcept { return hasFlag(RATiedFlags::kOut); } //! Tests whether the tied register has \ref RATiedFlags::kLeadConsecutive flag set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLeadConsecutive() const noexcept { return hasFlag(RATiedFlags::kLeadConsecutive); } + //! Tests whether the tied register has \ref RATiedFlags::kUseConsecutive flag set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isUseConsecutive() const noexcept { return hasFlag(RATiedFlags::kUseConsecutive); } + //! Tests whether the tied register has \ref RATiedFlags::kOutConsecutive flag set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isOutConsecutive() const noexcept { return hasFlag(RATiedFlags::kOutConsecutive); } //! Tests whether the tied register must be unique (cannot be allocated to any other allocated register). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasFlag(RATiedFlags::kUnique); } //! Tests whether the tied register has any consecutive flag. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAnyConsecutiveFlag() const noexcept { return hasFlag(RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive); } //! Tests whether the USE slot can be patched to memory operand. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasUseRM() const noexcept { return hasFlag(RATiedFlags::kUseRM); } + //! Tests whether the OUT slot can be patched to memory operand. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasOutRM() const noexcept { return hasFlag(RATiedFlags::kOutRM); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; } inline void makeReadOnly() noexcept { @@ -937,46 +1051,70 @@ struct RATiedReg { } //! Tests whether the register would duplicate. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isDuplicate() const noexcept { return hasFlag(RATiedFlags::kDuplicate); } //! Tests whether the register (and the instruction it's part of) appears last in the basic block. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLast() const noexcept { return hasFlag(RATiedFlags::kLast); } + //! Tests whether the register should be killed after USEd and/or OUTed. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isKill() const noexcept { return hasFlag(RATiedFlags::kKill); } //! Tests whether the register is OUT or KILL (used internally by local register allocator). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isOutOrKill() const noexcept { return hasFlag(RATiedFlags::kOut | RATiedFlags::kKill); } //! Returns a register mask that describes allocable USE registers (Read/ReadWrite access). + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask useRegMask() const noexcept { return _useRegMask; } + //! Returns a register mask that describes allocable OUT registers (WriteOnly access). 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask outRegMask() const noexcept { return _outRegMask; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t refCount() const noexcept { return _refCount; } + ASMJIT_INLINE_NODEBUG void addRefCount(uint32_t n = 1) noexcept { _refCount = uint8_t(_refCount + n); } //! Tests whether the register must be allocated to a fixed physical register before it's used. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasUseId() const noexcept { return _useId != BaseReg::kIdBad; } + //! Tests whether the register must be allocated to a fixed physical register before it's written. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasOutId() const noexcept { return _outId != BaseReg::kIdBad; } //! Returns a physical register id used for 'use' operation. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t useId() const noexcept { return _useId; } + //! Returns a physical register id used for 'out' operation. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t outId() const noexcept { return _outId; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t useRewriteMask() const noexcept { return _useRewriteMask; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t outRewriteMask() const noexcept { return _outRewriteMask; } //! Sets a physical register used for 'use' operation. ASMJIT_INLINE_NODEBUG void setUseId(uint32_t index) noexcept { _useId = uint8_t(index); } + //! Sets a physical register used for 'out' operation. ASMJIT_INLINE_NODEBUG void setOutId(uint32_t index) noexcept { _outId = uint8_t(index); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isUseDone() const noexcept { return hasFlag(RATiedFlags::kUseDone); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isOutDone() const noexcept { return hasFlag(RATiedFlags::kOutDone); } ASMJIT_INLINE_NODEBUG void markUseDone() noexcept { addFlags(RATiedFlags::kUseDone); } + ASMJIT_INLINE_NODEBUG void markOutDone() noexcept { addFlags(RATiedFlags::kOutDone); } //! \} @@ -1024,13 +1162,8 @@ public: //! \name Constants //! 
\{ - enum : uint32_t { - kIdNone = 0xFFFFFFFFu - }; - - enum : uint32_t { - kNoArgIndex = 0xFFu - }; + static inline constexpr uint32_t kIdNone = 0xFFFFFFFFu; + static inline constexpr uint32_t kNoArgIndex = 0xFFu; //! \} @@ -1128,22 +1261,38 @@ public: //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t workId() const noexcept { return _workId; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t virtId() const noexcept { return _virtId; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _virtReg->name(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _virtReg->nameSize(); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _virtReg->typeId(); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAWorkRegFlags flags() const noexcept { return _flags; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(RAWorkRegFlags flag) const noexcept { return Support::test(_flags, flag); } + ASMJIT_INLINE_NODEBUG void addFlags(RAWorkRegFlags flags) noexcept { _flags |= flags; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return hasFlag(RAWorkRegFlags::kAllocated); } + ASMJIT_INLINE_NODEBUG void markAllocated() noexcept { addFlags(RAWorkRegFlags::kAllocated); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isWithinSingleBasicBlock() const noexcept { return !hasFlag(RAWorkRegFlags::kMultipleBasicBlocks); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t singleBasicBlockId() const noexcept { return _singleBasicBlockId; } //! Called when this register appeared in a basic block having `blockId`. 
@@ -1160,43 +1309,77 @@ public: addFlags(RAWorkRegFlags::kMultipleBasicBlocks); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLeadConsecutive() const noexcept { return hasFlag(RAWorkRegFlags::kLeadConsecutive); } + ASMJIT_INLINE_NODEBUG void markLeadConsecutive() noexcept { addFlags(RAWorkRegFlags::kLeadConsecutive); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isProcessedConsecutive() const noexcept { return hasFlag(RAWorkRegFlags::kProcessedConsecutive); } + ASMJIT_INLINE_NODEBUG void markProcessedConsecutive() noexcept { addFlags(RAWorkRegFlags::kProcessedConsecutive); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isStackUsed() const noexcept { return hasFlag(RAWorkRegFlags::kStackUsed); } + ASMJIT_INLINE_NODEBUG void markStackUsed() noexcept { addFlags(RAWorkRegFlags::kStackUsed); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isStackPreferred() const noexcept { return hasFlag(RAWorkRegFlags::kStackPreferred); } + ASMJIT_INLINE_NODEBUG void markStackPreferred() noexcept { addFlags(RAWorkRegFlags::kStackPreferred); } //! Tests whether this RAWorkReg has been coalesced with another one (cannot be used anymore). 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isCoalesced() const noexcept { return hasFlag(RAWorkRegFlags::kCoalesced); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG VirtReg* virtReg() const noexcept { return _virtReg; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasTiedReg() const noexcept { return _tiedReg != nullptr; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedReg* tiedReg() const noexcept { return _tiedReg; } + ASMJIT_INLINE_NODEBUG void setTiedReg(RATiedReg* tiedReg) noexcept { _tiedReg = tiedReg; } + ASMJIT_INLINE_NODEBUG void resetTiedReg() noexcept { _tiedReg = nullptr; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return _stackSlot != nullptr; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAStackSlot* stackSlot() const noexcept { return _stackSlot; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG LiveRegSpans& liveSpans() noexcept { return _liveSpans; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const LiveRegSpans& liveSpans() const noexcept { return _liveSpans; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RALiveStats& liveStats() noexcept { return _liveStats; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RALiveStats& liveStats() const noexcept { return _liveStats; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasArgIndex() const noexcept { return _argIndex != kNoArgIndex; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t argIndex() const noexcept { return _argIndex; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t argValueIndex() const noexcept { return _argValueIndex; } inline void setArgIndex(uint32_t argIndex, uint32_t valueIndex) noexcept { @@ -1204,39 +1387,71 @@ public: _argValueIndex = uint8_t(valueIndex); } + [[nodiscard]] 
ASMJIT_INLINE_NODEBUG bool hasHomeRegId() const noexcept { return _homeRegId != BaseReg::kIdBad; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t homeRegId() const noexcept { return _homeRegId; } + ASMJIT_INLINE_NODEBUG void setHomeRegId(uint32_t physId) noexcept { _homeRegId = uint8_t(physId); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasHintRegId() const noexcept { return _hintRegId != BaseReg::kIdBad; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t hintRegId() const noexcept { return _hintRegId; } + ASMJIT_INLINE_NODEBUG void setHintRegId(uint32_t physId) noexcept { _hintRegId = uint8_t(physId); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask useIdMask() const noexcept { return _useIdMask; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasUseIdMask() const noexcept { return _useIdMask != 0u; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasMultipleUseIds() const noexcept { return _useIdMask != 0u && !Support::isPowerOf2(_useIdMask); } + ASMJIT_INLINE_NODEBUG void addUseIdMask(RegMask mask) noexcept { _useIdMask |= mask; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask preferredMask() const noexcept { return _preferredMask; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasPreferredMask() const noexcept { return _preferredMask != 0xFFFFFFFFu; } + ASMJIT_INLINE_NODEBUG void restrictPreferredMask(RegMask mask) noexcept { _preferredMask &= mask; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask consecutiveMask() const noexcept { return _consecutiveMask; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasConsecutiveMask() const noexcept { return _consecutiveMask != 0xFFFFFFFFu; } + ASMJIT_INLINE_NODEBUG void restrictConsecutiveMask(RegMask mask) noexcept { _consecutiveMask &= mask; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask clobberSurvivalMask() const noexcept { return _clobberSurvivalMask; } + ASMJIT_INLINE_NODEBUG void addClobberSurvivalMask(RegMask mask) noexcept { _clobberSurvivalMask |= mask; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask 
allocatedMask() const noexcept { return _allocatedMask; } + ASMJIT_INLINE_NODEBUG void addAllocatedMask(RegMask mask) noexcept { _allocatedMask |= mask; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t regByteMask() const noexcept { return _regByteMask; } + ASMJIT_INLINE_NODEBUG void setRegByteMask(uint64_t mask) noexcept { _regByteMask = mask; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasImmediateConsecutives() const noexcept { return !_immediateConsecutives.empty(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneBitVector& immediateConsecutives() const noexcept { return _immediateConsecutives; } + [[nodiscard]] inline Error addImmediateConsecutive(ZoneAllocator* allocator, uint32_t workId) noexcept { if (_immediateConsecutives.size() <= workId) ASMJIT_PROPAGATE(_immediateConsecutives.resize(allocator, workId + 1)); diff --git a/src/asmjit/core/ralocal.cpp b/src/asmjit/core/ralocal.cpp index d5fbb3b..78f5c02 100644 --- a/src/asmjit/core/ralocal.cpp +++ b/src/asmjit/core/ralocal.cpp @@ -14,7 +14,7 @@ ASMJIT_BEGIN_NAMESPACE // RALocalAllocator - Utilities // ============================ -static ASMJIT_FORCE_INLINE RATiedReg* RALocal_findTiedRegByWorkId(RATiedReg* tiedRegs, size_t count, uint32_t workId) noexcept { +static ASMJIT_INLINE RATiedReg* RALocal_findTiedRegByWorkId(RATiedReg* tiedRegs, size_t count, uint32_t workId) noexcept { for (size_t i = 0; i < count; i++) if (tiedRegs[i].workId() == workId) return &tiedRegs[i]; @@ -65,24 +65,28 @@ Error RALocalAllocator::makeInitialAssignment() noexcept { for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { // Unassigned argument. const RegOnly& regArg = func->argPack(argIndex)[valueIndex]; - if (!regArg.isReg() || !_cc->isVirtIdValid(regArg.id())) + if (!regArg.isReg() || !_cc->isVirtIdValid(regArg.id())) { continue; + } VirtReg* virtReg = _cc->virtRegById(regArg.id()); // Unreferenced argument. 
RAWorkReg* workReg = virtReg->workReg(); - if (!workReg) + if (!workReg) { continue; + } // Overwritten argument. uint32_t workId = workReg->workId(); - if (!liveIn.bitAt(workId)) + if (!liveIn.bitAt(workId)) { continue; + } RegGroup group = workReg->group(); - if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone) + if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone) { continue; + } RegMask allocableRegs = _availableRegs[group] & ~_curAssignment.assigned(group); if (iter == 0) { @@ -109,8 +113,9 @@ Error RALocalAllocator::makeInitialAssignment() noexcept { // This register will definitely need stack, create the slot now and assign also `argIndex` // to it. We will patch `_argsAssignment` later after RAStackAllocator finishes. RAStackSlot* slot = _pass->getOrCreateStackSlot(workReg); - if (ASMJIT_UNLIKELY(!slot)) + if (ASMJIT_UNLIKELY(!slot)) { return DebugUtils::errored(kErrorOutOfMemory); + } // This means STACK_ARG may be moved to STACK. workReg->addFlags(RAWorkRegFlags::kStackArgToStack); @@ -182,8 +187,9 @@ Error RALocalAllocator::switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, cons while (affectedRegs) { if (++runId == 2) { - if (!tryMode) + if (!tryMode) { return DebugUtils::errored(kErrorInvalidState); + } // Stop in `tryMode` if we haven't done anything in past two rounds. break; @@ -204,12 +210,14 @@ Error RALocalAllocator::switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, cons // Both assigned. if (curWorkId != dstWorkId) { // Wait a bit if this is the first run, we may avoid this if `curWorkId` moves out. - if (runId <= 0) + if (runId <= 0) { continue; + } uint32_t altPhysId = cur.workToPhysId(group, dstWorkId); - if (altPhysId == RAAssignment::kPhysNone) + if (altPhysId == RAAssignment::kPhysNone) { continue; + } // Reset as we will do some changes to the current assignment. 
runId = -1; @@ -220,14 +228,15 @@ Error RALocalAllocator::switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, cons else { // SPILL the reg if it's not dirty in DST, otherwise try to MOVE. if (!cur.isPhysDirty(group, physId)) { - ASMJIT_PROPAGATE(onKillReg(group, curWorkId, physId)); + onKillReg(group, curWorkId, physId); } else { RegMask allocableRegs = _pass->_availableRegs[group] & ~cur.assigned(group); // If possible don't conflict with assigned regs at DST. - if (allocableRegs & ~dst.assigned(group)) + if (allocableRegs & ~dst.assigned(group)) { allocableRegs &= ~dst.assigned(group); + } if (allocableRegs) { // MOVE is possible, thus preferred. @@ -251,8 +260,9 @@ Cleared: // DST assigned, CUR unassigned. uint32_t altPhysId = cur.workToPhysId(group, dstWorkId); if (altPhysId == RAAssignment::kPhysNone) { - if (liveIn.bitAt(dstWorkId)) + if (liveIn.bitAt(dstWorkId)) { willLoadRegs |= physMask; // Scheduled for `onLoadReg()`. + } affectedRegs &= ~physMask; // Unaffected from now. continue; } @@ -267,10 +277,12 @@ Cleared: // If `dstReadOnly` is true it means that that block was already processed and we cannot change from // CLEAN to DIRTY. In that case the register has to be saved as it cannot enter the block DIRTY. - if (dstReadOnly) + if (dstReadOnly) { ASMJIT_PROPAGATE(onSaveReg(group, dstWorkId, physId)); - else + } + else { dst.makeDirty(group, dstWorkId, physId); + } } else { // DST dirty, CUR not dirty (the assert is just to visualize the condition). 
@@ -306,8 +318,9 @@ Cleared: ASMJIT_ASSERT(liveIn.bitAt(workId) == true); ASMJIT_PROPAGATE(onLoadReg(group, workId, physId)); - if (dst.isPhysDirty(group, physId)) + if (dst.isPhysDirty(group, physId)) { cur.makeDirty(group, workId, physId); + } ASMJIT_ASSERT(dst.isPhysDirty(group, physId) == cur.isPhysDirty(group, physId)); } else { @@ -413,19 +426,22 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { if (tiedReg->hasAnyConsecutiveFlag()) { uint32_t consecutiveOffset = tiedReg->isLeadConsecutive() ? uint32_t(0) : tiedReg->consecutiveData(); - if (ASMJIT_UNLIKELY(Support::bitTest(consecutiveMask, consecutiveOffset))) + if (ASMJIT_UNLIKELY(Support::bitTest(consecutiveMask, consecutiveOffset))) { return DebugUtils::errored(kErrorInvalidState); + } consecutiveMask |= Support::bitMask(consecutiveOffset); consecutiveRegs[consecutiveOffset] = tiedReg; } // Add OUT and KILL to `outPending` for CLOBBERing and/or OUT assignment. - if (tiedReg->isOutOrKill()) + if (tiedReg->isOutOrKill()) { outTiedRegs[outTiedCount++] = tiedReg; + } - if (tiedReg->isDuplicate()) + if (tiedReg->isDuplicate()) { dupTiedRegs[dupTiedCount++] = tiedReg; + } if (!tiedReg->isUse()) { tiedReg->markUseDone(); @@ -434,8 +450,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { } // Don't assign anything here if this is a consecutive USE - we will handle this in STEP 2 instead. - if (tiedReg->isUseConsecutive()) + if (tiedReg->isUseConsecutive()) { continue; + } uint32_t workId = tiedReg->workId(); uint32_t assignedId = _curAssignment.workToPhysId(group, workId); @@ -450,8 +467,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { if (assignedId == tiedReg->useId()) { // If the register is already allocated in this one, mark it done and continue. 
tiedReg->markUseDone(); - if (tiedReg->isWrite()) + if (tiedReg->isWrite()) { _curAssignment.makeDirty(group, workId, assignedId); + } usePending--; willUse |= useMask; } @@ -467,8 +485,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { if ((allocableRegs & ~willUse) & assignedMask) { tiedReg->setUseId(assignedId); tiedReg->markUseDone(); - if (tiedReg->isWrite()) + if (tiedReg->isWrite()) { _curAssignment.makeDirty(group, workId, assignedId); + } usePending--; willUse |= assignedMask; } @@ -488,8 +507,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { uint32_t consecutiveCount = 0; if (consecutiveMask) { - if ((consecutiveMask & (consecutiveMask + 1u)) != 0) + if ((consecutiveMask & (consecutiveMask + 1u)) != 0) { return DebugUtils::errored(kErrorInvalidState); + } // Count of trailing ones is the count of consecutive registers. There cannot be gap. consecutiveCount = Support::ctz(~consecutiveMask); @@ -505,8 +525,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { uint32_t assignments[kMaxConsecutiveRegs]; - for (i = 0; i < consecutiveCount; i++) + for (i = 0; i < consecutiveCount; i++) { assignments[i] = _curAssignment.workToPhysId(group, consecutiveRegs[i]->workId()); + } Support::BitWordIterator it(lead->useRegMask()); while (it.hasNext()) { @@ -533,8 +554,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { } } - if (bestLeadReg == 0xFFFFFFFF) + if (bestLeadReg == 0xFFFFFFFF) { return DebugUtils::errored(kErrorConsecutiveRegsAllocation); + } for (i = 0; i < consecutiveCount; i++) { uint32_t consecutiveIndex = bestLeadReg + i; @@ -550,8 +572,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { if (assignedId == consecutiveIndex) { // If the register is already allocated in this one, mark it done and continue. 
tiedReg->markUseDone(); - if (tiedReg->isWrite()) + if (tiedReg->isWrite()) { _curAssignment.makeDirty(group, workId, assignedId); + } usePending--; willUse |= useMask; } @@ -578,8 +601,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { for (i = 0; i < count; i++) { RATiedReg* tiedReg = &tiedRegs[i]; - if (tiedReg->isUseDone()) + if (tiedReg->isUseDone()) { continue; + } uint32_t workId = tiedReg->workId(); uint32_t assignedId = _curAssignment.workToPhysId(group, workId); @@ -630,8 +654,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { if (!(liveRegs & useMask)) { ASMJIT_PROPAGATE(onMoveReg(group, workId, useId, assignedId)); tiedReg->markUseDone(); - if (tiedReg->isWrite()) + if (tiedReg->isWrite()) { _curAssignment.makeDirty(group, workId, useId); + } usePending--; } } @@ -640,8 +665,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { if (!(liveRegs & useMask)) { ASMJIT_PROPAGATE(onLoadReg(group, workId, useId)); tiedReg->markUseDone(); - if (tiedReg->isWrite()) + if (tiedReg->isWrite()) { _curAssignment.makeDirty(group, workId, useId); + } usePending--; } } @@ -743,8 +769,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { continue; } - if (!mustSwap) + if (!mustSwap) { continue; + } // Only branched here if the previous iteration did nothing. This is essentially a SWAP operation without // having a dedicated instruction for that purpose (vector registers, etc...). The simplest way to handle @@ -805,7 +832,7 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { // Must check if it's allocated as KILL can be related to OUT (like KILL immediately after OUT, which could // mean the register is not assigned). 
if (physId != RAAssignment::kPhysNone) { - ASMJIT_PROPAGATE(onKillReg(group, workId, physId)); + onKillReg(group, workId, physId); willOut &= ~Support::bitMask(physId); } @@ -851,7 +878,7 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { if (dstId == srcId) { continue; } - _pass->emitMove(workId, dstId, srcId); + ASMJIT_PROPAGATE(_pass->emitMove(workId, dstId, srcId)); } } @@ -947,18 +974,20 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept { // Allocate OUT registers. for (i = 0; i < outTiedCount; i++) { RATiedReg* tiedReg = outTiedRegs[i]; - if (!tiedReg->isOut()) + if (!tiedReg->isOut()) { continue; + } RegMask avoidOut = avoidRegs; - if (tiedReg->isUnique()) + if (tiedReg->isUnique()) { avoidOut |= willUse; + } uint32_t workId = tiedReg->workId(); uint32_t assignedId = _curAssignment.workToPhysId(group, workId); if (assignedId != RAAssignment::kPhysNone) { - ASMJIT_PROPAGATE(onKillReg(group, workId, assignedId)); + onKillReg(group, workId, assignedId); } uint32_t physId = tiedReg->outId(); @@ -1087,16 +1116,18 @@ Error RALocalAllocator::allocJumpTable(InstNode* node, const RABlocks& targets, // TODO: Do we really need to use `cont`? DebugUtils::unused(cont); - if (targets.empty()) + if (targets.empty()) { return DebugUtils::errored(kErrorInvalidState); + } // The cursor must point to the previous instruction for a possible instruction insertion. _cc->_setCursor(node->prev()); // All `targets` should have the same sharedAssignmentId, we just read the first. RABlock* anyTarget = targets[0]; - if (!anyTarget->hasSharedAssignmentId()) + if (!anyTarget->hasSharedAssignmentId()) { return DebugUtils::errored(kErrorInvalidState); + } RASharedAssignment& sharedAssignment = _pass->_sharedAssignments[anyTarget->sharedAssignmentId()]; diff --git a/src/asmjit/core/ralocal_p.h b/src/asmjit/core/ralocal_p.h index 7fd00f6..02beabe 100644 --- a/src/asmjit/core/ralocal_p.h +++ b/src/asmjit/core/ralocal_p.h @@ -80,30 +80,47 @@ public: //! 
\name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAWorkReg* workRegById(uint32_t workId) const noexcept { return _pass->workRegById(workId); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _curAssignment.physToWorkMap(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _curAssignment.workToPhysMap(); } //! Returns the currently processed block. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RABlock* block() const noexcept { return _block; } + //! Sets the currently processed block. ASMJIT_INLINE_NODEBUG void setBlock(RABlock* block) noexcept { _block = block; } //! Returns the currently processed `InstNode`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstNode* node() const noexcept { return _node; } + //! Returns the currently processed `RAInst`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAInst* raInst() const noexcept { return _raInst; } //! Returns all tied regs as `RATiedReg` array. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs() const noexcept { return _raInst->tiedRegs(); } + //! Returns tied registers grouped by the given `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs(RegGroup group) const noexcept { return _raInst->tiedRegs(group); } //! Returns count of all TiedRegs used by the instruction. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t tiedCount() const noexcept { return _tiedTotal; } + //! Returns count of TiedRegs used by the given register `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount.get(group); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isGroupUsed(RegGroup group) const noexcept { return _tiedCount[group] != 0; } //! \} @@ -111,8 +128,10 @@ public: //! \name Assignment //! \{ + [[nodiscard]] Error makeInitialAssignment() noexcept; + [[nodiscard]] Error replaceAssignment(const PhysToWorkMap* physToWorkMap) noexcept; //! 
Switch to the given assignment by reassigning all register and emitting code that reassigns them. @@ -121,12 +140,15 @@ public: //! If `tryMode` is true then the final assignment doesn't have to be exactly same as specified by `dstPhysToWorkMap` //! and `dstWorkToPhysMap`. This mode is only used before conditional jumps that already have assignment to generate //! a code sequence that is always executed regardless of the flow. + [[nodiscard]] Error switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, const ZoneBitVector& liveIn, bool dstReadOnly, bool tryMode) noexcept; + [[nodiscard]] ASMJIT_INLINE_NODEBUG Error spillRegsBeforeEntry(RABlock* block) noexcept { return spillScratchGpRegsBeforeEntry(block->entryScratchGpRegs()); } + [[nodiscard]] Error spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept; //! \} @@ -134,10 +156,16 @@ public: //! \name Allocation //! \{ + [[nodiscard]] Error allocInst(InstNode* node) noexcept; + + [[nodiscard]] Error spillAfterAllocation(InstNode* node) noexcept; + [[nodiscard]] Error allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept; + + [[nodiscard]] Error allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept; //! 
\} @@ -150,11 +178,13 @@ public: kCostOfDirtyFlag = kCostOfFrequency / 4 }; + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t costByFrequency(float freq) const noexcept { return uint32_t(int32_t(freq * float(kCostOfFrequency))); } - ASMJIT_FORCE_INLINE uint32_t calculateSpillCost(RegGroup group, uint32_t workId, uint32_t assignedId) const noexcept { + [[nodiscard]] + ASMJIT_INLINE uint32_t calculateSpillCost(RegGroup group, uint32_t workId, uint32_t assignedId) const noexcept { RAWorkReg* workReg = workRegById(workId); uint32_t cost = costByFrequency(workReg->liveStats().freq()); @@ -164,7 +194,8 @@ public: return cost; } - ASMJIT_FORCE_INLINE uint32_t pickBestSuitableRegister(RegGroup group, RegMask allocableRegs) const noexcept { + [[nodiscard]] + ASMJIT_INLINE uint32_t pickBestSuitableRegister(RegGroup group, RegMask allocableRegs) const noexcept { // These are registers must be preserved by the function itself. RegMask preservedRegs = _funcPreservedRegs[group]; @@ -177,6 +208,7 @@ public: } //! Decides on register assignment. + [[nodiscard]] uint32_t decideOnAssignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs) const noexcept; //! Decides on whether to MOVE or SPILL the given WorkReg, because it's allocated in a physical register that have @@ -185,9 +217,11 @@ public: //! The function must return either `RAAssignment::kPhysNone`, which means that the WorkReg of `workId` should be //! spilled, or a valid physical register ID, which means that the register should be moved to that physical register //! instead. + [[nodiscard]] uint32_t decideOnReassignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs, RAInst* raInst) const noexcept; //! Decides on best spill given a register mask `spillableRegs` + [[nodiscard]] uint32_t decideOnSpillFor(RegGroup group, uint32_t workId, RegMask spillableRegs, uint32_t* spillWorkId) const noexcept; //! \} @@ -197,6 +231,7 @@ public: //! 
Emits a move between a destination and source register, and fixes the //! register assignment. + [[nodiscard]] inline Error onMoveReg(RegGroup group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept { if (dstPhysId == srcPhysId) { return kErrorOk; @@ -209,6 +244,7 @@ public: //! Emits a swap between two physical registers and fixes their assignment. //! //! \note Target must support this operation otherwise this would ASSERT. + [[nodiscard]] inline Error onSwapReg(RegGroup group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept { _curAssignment.swap(group, aWorkId, aPhysId, bWorkId, bPhysId); return _pass->emitSwap(aWorkId, aPhysId, bWorkId, bPhysId); @@ -216,6 +252,7 @@ public: //! Emits a load from [VirtReg/WorkReg]'s spill slot to a physical register //! and makes it assigned and clean. + [[nodiscard]] inline Error onLoadReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { _curAssignment.assign(group, workId, physId, RAAssignment::kClean); return _pass->emitLoad(workId, physId); @@ -223,6 +260,7 @@ public: //! Emits a save a physical register to a [VirtReg/WorkReg]'s spill slot, //! keeps it assigned, and makes it clean. + [[nodiscard]] inline Error onSaveReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { ASMJIT_ASSERT(_curAssignment.workToPhysId(group, workId) == physId); ASMJIT_ASSERT(_curAssignment.physToWorkId(group, physId) == workId); @@ -232,26 +270,29 @@ public: } //! Assigns a register, the content of it is undefined at this point. + [[nodiscard]] inline Error onAssignReg(RegGroup group, uint32_t workId, uint32_t physId, bool dirty) noexcept { _curAssignment.assign(group, workId, physId, dirty); return kErrorOk; } //! Spills a variable/register, saves the content to the memory-home if modified. 
+ [[nodiscard]] inline Error onSpillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { if (_curAssignment.isPhysDirty(group, physId)) ASMJIT_PROPAGATE(onSaveReg(group, workId, physId)); - return onKillReg(group, workId, physId); + onKillReg(group, workId, physId); + return kErrorOk; } + [[nodiscard]] inline Error onDirtyReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { _curAssignment.makeDirty(group, workId, physId); return kErrorOk; } - inline Error onKillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { + inline void onKillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { _curAssignment.unassign(group, workId, physId); - return kErrorOk; } //! \} diff --git a/src/asmjit/core/rapass.cpp b/src/asmjit/core/rapass.cpp index 4a8130e..573fcb1 100644 --- a/src/asmjit/core/rapass.cpp +++ b/src/asmjit/core/rapass.cpp @@ -251,8 +251,9 @@ RABlock* BaseRAPass::newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stopped if (block) { // Exit node has always a block associated with it. If we went here it means that `cbLabel` passed here // is after the end of the function and cannot be merged with the function exit block. - if (node == func->exitNode()) + if (node == func->exitNode()) { block = nullptr; + } break; } @@ -273,8 +274,9 @@ RABlock* BaseRAPass::newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stopped if (!block) { block = newBlock(); - if (ASMJIT_UNLIKELY(!block)) + if (ASMJIT_UNLIKELY(!block)) { return nullptr; + } } cbLabel->setPassData(block); @@ -402,13 +404,15 @@ Error BaseRAPass::buildCFGViews() noexcept { for (;;) { for (;;) { - if (i >= current->successors().size()) + if (i >= current->successors().size()) { break; + } // Skip if already visited. RABlock* child = current->successors()[i++]; - if (visited.bitAt(child->blockId())) + if (visited.bitAt(child->blockId())) { continue; + } // Mark as visited to prevent visiting the same block multiple times. 
visited.setBit(child->blockId(), true); @@ -423,8 +427,9 @@ Error BaseRAPass::buildCFGViews() noexcept { current->_povOrder = _pov.size(); _pov.appendUnsafe(current); - if (stack.empty()) + if (stack.empty()) { break; + } RABlockVisitItem top = stack.pop(); current = top.block(); @@ -454,7 +459,7 @@ Error BaseRAPass::buildCFGViews() noexcept { // BaseRAPass - CFG - Dominators // ============================= -static ASMJIT_FORCE_INLINE RABlock* intersectBlocks(RABlock* b1, RABlock* b2) noexcept { +static ASMJIT_INLINE RABlock* intersectBlocks(RABlock* b1, RABlock* b2) noexcept { while (b1 != b2) { while (b2->povOrder() > b1->povOrder()) b1 = b1->iDom(); while (b1->povOrder() > b2->povOrder()) b2 = b2->iDom(); @@ -469,8 +474,9 @@ Error BaseRAPass::buildCFGDominators() noexcept { ASMJIT_RA_LOG_FORMAT("[BuildCFGDominators]\n"); #endif - if (_blocks.empty()) + if (_blocks.empty()) { return kErrorOk; + } RABlock* entryBlock = this->entryBlock(); entryBlock->setIDom(entryBlock); @@ -491,8 +497,9 @@ Error BaseRAPass::buildCFGDominators() noexcept { uint32_t i = _pov.size(); while (i) { RABlock* block = _pov[--i]; - if (block == entryBlock) + if (block == entryBlock) { continue; + } RABlock* iDom = nullptr; const RABlocks& preds = block->predecessors(); @@ -500,8 +507,9 @@ Error BaseRAPass::buildCFGDominators() noexcept { uint32_t j = preds.size(); while (j) { RABlock* p = preds[--j]; - if (!p->iDom()) + if (!p->iDom()) { continue; + } iDom = !iDom ? p : intersectBlocks(iDom, p); } @@ -525,12 +533,14 @@ bool BaseRAPass::_strictlyDominates(const RABlock* a, const RABlock* b) const no // Nothing strictly dominates the entry block. 
const RABlock* entryBlock = this->entryBlock(); - if (a == entryBlock) + if (a == entryBlock) { return false; + } const RABlock* iDom = b->iDom(); - while (iDom != a && iDom != entryBlock) + while (iDom != a && iDom != entryBlock) { iDom = iDom->iDom(); + } return iDom != entryBlock; } @@ -540,16 +550,19 @@ const RABlock* BaseRAPass::_nearestCommonDominator(const RABlock* a, const RABlo ASMJIT_ASSERT(b != nullptr); // called, as both `a` and `b` must be valid blocks. ASMJIT_ASSERT(a != b); // Checked by `dominates()` and `properlyDominates()`. - if (a == b) + if (a == b) { return a; + } // If `a` strictly dominates `b` then `a` is the nearest common dominator. - if (_strictlyDominates(a, b)) + if (_strictlyDominates(a, b)) { return a; + } // If `b` strictly dominates `a` then `b` is the nearest common dominator. - if (_strictlyDominates(b, a)) + if (_strictlyDominates(b, a)) { return b; + } const RABlock* entryBlock = this->entryBlock(); uint64_t timestamp = nextTimestamp(); @@ -564,8 +577,9 @@ const RABlock* BaseRAPass::_nearestCommonDominator(const RABlock* a, const RABlo // Check all B's dominators against marked dominators of A. block = b->iDom(); while (block != entryBlock) { - if (block->hasTimestamp(timestamp)) + if (block->hasTimestamp(timestamp)) { return block; + } block = block->iDom(); } @@ -580,8 +594,9 @@ Error BaseRAPass::removeUnreachableCode() noexcept { uint32_t numReachableBlocks = reachableBlockCount(); // All reachable -> nothing to do. 
- if (numAllBlocks == numReachableBlocks) + if (numAllBlocks == numReachableBlocks) { return kErrorOk; + } #ifndef ASMJIT_NO_LOGGING StringTmp<256> sb; @@ -591,8 +606,9 @@ Error BaseRAPass::removeUnreachableCode() noexcept { for (uint32_t i = 0; i < numAllBlocks; i++) { RABlock* block = _blocks[i]; - if (block->isReachable()) + if (block->isReachable()) { continue; + } ASMJIT_RA_LOG_FORMAT(" Removing code from unreachable block {%u}\n", i); BaseNode* first = block->first(); @@ -632,22 +648,26 @@ Error BaseRAPass::removeUnreachableCode() noexcept { } BaseNode* BaseRAPass::findSuccessorStartingAt(BaseNode* node) noexcept { - while (node && (node->isInformative() || node->hasNoEffect())) + while (node && (node->isInformative() || node->hasNoEffect())) { node = node->next(); + } return node; } bool BaseRAPass::isNextTo(BaseNode* node, BaseNode* target) noexcept { for (;;) { node = node->next(); - if (node == target) + if (node == target) { return true; + } - if (!node) + if (!node) { return false; + } - if (node->isCode() || node->isData()) + if (node->isCode() || node->isData()) { return false; + } } } @@ -668,12 +688,14 @@ Error BaseRAPass::_asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept { ASMJIT_PROPAGATE(wRegsByGroup.willGrow(allocator())); RAWorkReg* wReg = zone()->newT(vReg, wRegs.size()); - if (ASMJIT_UNLIKELY(!wReg)) + if (ASMJIT_UNLIKELY(!wReg)) { return DebugUtils::errored(kErrorOutOfMemory); + } vReg->setWorkReg(wReg); - if (!vReg->isStack()) + if (!vReg->isStack()) { wReg->setRegByteMask(Support::lsbMask(vReg->virtSize())); + } wRegs.appendUnsafe(wReg); wRegsByGroup.appendUnsafe(wReg); @@ -696,8 +718,9 @@ RAAssignment::WorkToPhysMap* BaseRAPass::newWorkToPhysMap() noexcept { } WorkToPhysMap* map = zone()->allocT(size); - if (ASMJIT_UNLIKELY(!map)) + if (ASMJIT_UNLIKELY(!map)) { return nullptr; + } map->reset(count); return map; @@ -708,8 +731,9 @@ RAAssignment::PhysToWorkMap* BaseRAPass::newPhysToWorkMap() noexcept { size_t size = 
PhysToWorkMap::sizeOf(count); PhysToWorkMap* map = zone()->allocT(size); - if (ASMJIT_UNLIKELY(!map)) + if (ASMJIT_UNLIKELY(!map)) { return nullptr; + } map->reset(count); return map; @@ -719,17 +743,17 @@ RAAssignment::PhysToWorkMap* BaseRAPass::newPhysToWorkMap() noexcept { // ========================================================= namespace LiveOps { - typedef ZoneBitVector::BitWord BitWord; + using BitWord = ZoneBitVector::BitWord; struct In { - static ASMJIT_FORCE_INLINE BitWord op(BitWord dst, BitWord out, BitWord gen, BitWord kill) noexcept { + static ASMJIT_INLINE BitWord op(BitWord dst, BitWord out, BitWord gen, BitWord kill) noexcept { DebugUtils::unused(dst); return (out | gen) & ~kill; } }; template - static ASMJIT_FORCE_INLINE bool op(BitWord* dst, const BitWord* a, uint32_t n) noexcept { + static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, uint32_t n) noexcept { BitWord changed = 0; for (uint32_t i = 0; i < n; i++) { @@ -744,7 +768,7 @@ namespace LiveOps { } template - static ASMJIT_FORCE_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, uint32_t n) noexcept { + static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, uint32_t n) noexcept { BitWord changed = 0; for (uint32_t i = 0; i < n; i++) { @@ -759,7 +783,7 @@ namespace LiveOps { } template - static ASMJIT_FORCE_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, const BitWord* c, uint32_t n) noexcept { + static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, const BitWord* c, uint32_t n) noexcept { BitWord changed = 0; #if defined(_MSC_VER) && _MSC_VER <= 1938 @@ -787,12 +811,14 @@ namespace LiveOps { uint32_t numSuccessors = successors.size(); // Calculate `OUT` based on `IN` of all successors. 
- for (uint32_t i = 0; i < numSuccessors; i++) + for (uint32_t i = 0; i < numSuccessors; i++) { changed |= op(block->liveOut().data(), successors[i]->liveIn().data(), numBitWords); + } // Calculate `IN` based on `OUT`, `GEN`, and `KILL` bits. - if (changed) + if (changed) { changed = op(block->liveIn().data(), block->liveOut().data(), block->gen().data(), block->kill().data(), numBitWords); + } return changed; } @@ -858,10 +884,12 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept { // Mark as: // KILL - if this VirtReg is killed afterwards. // LAST - if this VirtReg is last in this basic block. - if (block->kill().bitAt(workId)) + if (block->kill().bitAt(workId)) { tiedReg->addFlags(RATiedFlags::kKill); - else if (!block->gen().bitAt(workId)) + } + else if (!block->gen().bitAt(workId)) { tiedReg->addFlags(RATiedFlags::kLast); + } if (tiedReg->isWriteOnly()) { // KILL. @@ -887,8 +915,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept { nInsts++; } - if (node == stop) + if (node == stop) { break; + } node = node->prev(); ASMJIT_ASSERT(node != nullptr); @@ -976,8 +1005,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept { for (i = 0; i < numAllBlocks; i++) { RABlock* block = _blocks[i]; - if (!block->isReachable()) + if (!block->isReachable()) { continue; + } uint32_t blockId = block->blockId(); @@ -1087,8 +1117,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept { maxLiveCount.op(raInst->_liveCount); } - if (node == stop) + if (node == stop) { break; + } node = node->next(); ASMJIT_ASSERT(node != nullptr); @@ -1134,22 +1165,26 @@ Error BaseRAPass::assignArgIndexToWorkRegs() noexcept { for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { // Unassigned argument. 
const RegOnly& regArg = func()->argPack(argIndex)[valueIndex]; - if (!regArg.isReg() || !cc()->isVirtIdValid(regArg.id())) + if (!regArg.isReg() || !cc()->isVirtIdValid(regArg.id())) { continue; + } VirtReg* virtReg = cc()->virtRegById(regArg.id()); - if (!virtReg) + if (!virtReg) { continue; + } // Unreferenced argument. RAWorkReg* workReg = virtReg->workReg(); - if (!workReg) + if (!workReg) { continue; + } // Overwritten argument. uint32_t workId = workReg->workId(); - if (!liveIn.bitAt(workId)) + if (!liveIn.bitAt(workId)) { continue; + } workReg->setArgIndex(argIndex, valueIndex); const FuncValue& arg = func()->detail().arg(argIndex, valueIndex); @@ -1172,7 +1207,9 @@ static void RAPass_dumpSpans(String& sb, uint32_t index, const LiveRegSpans& liv for (uint32_t i = 0; i < liveSpans.size(); i++) { const LiveRegSpan& liveSpan = liveSpans[i]; - if (i) sb.append(", "); + if (i) { + sb.append(", "); + } sb.appendFormat("[%u:%u@%u]", liveSpan.a, liveSpan.b, liveSpan.id); } @@ -1197,11 +1234,13 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::initGlobalLiveSpans() noexcept { if (physCount) { liveSpans = allocator()->allocT(physCount * sizeof(LiveRegSpans)); - if (ASMJIT_UNLIKELY(!liveSpans)) + if (ASMJIT_UNLIKELY(!liveSpans)) { return DebugUtils::errored(kErrorOutOfMemory); + } - for (size_t physId = 0; physId < physCount; physId++) + for (size_t physId = 0; physId < physCount; physId++) { new(Support::PlacementNew{&liveSpans[physId]}) LiveRegSpans(); + } } _globalLiveSpans[group] = liveSpans; @@ -1272,8 +1311,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept { continue; } - if (err != 0xFFFFFFFFu) + if (err != 0xFFFFFFFFu) { return err; + } } } @@ -1294,8 +1334,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept { i = 0; for (;;) { uint32_t stop = consecutiveRegs.size(); - if (i == stop) + if (i == stop) { break; + } while (i < stop) { RAWorkReg* workReg = consecutiveRegs[i].workReg; @@ -1317,8 +1358,9 @@ ASMJIT_FAVOR_SPEED 
Error BaseRAPass::binPack(RegGroup group) noexcept { uint32_t numConsecutiveRegs = consecutiveRegs.size(); for (i = 0; i < numConsecutiveRegs; i++) { RAWorkReg* workReg = consecutiveRegs[i].workReg; - if (workReg->isAllocated()) + if (workReg->isAllocated()) { continue; + } RAWorkReg* parentReg = consecutiveRegs[i].parentReg; RegMask physRegs = 0; @@ -1330,16 +1372,18 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept { // NOTE: This should never be true as it would mean we would never allocate this virtual register // (not here, and not later when local register allocator processes RATiedReg sets). - if (ASMJIT_UNLIKELY(!physRegs)) + if (ASMJIT_UNLIKELY(!physRegs)) { return DebugUtils::errored(kErrorConsecutiveRegsAllocation); + } } } else if (parentReg->hasHomeRegId()) { uint32_t consecutiveId = parentReg->homeRegId() + 1; // NOTE: We don't support wrapping. If this goes beyond all allocable registers there is something wrong. - if (consecutiveId > 31 || !Support::bitTest(availableRegs, consecutiveId)) + if (consecutiveId > 31 || !Support::bitTest(availableRegs, consecutiveId)) { return DebugUtils::errored(kErrorConsecutiveRegsAllocation); + } workReg->setHintRegId(consecutiveId); physRegs = Support::bitMask(consecutiveId); @@ -1358,8 +1402,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept { break; } - if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu)) + if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu)) { return err; + } physRegs ^= Support::bitMask(physId); } @@ -1373,20 +1418,23 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept { for (i = 0; i < numWorkRegs; i++) { RAWorkReg* workReg = workRegs[i]; - if (workReg->isAllocated()) + if (workReg->isAllocated()) { continue; + } RegMask remainingPhysRegs = availableRegs; - if (remainingPhysRegs & workReg->preferredMask()) + if (remainingPhysRegs & workReg->preferredMask()) { remainingPhysRegs &= workReg->preferredMask(); + } RegMask physRegs = remainingPhysRegs & 
~preservedRegs; remainingPhysRegs &= preservedRegs; for (;;) { if (!physRegs) { - if (!remainingPhysRegs) + if (!remainingPhysRegs) { break; + } physRegs = remainingPhysRegs; remainingPhysRegs = 0; } @@ -1396,8 +1444,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept { if (workReg->clobberSurvivalMask()) { RegMask preferredMask = (physRegs | remainingPhysRegs) & workReg->clobberSurvivalMask(); if (preferredMask) { - if (preferredMask & ~remainingPhysRegs) + if (preferredMask & ~remainingPhysRegs) { preferredMask &= ~remainingPhysRegs; + } physId = Support::ctz(preferredMask); } } @@ -1412,16 +1461,18 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept { break; } - if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu)) + if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu)) { return err; + } physRegs &= ~Support::bitMask(physId); remainingPhysRegs &= ~Support::bitMask(physId); } // Keep it in `workRegs` if it was not allocated. - if (!physRegs) + if (!physRegs) { workRegs[dstIndex++] = workReg; + } } workRegs._setSize(dstIndex); @@ -1431,8 +1482,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept { ASMJIT_RA_LOG_COMPLEX({ for (uint32_t physId = 0; physId < physCount; physId++) { LiveRegSpans& live = _globalLiveSpans[group][physId]; - if (live.empty()) + if (live.empty()) { continue; + } sb.clear(); RAPass_dumpSpans(sb, physId, live); @@ -1448,8 +1500,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept { } else { _strategy[group].setType(RAStrategyType::kComplex); - for (RAWorkReg* workReg : workRegs) + for (RAWorkReg* workReg : workRegs) { workReg->markStackPreferred(); + } ASMJIT_RA_LOG_COMPLEX({ uint32_t count = workRegs.size(); @@ -1457,7 +1510,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept { sb.appendFormat(" Unassigned (%u): ", count); for (i = 0; i < numWorkRegs; i++) { RAWorkReg* workReg = workRegs[i]; - if (i) sb.append(", "); + if (i) { + sb.append(", "); + } 
sb.append(workReg->name()); } sb.append('\n'); @@ -1475,8 +1530,9 @@ Error BaseRAPass::runLocalAllocator() noexcept { RALocalAllocator lra(this); ASMJIT_PROPAGATE(lra.init()); - if (!blockCount()) + if (!blockCount()) { return kErrorOk; + } // The allocation is done when this reaches zero. uint32_t blocksRemaining = reachableBlockCount(); @@ -1489,7 +1545,7 @@ Error BaseRAPass::runLocalAllocator() noexcept { ASMJIT_ASSERT(block->isReachable()); // Assign function arguments for the initial block. The `lra` is valid now. - lra.makeInitialAssignment(); + ASMJIT_PROPAGATE(lra.makeInitialAssignment()); ASMJIT_PROPAGATE(setBlockEntryAssignment(block, block, lra._curAssignment)); // The loop starts from the first block and iterates blocks in order, however, the algorithm also allows to jump to @@ -1503,10 +1559,7 @@ Error BaseRAPass::runLocalAllocator() noexcept { BaseNode* afterLast = last->next(); bool unconditionalJump = false; - RABlock* consecutive = nullptr; - - if (block->hasSuccessors()) - consecutive = block->successors()[0]; + RABlock* consecutive = block->hasSuccessors() ? block->successors()[0] : nullptr; lra.setBlock(block); block->makeAllocated(); @@ -1539,10 +1592,12 @@ Error BaseRAPass::runLocalAllocator() noexcept { } ASMJIT_PROPAGATE(lra.allocInst(inst)); - if (inst->type() == NodeType::kInvoke) + if (inst->type() == NodeType::kInvoke) { ASMJIT_PROPAGATE(emitPreCall(inst->as())); - else + } + else { ASMJIT_PROPAGATE(lra.spillAfterAllocation(inst)); + } } node = next; } @@ -1566,30 +1621,34 @@ Error BaseRAPass::runLocalAllocator() noexcept { block->setFirst(beforeFirst->next()); block->setLast(afterLast ? afterLast->prev() : cc()->lastNode()); - if (--blocksRemaining == 0) + if (--blocksRemaining == 0) { break; + } // Switch to the next consecutive block, if any. if (consecutive) { block = consecutive; - if (!block->isAllocated()) + if (!block->isAllocated()) { continue; + } } // Get the next block. 
for (;;) { - if (++blockId >= blockCount()) + if (++blockId >= blockCount()) { blockId = 0; + } block = _blocks[blockId]; - if (!block->isReachable() || block->isAllocated() || !block->hasEntryAssignment()) + if (!block->isReachable() || block->isAllocated() || !block->hasEntryAssignment()) { continue; + } break; } // If we switched to some block we have to update the local allocator. - lra.replaceAssignment(block->entryPhysToWorkMap()); + ASMJIT_PROPAGATE(lra.replaceAssignment(block->entryPhysToWorkMap())); } _clobberedRegs.op(lra._clobberedRegs); @@ -1602,23 +1661,26 @@ Error BaseRAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlo // Shouldn't happen. Entry assignment of a block that has a shared-state will assign to all blocks // with the same sharedAssignmentId. It's a bug if the shared state has been already assigned. - if (!_sharedAssignments[sharedAssignmentId].empty()) + if (!_sharedAssignments[sharedAssignmentId].empty()) { return DebugUtils::errored(kErrorInvalidState); + } return setSharedAssignment(sharedAssignmentId, fromAssignment); } PhysToWorkMap* physToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap()); - if (ASMJIT_UNLIKELY(!physToWorkMap)) + if (ASMJIT_UNLIKELY(!physToWorkMap)) { return DebugUtils::errored(kErrorOutOfMemory); + } block->setEntryAssignment(physToWorkMap); // True if this is the first (entry) block, nothing to do in this case. if (block == fromBlock) { // Entry block should never have a shared state. 
- if (block->hasSharedAssignmentId()) + if (block->hasSharedAssignmentId()) { return DebugUtils::errored(kErrorInvalidState); + } return kErrorOk; } @@ -1637,8 +1699,9 @@ Error BaseRAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlo RegGroup group = workReg->group(); uint32_t physId = fromAssignment.workToPhysId(group, workId); - if (physId != RAAssignment::kPhysNone) + if (physId != RAAssignment::kPhysNone) { physToWorkMap->unassign(group, physId, _physRegIndex.get(group) + physId); + } } } @@ -1663,8 +1726,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig ASMJIT_ASSERT(!block->hasEntryAssignment()); PhysToWorkMap* entryPhysToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap()); - if (ASMJIT_UNLIKELY(!entryPhysToWorkMap)) + if (ASMJIT_UNLIKELY(!entryPhysToWorkMap)) { return DebugUtils::errored(kErrorOutOfMemory); + } block->setEntryAssignment(entryPhysToWorkMap); @@ -1681,8 +1745,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig uint32_t physId = it.next(); uint32_t workId = entryPhysToWorkMap->workIds[physBaseIndex + physId]; - if (!liveIn.bitAt(workId)) + if (!liveIn.bitAt(workId)) { entryPhysToWorkMap->unassign(group, physId, physBaseIndex + physId); + } } } } @@ -1694,8 +1759,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig while (it.hasNext()) { uint32_t physId = it.next(); - if (Support::bitTest(physToWorkMap->assigned[group], physId)) + if (Support::bitTest(physToWorkMap->assigned[group], physId)) { physToWorkMap->unassign(group, physId, physBaseIndex + physId); + } } } @@ -1705,8 +1771,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig Error BaseRAPass::blockEntryAssigned(const PhysToWorkMap* physToWorkMap) noexcept { // Complex allocation strategy requires to record register assignments upon block entry (or per shared state). 
for (RegGroup group : RegGroupVirtValues{}) { - if (!_strategy[group].isComplex()) + if (!_strategy[group].isComplex()) { continue; + } uint32_t physBaseIndex = _physRegIndex[group]; Support::BitWordIterator it(physToWorkMap->assigned[group]); @@ -1753,15 +1820,17 @@ Error BaseRAPass::updateStackFrame() noexcept { // Update some StackFrame information that we updated during allocation. The only information we don't have at the // moment is final local stack size, which is calculated last. FuncFrame& frame = func()->frame(); - for (RegGroup group : RegGroupVirtValues{}) + for (RegGroup group : RegGroupVirtValues{}) { frame.addDirtyRegs(group, _clobberedRegs[group]); + } frame.setLocalStackAlignment(_stackAllocator.alignment()); // If there are stack arguments that are not assigned to registers upon entry and the function doesn't require // dynamic stack alignment we keep these arguments where they are. This will also mark all stack slots that match // these arguments as allocated. - if (_numStackArgsToStackSlots) + if (_numStackArgsToStackSlots) { ASMJIT_PROPAGATE(_markStackArgsToKeep()); + } // Calculate offsets of all stack slots and update StackSize to reflect the calculated local stack size. ASMJIT_PROPAGATE(_stackAllocator.calculateStackFrame()); @@ -1773,13 +1842,15 @@ Error BaseRAPass::updateStackFrame() noexcept { ASMJIT_PROPAGATE(frame.finalize()); // StackAllocator allocates all stots starting from [0], adjust them when necessary. - if (frame.localStackOffset() != 0) + if (frame.localStackOffset() != 0) { ASMJIT_PROPAGATE(_stackAllocator.adjustSlotOffsets(int32_t(frame.localStackOffset()))); + } // Again, if there are stack arguments allocated in function's stack we have to handle them. This handles all cases // (either regular or dynamic stack alignment). 
- if (_numStackArgsToStackSlots) + if (_numStackArgsToStackSlots) { ASMJIT_PROPAGATE(_updateStackArgs()); + } return kErrorOk; } @@ -1800,8 +1871,9 @@ Error BaseRAPass::_markStackArgsToKeep() noexcept { // If the register doesn't have stack slot then we failed. It doesn't make much sense as it was marked as // `kFlagStackArgToStack`, which requires the WorkReg was live-in upon function entry. RAStackSlot* slot = workReg->stackSlot(); - if (ASMJIT_UNLIKELY(!slot)) + if (ASMJIT_UNLIKELY(!slot)) { return DebugUtils::errored(kErrorInvalidState); + } if (hasSAReg && srcArg.isStack() && !srcArg.isIndirect()) { uint32_t typeSize = TypeUtils::sizeOf(srcArg.typeId()); @@ -1832,8 +1904,9 @@ Error BaseRAPass::_updateStackArgs() noexcept { ASMJIT_ASSERT(workReg->hasArgIndex()); RAStackSlot* slot = workReg->stackSlot(); - if (ASMJIT_UNLIKELY(!slot)) + if (ASMJIT_UNLIKELY(!slot)) { return DebugUtils::errored(kErrorInvalidState); + } if (slot->isStackArg()) { const FuncValue& srcArg = _func->detail().arg(workReg->argIndex()); @@ -1930,38 +2003,48 @@ static void RAPass_formatLiveness(BaseRAPass* pass, String& sb, const RAInst* ra for (uint32_t i = 0; i < tiedCount; i++) { const RATiedReg& tiedReg = tiedRegs[i]; - if (i != 0) + if (i != 0) { sb.append(' '); + } sb.appendFormat("%s{", pass->workRegById(tiedReg.workId())->name()); sb.append(tiedReg.isReadWrite() ? 'X' : tiedReg.isRead() ? 'R' : tiedReg.isWrite() ? 
'W' : '?'); - if (tiedReg.isLeadConsecutive()) + if (tiedReg.isLeadConsecutive()) { sb.appendFormat("|Lead[%u]", tiedReg.consecutiveData() + 1u); + } - if (tiedReg.hasUseId()) + if (tiedReg.hasUseId()) { sb.appendFormat("|Use=%u", tiedReg.useId()); - else if (tiedReg.isUse()) + } + else if (tiedReg.isUse()) { sb.append("|Use"); + } - if (tiedReg.isUseConsecutive() && !tiedReg.isLeadConsecutive()) + if (tiedReg.isUseConsecutive() && !tiedReg.isLeadConsecutive()) { sb.appendFormat("+%u", tiedReg.consecutiveData()); + } - if (tiedReg.hasOutId()) + if (tiedReg.hasOutId()) { sb.appendFormat("|Out=%u", tiedReg.outId()); - else if (tiedReg.isOut()) + } + else if (tiedReg.isOut()) { sb.append("|Out"); + } - if (tiedReg.isOutConsecutive() && !tiedReg.isLeadConsecutive()) + if (tiedReg.isOutConsecutive() && !tiedReg.isLeadConsecutive()) { sb.appendFormat("+%u", tiedReg.consecutiveData()); + } - if (tiedReg.isLast()) + if (tiedReg.isLast()) { sb.append("|Last"); + } - if (tiedReg.isKill()) + if (tiedReg.isKill()) { sb.append("|Kill"); + } sb.append("}"); } @@ -1972,7 +2055,9 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::annotateCode() noexcept { for (const RABlock* block : _blocks) { BaseNode* node = block->first(); - if (!node) continue; + if (!node) { + continue; + } BaseNode* last = block->last(); for (;;) { @@ -1989,8 +2074,9 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::annotateCode() noexcept { } node->setInlineComment(static_cast(cc()->_dataZone.dup(sb.data(), sb.size(), true))); - if (node == last) + if (node == last) { break; + } node = node->next(); } } @@ -2001,10 +2087,12 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::annotateCode() noexcept { ASMJIT_FAVOR_SIZE Error BaseRAPass::_dumpBlockIds(String& sb, const RABlocks& blocks) noexcept { for (uint32_t i = 0, size = blocks.size(); i < size; i++) { const RABlock* block = blocks[i]; - if (i != 0) + if (i != 0) { ASMJIT_PROPAGATE(sb.appendFormat(", #%u", block->blockId())); - else + } + else { ASMJIT_PROPAGATE(sb.appendFormat("#%u", 
block->blockId())); + } } return kErrorOk; } @@ -2024,18 +2112,21 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::_dumpBlockLiveness(String& sb, const RABlock if (bits.bitAt(workId)) { RAWorkReg* wReg = workRegById(workId); - if (!n) + if (!n) { sb.appendFormat(" %s [", bitsName); - else + } + else { sb.append(", "); + } sb.append(wReg->name()); n++; } } - if (n) + if (n) { sb.append("]\n"); + } } return kErrorOk; @@ -2065,8 +2156,9 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::_dumpLiveSpans(String& sb) noexcept { LiveRegSpans& liveSpans = workReg->liveSpans(); for (uint32_t x = 0; x < liveSpans.size(); x++) { const LiveRegSpan& liveSpan = liveSpans[x]; - if (x) + if (x) { sb.append(", "); + } sb.appendFormat("[%u:%u]", liveSpan.a, liveSpan.b); } diff --git a/src/asmjit/core/rapass_p.h b/src/asmjit/core/rapass_p.h index 989667a..b79b9c8 100644 --- a/src/asmjit/core/rapass_p.h +++ b/src/asmjit/core/rapass_p.h @@ -56,24 +56,25 @@ class RABlock { public: ASMJIT_NONCOPYABLE(RABlock) - typedef RAAssignment::PhysToWorkMap PhysToWorkMap; - typedef RAAssignment::WorkToPhysMap WorkToPhysMap; + //! \name Types + //! \{ + + using PhysToWorkMap = RAAssignment::PhysToWorkMap; + using WorkToPhysMap = RAAssignment::WorkToPhysMap; + + //! \} //! \name Constants //! \{ - enum : uint32_t { - //! Unassigned block id. - kUnassignedId = 0xFFFFFFFFu - }; + //! Unassigned block id. + static inline constexpr uint32_t kUnassignedId = 0xFFFFFFFFu; - enum LiveType : uint32_t { - kLiveIn = 0, - kLiveOut = 1, - kLiveGen = 2, - kLiveKill = 3, - kLiveCount = 4 - }; + static inline constexpr uint32_t kLiveIn = 0; + static inline constexpr uint32_t kLiveOut = 1; + static inline constexpr uint32_t kLiveGen = 2; + static inline constexpr uint32_t kLiveKill = 3; + static inline constexpr uint32_t kLiveCount = 4; //! \} @@ -145,24 +146,48 @@ public: //! \name Accessors //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseRAPass* pass() const noexcept { return _ra; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept; + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t blockId() const noexcept { return _blockId; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RABlockFlags flags() const noexcept { return _flags; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(RABlockFlags flag) const noexcept { return Support::test(_flags, flag); } + ASMJIT_INLINE_NODEBUG void addFlags(RABlockFlags flags) noexcept { _flags |= flags; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAssigned() const noexcept { return _blockId != kUnassignedId; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isConstructed() const noexcept { return hasFlag(RABlockFlags::kIsConstructed); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isReachable() const noexcept { return hasFlag(RABlockFlags::kIsReachable); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isTargetable() const noexcept { return hasFlag(RABlockFlags::kIsTargetable); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return hasFlag(RABlockFlags::kIsAllocated); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFuncExit() const noexcept { return hasFlag(RABlockFlags::kIsFuncExit); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasTerminator() const noexcept { return hasFlag(RABlockFlags::kHasTerminator); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasConsecutive() const noexcept { return hasFlag(RABlockFlags::kHasConsecutive); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasJumpTable() const noexcept { return hasFlag(RABlockFlags::kHasJumpTable); } ASMJIT_INLINE_NODEBUG void makeConstructed(const RARegsStats& regStats) noexcept { @@ -174,11 +199,16 @@ public: ASMJIT_INLINE_NODEBUG void makeTargetable() noexcept { _flags |= RABlockFlags::kIsTargetable; } ASMJIT_INLINE_NODEBUG void makeAllocated() noexcept { _flags |= RABlockFlags::kIsAllocated; } + [[nodiscard]] 
ASMJIT_INLINE_NODEBUG const RARegsStats& regsStats() const noexcept { return _regsStats; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasPredecessors() const noexcept { return !_predecessors.empty(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasSuccessors() const noexcept { return !_successors.empty(); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasSuccessor(RABlock* block) noexcept { if (block->_predecessors.size() < _successors.size()) return block->_predecessors.contains(this); @@ -186,56 +216,97 @@ public: return _successors.contains(block); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RABlocks& predecessors() const noexcept { return _predecessors; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RABlocks& successors() const noexcept { return _successors; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* first() const noexcept { return _first; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* last() const noexcept { return _last; } ASMJIT_INLINE_NODEBUG void setFirst(BaseNode* node) noexcept { _first = node; } ASMJIT_INLINE_NODEBUG void setLast(BaseNode* node) noexcept { _last = node; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t firstPosition() const noexcept { return _firstPosition; } + ASMJIT_INLINE_NODEBUG void setFirstPosition(uint32_t position) noexcept { _firstPosition = position; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t endPosition() const noexcept { return _endPosition; } + ASMJIT_INLINE_NODEBUG void setEndPosition(uint32_t position) noexcept { _endPosition = position; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t povOrder() const noexcept { return _povOrder; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask entryScratchGpRegs() const noexcept; + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask exitScratchGpRegs() const noexcept { return _exitScratchGpRegs; } ASMJIT_INLINE_NODEBUG void addEntryScratchGpRegs(RegMask regMask) noexcept { _entryScratchGpRegs |= regMask; } ASMJIT_INLINE_NODEBUG void addExitScratchGpRegs(RegMask regMask) 
noexcept { _exitScratchGpRegs |= regMask; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasSharedAssignmentId() const noexcept { return _sharedAssignmentId != Globals::kInvalidId; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t sharedAssignmentId() const noexcept { return _sharedAssignmentId; } + ASMJIT_INLINE_NODEBUG void setSharedAssignmentId(uint32_t id) noexcept { _sharedAssignmentId = id; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t timestamp() const noexcept { return _timestamp; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasTimestamp(uint64_t ts) const noexcept { return _timestamp == ts; } + ASMJIT_INLINE_NODEBUG void setTimestamp(uint64_t ts) const noexcept { _timestamp = ts; } + ASMJIT_INLINE_NODEBUG void resetTimestamp() const noexcept { _timestamp = 0; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RABlock* consecutive() const noexcept { return hasConsecutive() ? _successors[0] : nullptr; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RABlock* iDom() noexcept { return _idom; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RABlock* iDom() const noexcept { return _idom; } + ASMJIT_INLINE_NODEBUG void setIDom(RABlock* block) noexcept { _idom = block; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneBitVector& liveIn() noexcept { return _liveBits[kLiveIn]; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveIn() const noexcept { return _liveBits[kLiveIn]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneBitVector& liveOut() noexcept { return _liveBits[kLiveOut]; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveOut() const noexcept { return _liveBits[kLiveOut]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneBitVector& gen() noexcept { return _liveBits[kLiveGen]; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneBitVector& gen() const noexcept { return _liveBits[kLiveGen]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneBitVector& kill() noexcept { return _liveBits[kLiveKill]; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneBitVector& kill() 
const noexcept { return _liveBits[kLiveKill]; } + [[nodiscard]] inline Error resizeLiveBits(uint32_t size) noexcept { ASMJIT_PROPAGATE(_liveBits[kLiveIn ].resize(allocator(), size)); ASMJIT_PROPAGATE(_liveBits[kLiveOut ].resize(allocator(), size)); @@ -244,8 +315,12 @@ public: return kErrorOk; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasEntryAssignment() const noexcept { return _entryPhysToWorkMap != nullptr; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG PhysToWorkMap* entryPhysToWorkMap() const noexcept { return _entryPhysToWorkMap; } + ASMJIT_INLINE_NODEBUG void setEntryAssignment(PhysToWorkMap* physToWorkMap) noexcept { _entryPhysToWorkMap = physToWorkMap; } //! \} @@ -256,11 +331,13 @@ public: //! Adds a successor to this block, and predecessor to `successor`, making connection on both sides. //! //! This API must be used to manage successors and predecessors, never manage it manually. + [[nodiscard]] Error appendSuccessor(RABlock* successor) noexcept; //! Similar to `appendSuccessor()`, but does prepend instead append. //! //! This function is used to add a natural flow (always first) to the block. + [[nodiscard]] Error prependSuccessor(RABlock* successor) noexcept; //! \} @@ -318,53 +395,76 @@ public: //! \{ //! Returns instruction RW flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstRWFlags instRWFlags() const noexcept { return _instRWFlags; }; + //! Tests whether the given `flag` is present in instruction RW flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasInstRWFlag(InstRWFlags flag) const noexcept { return Support::test(_instRWFlags, flag); } + //! Adds `flags` to instruction RW flags. ASMJIT_INLINE_NODEBUG void addInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags |= flags; } //! Returns the instruction flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedFlags flags() const noexcept { return _flags; } + //! Tests whether the instruction has flag `flag`. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(RATiedFlags flag) const noexcept { return Support::test(_flags, flag); } + //! Replaces the existing instruction flags with `flags`. ASMJIT_INLINE_NODEBUG void setFlags(RATiedFlags flags) noexcept { _flags = flags; } + //! Adds instruction `flags` to this RAInst. ASMJIT_INLINE_NODEBUG void addFlags(RATiedFlags flags) noexcept { _flags |= flags; } + //! Clears instruction `flags` from this RAInst. ASMJIT_INLINE_NODEBUG void clearFlags(RATiedFlags flags) noexcept { _flags &= ~flags; } //! Tests whether one operand of this instruction has been patched from Reg to Mem. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isRegToMemPatched() const noexcept { return hasFlag(RATiedFlags::kInst_RegToMemPatched); } + //! Tests whether this instruction can be transformed to another instruction if necessary. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isTransformable() const noexcept { return hasFlag(RATiedFlags::kInst_IsTransformable); } //! Returns the associated block with this RAInst. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RABlock* block() const noexcept { return _block; } //! Returns tied registers (all). + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs() const noexcept { return const_cast(_tiedRegs); } + //! Returns tied registers for a given `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs(RegGroup group) const noexcept { return const_cast(_tiedRegs) + _tiedIndex.get(group); } //! Returns count of all tied registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t tiedCount() const noexcept { return _tiedTotal; } + //! Returns count of tied registers of a given `group`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount[group]; } //! Returns `RATiedReg` at the given `index`. + [[nodiscard]] inline RATiedReg* tiedAt(uint32_t index) const noexcept { ASMJIT_ASSERT(index < _tiedTotal); return tiedRegs() + index; } //! 
Returns `RATiedReg` at the given `index` of the given register `group`. + [[nodiscard]] inline RATiedReg* tiedOf(RegGroup group, uint32_t index) const noexcept { ASMJIT_ASSERT(index < _tiedCount.get(group)); return tiedRegs(group) + index; } + [[nodiscard]] inline const RATiedReg* tiedRegForWorkReg(RegGroup group, uint32_t workId) const noexcept { const RATiedReg* array = tiedRegs(group); size_t count = tiedCount(group); @@ -387,6 +487,7 @@ public: //! \name Static Functions //! \{ + [[nodiscard]] static ASMJIT_INLINE_NODEBUG size_t sizeOf(uint32_t tiedRegCount) noexcept { return sizeof(RAInst) - sizeof(RATiedReg) + tiedRegCount * sizeof(RATiedReg); } @@ -447,33 +548,51 @@ public: //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstRWFlags instRWFlags() const noexcept { return _instRWFlags; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasInstRWFlag(InstRWFlags flag) const noexcept { return Support::test(_instRWFlags, flag); } + ASMJIT_INLINE_NODEBUG void addInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags |= flags; } + ASMJIT_INLINE_NODEBUG void clearInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags &= ~flags; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedFlags aggregatedFlags() const noexcept { return _aggregatedFlags; } + ASMJIT_INLINE_NODEBUG void addAggregatedFlags(RATiedFlags flags) noexcept { _aggregatedFlags |= flags; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedFlags forbiddenFlags() const noexcept { return _forbiddenFlags; } + ASMJIT_INLINE_NODEBUG void addForbiddenFlags(RATiedFlags flags) noexcept { _forbiddenFlags |= flags; } //! Returns the number of tied registers added to the builder. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t tiedRegCount() const noexcept { return uint32_t((size_t)(_cur - _tiedRegs)); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedReg* begin() noexcept { return _tiedRegs; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RATiedReg* end() noexcept { return _cur; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RATiedReg* begin() const noexcept { return _tiedRegs; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RATiedReg* end() const noexcept { return _cur; } //! Returns `RATiedReg` at the given `index`. + [[nodiscard]] inline RATiedReg* operator[](size_t index) noexcept { ASMJIT_ASSERT(index < tiedRegCount()); return &_tiedRegs[index]; } //! Returns `RATiedReg` at the given `index`. (const). + [[nodiscard]] inline const RATiedReg* operator[](size_t index) const noexcept { ASMJIT_ASSERT(index < tiedRegCount()); return &_tiedRegs[index]; @@ -484,6 +603,7 @@ public: //! \name Utilities //! \{ + [[nodiscard]] Error add( RAWorkReg* workReg, RATiedFlags flags, @@ -524,20 +644,23 @@ public: } else { if (consecutiveParent != tiedReg->consecutiveParent()) { - if (tiedReg->consecutiveParent() != Globals::kInvalidId) + if (tiedReg->consecutiveParent() != Globals::kInvalidId) { return DebugUtils::errored(kErrorInvalidState); + } tiedReg->_consecutiveParent = consecutiveParent; } if (useId != BaseReg::kIdBad) { - if (ASMJIT_UNLIKELY(tiedReg->hasUseId())) + if (ASMJIT_UNLIKELY(tiedReg->hasUseId())) { return DebugUtils::errored(kErrorOverlappedRegs); + } tiedReg->setUseId(useId); } if (outId != BaseReg::kIdBad) { - if (ASMJIT_UNLIKELY(tiedReg->hasOutId())) + if (ASMJIT_UNLIKELY(tiedReg->hasOutId())) { return DebugUtils::errored(kErrorOverlappedRegs); + } tiedReg->setOutId(outId); } @@ -552,6 +675,7 @@ public: } } + [[nodiscard]] Error addCallArg(RAWorkReg* workReg, uint32_t useId) noexcept { ASMJIT_ASSERT(useId != BaseReg::kIdBad); @@ -594,6 +718,7 @@ public: } } + [[nodiscard]] Error addCallRet(RAWorkReg* workReg, uint32_t outId) noexcept { 
ASMJIT_ASSERT(outId != BaseReg::kIdBad); @@ -621,8 +746,9 @@ public: return kErrorOk; } else { - if (tiedReg->hasOutId()) + if (tiedReg->hasOutId()) { return DebugUtils::errored(kErrorOverlappedRegs); + } tiedReg->addRefCount(); tiedReg->addFlags(flags); @@ -639,8 +765,13 @@ public: //! See \ref RAAssignment for more information about register assignments. class RASharedAssignment { public: - typedef RAAssignment::PhysToWorkMap PhysToWorkMap; - typedef RAAssignment::WorkToPhysMap WorkToPhysMap; + //! \name Types + //! \{ + + using PhysToWorkMap = RAAssignment::PhysToWorkMap; + using WorkToPhysMap = RAAssignment::WorkToPhysMap; + + //! \} //! \name Members //! \{ @@ -659,14 +790,20 @@ public: //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _physToWorkMap == nullptr; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RegMask entryScratchGpRegs() const noexcept { return _entryScratchGpRegs; } + ASMJIT_INLINE_NODEBUG void addEntryScratchGpRegs(RegMask mask) noexcept { _entryScratchGpRegs |= mask; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveIn() const noexcept { return _liveIn; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; } + ASMJIT_INLINE_NODEBUG void assignPhysToWorkMap(PhysToWorkMap* physToWorkMap) noexcept { _physToWorkMap = physToWorkMap; } //! \} @@ -676,14 +813,22 @@ public: class BaseRAPass : public FuncPass { public: ASMJIT_NONCOPYABLE(BaseRAPass) - typedef FuncPass Base; + using Base = FuncPass; - enum : uint32_t { - kCallArgWeight = 80 - }; + //! \name Constants + //! \{ - typedef RAAssignment::PhysToWorkMap PhysToWorkMap; - typedef RAAssignment::WorkToPhysMap WorkToPhysMap; + static inline constexpr uint32_t kCallArgWeight = 80; + + //! \} + + //! \name Types + //! \{ + + using PhysToWorkMap = RAAssignment::PhysToWorkMap; + using WorkToPhysMap = RAAssignment::WorkToPhysMap; + + //! \} //! \name Members //! 
\{ @@ -787,37 +932,55 @@ public: //! \{ //! Returns \ref Logger passed to \ref runOnFunction(). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; } //! Returns either a valid logger if the given `option` is set and logging is enabled, or nullptr. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Logger* getLoggerIf(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option) ? _logger : nullptr; } //! Returns whether the diagnostic `option` is enabled. //! //! \note Returns false if there is no logger (as diagnostics without logging make no sense). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); } //! Returns \ref Zone passed to \ref runOnFunction(). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Zone* zone() const noexcept { return _allocator.zone(); } + //! Returns \ref ZoneAllocator used by the register allocator. + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return const_cast(&_allocator); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG const ZoneVector& sharedAssignments() const { return _sharedAssignments; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t sharedAssignmentCount() const noexcept { return _sharedAssignments.size(); } //! Returns the current function node. + [[nodiscard]] ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; } + //! Returns the stop of the current function. + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* stop() const noexcept { return _stop; } //! Returns an extra block used by the current function being processed. + [[nodiscard]] ASMJIT_INLINE_NODEBUG BaseNode* extraBlock() const noexcept { return _extraBlock; } + //! Sets an extra block, see `extraBlock()`. 
ASMJIT_INLINE_NODEBUG void setExtraBlock(BaseNode* node) noexcept { _extraBlock = node; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t endPosition() const noexcept { return _instructionCount * 2; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RARegMask& availableRegs() const noexcept { return _availableRegs; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RARegMask& clobberedRegs() const noexcept { return _clobberedRegs; } //! \} @@ -855,25 +1018,33 @@ public: //! \{ //! Returns the function's entry block. + [[nodiscard]] inline RABlock* entryBlock() noexcept { ASMJIT_ASSERT(!_blocks.empty()); return _blocks[0]; } //! \overload + [[nodiscard]] inline const RABlock* entryBlock() const noexcept { ASMJIT_ASSERT(!_blocks.empty()); return _blocks[0]; } //! Returns all basic blocks of this function. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RABlocks& blocks() noexcept { return _blocks; } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RABlocks& blocks() const noexcept { return _blocks; } //! Returns the count of basic blocks (returns size of `_blocks` array). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t blockCount() const noexcept { return _blocks.size(); } + //! Returns the count of reachable basic blocks (returns size of `_pov` array). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t reachableBlockCount() const noexcept { return _pov.size(); } //! Tests whether the CFG has dangling blocks - these were created by `newBlock()`, but not added to CFG through @@ -881,41 +1052,51 @@ public: //! incomplete. //! //! \note This is only used to check if the number of created blocks matches the number of added blocks. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasDanglingBlocks() const noexcept { return _createdBlockCount != blockCount(); } //! Gest a next timestamp to be used to mark CFG blocks. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint64_t nextTimestamp() const noexcept { return ++_lastTimestamp; } //! Creates a new `RABlock` instance. //! //! 
\note New blocks don't have ID assigned until they are added to the block array by calling `addBlock()`. + [[nodiscard]] RABlock* newBlock(BaseNode* initialNode = nullptr) noexcept; //! Tries to find a neighboring LabelNode (without going through code) that is already connected with `RABlock`. //! If no label is found then a new RABlock is created and assigned to all possible labels in a backward direction. + [[nodiscard]] RABlock* newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stoppedAt = nullptr) noexcept; //! Adds the given `block` to the block list and assign it a unique block id. + [[nodiscard]] Error addBlock(RABlock* block) noexcept; + [[nodiscard]] inline Error addExitBlock(RABlock* block) noexcept { block->addFlags(RABlockFlags::kIsFuncExit); return _exits.append(allocator(), block); } - ASMJIT_FORCE_INLINE RAInst* newRAInst(RABlock* block, InstRWFlags instRWFlags, RATiedFlags flags, uint32_t tiedRegCount, const RARegMask& clobberedRegs) noexcept { + [[nodiscard]] + ASMJIT_INLINE RAInst* newRAInst(RABlock* block, InstRWFlags instRWFlags, RATiedFlags flags, uint32_t tiedRegCount, const RARegMask& clobberedRegs) noexcept { void* p = zone()->alloc(RAInst::sizeOf(tiedRegCount)); - if (ASMJIT_UNLIKELY(!p)) + if (ASMJIT_UNLIKELY(!p)) { return nullptr; + } return new(Support::PlacementNew{p}) RAInst(block, instRWFlags, flags, tiedRegCount, clobberedRegs); } - ASMJIT_FORCE_INLINE Error assignRAInst(BaseNode* node, RABlock* block, RAInstBuilder& ib) noexcept { + [[nodiscard]] + ASMJIT_INLINE Error assignRAInst(BaseNode* node, RABlock* block, RAInstBuilder& ib) noexcept { uint32_t tiedRegCount = ib.tiedRegCount(); RAInst* raInst = newRAInst(block, ib.instRWFlags(), ib.aggregatedFlags(), tiedRegCount, ib._clobbered); - if (ASMJIT_UNLIKELY(!raInst)) + if (ASMJIT_UNLIKELY(!raInst)) { return DebugUtils::errored(kErrorOutOfMemory); + } RARegIndex index; RATiedFlags flagsFilter = ~ib.forbiddenFlags(); @@ -944,8 +1125,9 @@ public: dst = *tiedReg; dst._flags &= 
flagsFilter; - if (!tiedReg->isDuplicate()) + if (!tiedReg->isDuplicate()) { dst._useRegMask &= ~ib._used[group]; + } } node->setPassData(raInst); @@ -970,9 +1152,11 @@ public: //! analysis and register allocation. //! //! Use `RACFGBuilderT` template that provides the necessary boilerplate. + [[nodiscard]] virtual Error buildCFG() noexcept; //! Called after the CFG is built. + [[nodiscard]] Error initSharedAssignments(const ZoneVector& sharedAssignmentsMap) noexcept; //! \} @@ -981,6 +1165,7 @@ public: //! \{ //! Constructs CFG views (only POV at the moment). + [[nodiscard]] Error buildCFGViews() noexcept; //! \} @@ -993,19 +1178,29 @@ public: // - A node `Z` post-dominates a node `X` if any path from `X` to the end of the graph has to go through `Z`. //! Constructs a dominator-tree from CFG. + [[nodiscard]] Error buildCFGDominators() noexcept; + [[nodiscard]] bool _strictlyDominates(const RABlock* a, const RABlock* b) const noexcept; + + [[nodiscard]] const RABlock* _nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept; //! Tests whether the basic block `a` dominates `b` - non-strict, returns true when `a == b`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool dominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? true : _strictlyDominates(a, b); } + //! Tests whether the basic block `a` dominates `b` - strict dominance check, returns false when `a == b`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool strictlyDominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? false : _strictlyDominates(a, b); } //! Returns a nearest common dominator of `a` and `b`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG RABlock* nearestCommonDominator(RABlock* a, RABlock* b) const noexcept { return const_cast(_nearestCommonDominator(a, b)); } + //! Returns a nearest common dominator of `a` and `b` (const). 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG const RABlock* nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept { return _nearestCommonDominator(a, b); } //! \} @@ -1013,15 +1208,18 @@ public: //! \name CFG - Utilities //! \{ + [[nodiscard]] Error removeUnreachableCode() noexcept; //! Returns `node` or some node after that is ideal for beginning a new block. This function is mostly used after //! a conditional or unconditional jump to select the successor node. In some cases the next node could be a label, //! which means it could have assigned some block already. + [[nodiscard]] BaseNode* findSuccessorStartingAt(BaseNode* node) noexcept; //! Returns `true` of the `node` can flow to `target` without reaching code nor data. It's used to eliminate jumps //! to labels that are next right to them. + [[nodiscard]] bool isNextTo(BaseNode* node, BaseNode* target) noexcept; //! \} @@ -1030,18 +1228,31 @@ public: //! \{ //! Returns a native size of the general-purpose register of the target architecture. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return _sp.size(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t availableRegCount(RegGroup group) const noexcept { return _availableRegCount[group]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAWorkReg* workRegById(uint32_t workId) const noexcept { return _workRegs[workId]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAWorkRegs& workRegs() noexcept { return _workRegs; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAWorkRegs& workRegs(RegGroup group) noexcept { return _workRegsOfGroup[group]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RAWorkRegs& workRegs() const noexcept { return _workRegs; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RAWorkRegs& workRegs(RegGroup group) const noexcept { return _workRegsOfGroup[group]; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t workRegCount() const noexcept { return _workRegs.size(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t workRegCount(RegGroup group) const noexcept { return _workRegsOfGroup[group].size(); } inline void _buildPhysIndex() noexcept { @@ -1049,30 +1260,39 @@ public: _physRegTotal = uint32_t(_physRegIndex[RegGroup::kMaxVirt]) + uint32_t(_physRegCount[RegGroup::kMaxVirt]) ; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t physRegIndex(RegGroup group) const noexcept { return _physRegIndex[group]; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t physRegTotal() const noexcept { return _physRegTotal; } + [[nodiscard]] Error _asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept; //! Creates `RAWorkReg` data for the given `vReg`. The function does nothing //! if `vReg` already contains link to `RAWorkReg`. Called by `constructBlocks()`. + [[nodiscard]] inline Error asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept { *out = vReg->workReg(); return *out ? 
kErrorOk : _asWorkReg(vReg, out); } - ASMJIT_FORCE_INLINE Error virtIndexAsWorkReg(uint32_t vIndex, RAWorkReg** out) noexcept { + [[nodiscard]] + ASMJIT_INLINE Error virtIndexAsWorkReg(uint32_t vIndex, RAWorkReg** out) noexcept { const ZoneVector& virtRegs = cc()->virtRegs(); if (ASMJIT_UNLIKELY(vIndex >= virtRegs.size())) return DebugUtils::errored(kErrorInvalidVirtId); return asWorkReg(virtRegs[vIndex], out); } + [[nodiscard]] inline RAStackSlot* getOrCreateStackSlot(RAWorkReg* workReg) noexcept { RAStackSlot* slot = workReg->stackSlot(); - if (slot) + if (slot) { return slot; + } slot = _stackAllocator.newSlot(_sp.id(), workReg->virtReg()->virtSize(), workReg->virtReg()->alignment(), RAStackSlot::kFlagRegHome); workReg->_stackSlot = slot; @@ -1080,17 +1300,22 @@ public: return slot; } + [[nodiscard]] inline BaseMem workRegAsMem(RAWorkReg* workReg) noexcept { - getOrCreateStackSlot(workReg); + (void)getOrCreateStackSlot(workReg); return BaseMem(OperandSignature::fromOpType(OperandType::kMem) | OperandSignature::fromMemBaseType(_sp.type()) | OperandSignature::fromBits(OperandSignature::kMemRegHomeFlag), workReg->virtId(), 0, 0); } + [[nodiscard]] WorkToPhysMap* newWorkToPhysMap() noexcept; + + [[nodiscard]] PhysToWorkMap* newPhysToWorkMap() noexcept; + [[nodiscard]] inline PhysToWorkMap* clonePhysToWorkMap(const PhysToWorkMap* map) noexcept { size_t size = PhysToWorkMap::sizeOf(_physRegTotal); return static_cast(zone()->dupAligned(map, size, sizeof(uint32_t))); @@ -1101,10 +1326,12 @@ public: //! 1. Calculates GEN/KILL/IN/OUT of each block. //! 2. Calculates live spans and basic statistics of each work register. + [[nodiscard]] Error buildLiveness() noexcept; //! Assigns argIndex to WorkRegs. Must be called after the liveness analysis //! finishes as it checks whether the argument is live upon entry. + [[nodiscard]] Error assignArgIndexToWorkRegs() noexcept; //! \} @@ -1113,11 +1340,14 @@ public: //! \{ //! Runs a global register allocator. 
+ [[nodiscard]] Error runGlobalAllocator() noexcept; //! Initializes data structures used for global live spans. + [[nodiscard]] Error initGlobalLiveSpans() noexcept; + [[nodiscard]] Error binPack(RegGroup group) noexcept; //! \} @@ -1126,13 +1356,19 @@ public: //! \{ //! Runs a local register allocator. + [[nodiscard]] Error runLocalAllocator() noexcept; + + [[nodiscard]] Error setBlockEntryAssignment(RABlock* block, const RABlock* fromBlock, const RAAssignment& fromAssignment) noexcept; + + [[nodiscard]] Error setSharedAssignment(uint32_t sharedAssignmentId, const RAAssignment& fromAssignment) noexcept; //! Called after the RA assignment has been assigned to a block. //! //! This cannot change the assignment, but can examine it. + [[nodiscard]] Error blockEntryAssigned(const PhysToWorkMap* physToWorkMap) noexcept; //! \} @@ -1140,6 +1376,7 @@ public: //! \name Register Allocation Utilities //! \{ + [[nodiscard]] Error useTemporaryMem(BaseMem& out, uint32_t size, uint32_t alignment) noexcept; //! \} @@ -1147,9 +1384,16 @@ public: //! \name Function Prolog & Epilog //! \{ + [[nodiscard]] virtual Error updateStackFrame() noexcept; + + [[nodiscard]] Error _markStackArgsToKeep() noexcept; + + [[nodiscard]] Error _updateStackArgs() noexcept; + + [[nodiscard]] Error insertPrologEpilog() noexcept; //! \} @@ -1157,7 +1401,10 @@ public: //! \name Instruction Rewriter //! \{ + [[nodiscard]] Error rewrite() noexcept; + + [[nodiscard]] virtual Error _rewrite(BaseNode* first, BaseNode* stop) noexcept; //! \} @@ -1167,7 +1414,6 @@ public: //! \{ Error annotateCode() noexcept; - Error _dumpBlockIds(String& sb, const RABlocks& blocks) noexcept; Error _dumpBlockLiveness(String& sb, const RABlock* block) noexcept; Error _dumpLiveSpans(String& sb) noexcept; @@ -1178,13 +1424,22 @@ public: //! \name Emit //! 
\{ + [[nodiscard]] virtual Error emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept; + + [[nodiscard]] virtual Error emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept; + [[nodiscard]] virtual Error emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept; + + [[nodiscard]] virtual Error emitSave(uint32_t workId, uint32_t srcPhysId) noexcept; + [[nodiscard]] virtual Error emitJump(const Label& label) noexcept; + + [[nodiscard]] virtual Error emitPreCall(InvokeNode* invokeNode) noexcept; //! \} @@ -1194,8 +1449,9 @@ inline ZoneAllocator* RABlock::allocator() const noexcept { return _ra->allocato inline RegMask RABlock::entryScratchGpRegs() const noexcept { RegMask regs = _entryScratchGpRegs; - if (hasSharedAssignmentId()) + if (hasSharedAssignmentId()) { regs = _ra->_sharedAssignments[_sharedAssignmentId].entryScratchGpRegs(); + } return regs; } diff --git a/src/asmjit/core/rastack.cpp b/src/asmjit/core/rastack.cpp index 318fbde..bb8b06e 100644 --- a/src/asmjit/core/rastack.cpp +++ b/src/asmjit/core/rastack.cpp @@ -15,12 +15,14 @@ ASMJIT_BEGIN_NAMESPACE // ======================== RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept { - if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk)) + if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk)) { return nullptr; + } RAStackSlot* slot = allocator()->allocT(); - if (ASMJIT_UNLIKELY(!slot)) + if (ASMJIT_UNLIKELY(!slot)) { return nullptr; + } slot->_baseRegId = uint8_t(baseRegId); slot->_alignment = uint8_t(Support::max(alignment, 1)); @@ -72,15 +74,18 @@ Error RAStackAllocator::calculateStackFrame() noexcept { uint32_t power = Support::min(Support::ctz(alignment), 6); uint64_t weight; - if (slot->isRegHome()) + if (slot->isRegHome()) { weight = kBaseRegWeight + (uint64_t(slot->useCount()) * (7 - power)); - else + } + else { weight = power; + } // If overflown, which has 
less chance of winning a lottery, just use max possible weight. In such case it // probably doesn't matter at all. - if (weight > 0xFFFFFFFFu) + if (weight > 0xFFFFFFFFu) { weight = 0xFFFFFFFFu; + } slot->setWeight(uint32_t(weight)); } @@ -104,8 +109,9 @@ Error RAStackAllocator::calculateStackFrame() noexcept { ZoneVector gaps[kSizeCount - 1]; for (RAStackSlot* slot : _slots) { - if (slot->isStackArg()) + if (slot->isStackArg()) { continue; + } uint32_t slotAlignment = slot->alignment(); uint32_t alignedOffset = Support::alignUp(offset, slotAlignment); @@ -153,8 +159,9 @@ Error RAStackAllocator::calculateStackFrame() noexcept { uint32_t slotSize = 1u << index; // Weird case, better to bail... - if (gapEnd - gapOffset < slotSize) + if (gapEnd - gapOffset < slotSize) { break; + } ASMJIT_PROPAGATE(gaps[index].append(allocator(), RAStackGap(gapOffset, slotSize))); gapOffset += slotSize; @@ -173,9 +180,11 @@ Error RAStackAllocator::calculateStackFrame() noexcept { } Error RAStackAllocator::adjustSlotOffsets(int32_t offset) noexcept { - for (RAStackSlot* slot : _slots) - if (!slot->isStackArg()) + for (RAStackSlot* slot : _slots) { + if (!slot->isStackArg()) { slot->_offset += offset; + } + } return kErrorOk; } diff --git a/src/asmjit/core/rastack_p.h b/src/asmjit/core/rastack_p.h index 15db8e9..1aff61b 100644 --- a/src/asmjit/core/rastack_p.h +++ b/src/asmjit/core/rastack_p.h @@ -57,32 +57,50 @@ struct RAStackSlot { //! \name Accessors //! 
\{ + [[nodiscard]] inline uint32_t baseRegId() const noexcept { return _baseRegId; } + inline void setBaseRegId(uint32_t id) noexcept { _baseRegId = uint8_t(id); } + [[nodiscard]] inline uint32_t size() const noexcept { return _size; } + + [[nodiscard]] inline uint32_t alignment() const noexcept { return _alignment; } + [[nodiscard]] inline uint32_t flags() const noexcept { return _flags; } + + [[nodiscard]] inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; } + inline void addFlags(uint32_t flags) noexcept { _flags = uint16_t(_flags | flags); } + [[nodiscard]] inline bool isRegHome() const noexcept { return hasFlag(kFlagRegHome); } + + [[nodiscard]] inline bool isStackArg() const noexcept { return hasFlag(kFlagStackArg); } + [[nodiscard]] inline uint32_t useCount() const noexcept { return _useCount; } + inline void addUseCount(uint32_t n = 1) noexcept { _useCount += n; } + [[nodiscard]] inline uint32_t weight() const noexcept { return _weight; } + inline void setWeight(uint32_t weight) noexcept { _weight = weight; } + [[nodiscard]] inline int32_t offset() const noexcept { return _offset; } + inline void setOffset(int32_t offset) noexcept { _offset = offset; } //! \} }; -typedef ZoneVector RAStackSlots; +using RAStackSlots = ZoneVector; //! Stack allocator. class RAStackAllocator { @@ -134,14 +152,25 @@ public: //! \name Accessors //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return _allocator; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t bytesUsed() const noexcept { return _bytesUsed; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t stackSize() const noexcept { return _stackSize; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG RAStackSlots& slots() noexcept { return _slots; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const RAStackSlots& slots() const noexcept { return _slots; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t slotCount() const noexcept { return _slots.size(); } //! \} @@ -149,9 +178,13 @@ public: //! \name Utilities //! \{ + [[nodiscard]] RAStackSlot* newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags = 0) noexcept; + [[nodiscard]] Error calculateStackFrame() noexcept; + + [[nodiscard]] Error adjustSlotOffsets(int32_t offset) noexcept; //! \} diff --git a/src/asmjit/core/string.cpp b/src/asmjit/core/string.cpp index dfd7404..0aa349b 100644 --- a/src/asmjit/core/string.cpp +++ b/src/asmjit/core/string.cpp @@ -21,7 +21,7 @@ constexpr size_t kMaxAllocSize = SIZE_MAX - Globals::kGrowThreshold; // // NOTE: The sizes here include null terminators - that way we can have aligned allocations that are power of 2s // initially. -static ASMJIT_FORCE_INLINE size_t String_growCapacity(size_t byteSize, size_t minimumByteSize) noexcept { +static ASMJIT_INLINE size_t String_growCapacity(size_t byteSize, size_t minimumByteSize) noexcept { static constexpr size_t kGrowThreshold = Globals::kGrowThreshold; ASMJIT_ASSERT(minimumByteSize < kMaxAllocSize); @@ -51,8 +51,9 @@ static ASMJIT_FORCE_INLINE size_t String_growCapacity(size_t byteSize, size_t mi byteSize = minimumByteSize + remainder; // Bail to `minimumByteSize` in case of overflow. 
- if (byteSize < minimumByteSize) + if (byteSize < minimumByteSize) { return minimumByteSize; + } } } @@ -63,8 +64,9 @@ static ASMJIT_FORCE_INLINE size_t String_growCapacity(size_t byteSize, size_t mi // ====================== Error String::reset() noexcept { - if (_type == kTypeLarge) + if (_type == kTypeLarge) { ::free(_large.data); + } _resetInternal(); return kErrorOk; @@ -104,17 +106,20 @@ char* String::prepare(ModifyOp op, size_t size) noexcept { if (op == ModifyOp::kAssign) { if (size > curCapacity) { // Prevent arithmetic overflow. - if (ASMJIT_UNLIKELY(size >= kMaxAllocSize)) + if (ASMJIT_UNLIKELY(size >= kMaxAllocSize)) { return nullptr; + } size_t newCapacity = Support::alignUp(size + 1, kMinAllocSize); char* newData = static_cast(::malloc(newCapacity)); - if (ASMJIT_UNLIKELY(!newData)) + if (ASMJIT_UNLIKELY(!newData)) { return nullptr; + } - if (_type == kTypeLarge) + if (_type == kTypeLarge) { ::free(curData); + } _large.type = kTypeLarge; _large.size = size; @@ -132,8 +137,9 @@ char* String::prepare(ModifyOp op, size_t size) noexcept { } else { // Prevent arithmetic overflow. 
- if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize - 1)) + if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize - 1)) { return nullptr; + } size_t newSize = size + curSize; size_t newSizePlusOne = newSize + 1; @@ -142,17 +148,20 @@ char* String::prepare(ModifyOp op, size_t size) noexcept { size_t newCapacityPlusOne = String_growCapacity(size + 1u, newSizePlusOne); ASMJIT_ASSERT(newCapacityPlusOne >= newSizePlusOne); - if (ASMJIT_UNLIKELY(newCapacityPlusOne < newSizePlusOne)) + if (ASMJIT_UNLIKELY(newCapacityPlusOne < newSizePlusOne)) { return nullptr; + } char* newData = static_cast(::malloc(newCapacityPlusOne)); - if (ASMJIT_UNLIKELY(!newData)) + if (ASMJIT_UNLIKELY(!newData)) { return nullptr; + } memcpy(newData, curData, curSize); - if (_type == kTypeLarge) + if (_type == kTypeLarge) { ::free(curData); + } _large.type = kTypeLarge; _large.size = newSize; @@ -177,8 +186,9 @@ Error String::assign(const char* data, size_t size) noexcept { char* dst = nullptr; // Null terminated string without `size` specified. - if (size == SIZE_MAX) + if (size == SIZE_MAX) { size = data ? 
strlen(data) : size_t(0); + } if (isLargeOrExternal()) { if (size <= _large.capacity) { @@ -187,15 +197,18 @@ Error String::assign(const char* data, size_t size) noexcept { } else { size_t capacityPlusOne = Support::alignUp(size + 1, 32); - if (ASMJIT_UNLIKELY(capacityPlusOne < size)) + if (ASMJIT_UNLIKELY(capacityPlusOne < size)) { return DebugUtils::errored(kErrorOutOfMemory); + } dst = static_cast(::malloc(capacityPlusOne)); - if (ASMJIT_UNLIKELY(!dst)) + if (ASMJIT_UNLIKELY(!dst)) { return DebugUtils::errored(kErrorOutOfMemory); + } - if (_type == kTypeLarge) + if (_type == kTypeLarge) { ::free(_large.data); + } _large.type = kTypeLarge; _large.data = dst; @@ -212,8 +225,9 @@ Error String::assign(const char* data, size_t size) noexcept { } else { dst = static_cast(::malloc(size + 1)); - if (ASMJIT_UNLIKELY(!dst)) + if (ASMJIT_UNLIKELY(!dst)) { return DebugUtils::errored(kErrorOutOfMemory); + } _large.type = kTypeLarge; _large.data = dst; @@ -237,15 +251,18 @@ Error String::assign(const char* data, size_t size) noexcept { // =================== Error String::_opString(ModifyOp op, const char* str, size_t size) noexcept { - if (size == SIZE_MAX) + if (size == SIZE_MAX) { size = str ? 
strlen(str) : size_t(0); + } - if (!size) + if (!size) { return kErrorOk; + } char* p = prepare(op, size); - if (!p) + if (!p) { return DebugUtils::errored(kErrorOutOfMemory); + } memcpy(p, str, size); return kErrorOk; @@ -253,20 +270,23 @@ Error String::_opString(ModifyOp op, const char* str, size_t size) noexcept { Error String::_opChar(ModifyOp op, char c) noexcept { char* p = prepare(op, 1); - if (!p) + if (!p) { return DebugUtils::errored(kErrorOutOfMemory); + } *p = c; return kErrorOk; } Error String::_opChars(ModifyOp op, char c, size_t n) noexcept { - if (!n) + if (!n) { return kErrorOk; + } char* p = prepare(op, n); - if (!p) + if (!p) { return DebugUtils::errored(kErrorOutOfMemory); + } memset(p, c, n); return kErrorOk; @@ -278,8 +298,9 @@ Error String::padEnd(size_t n, char c) noexcept { } Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, StringFormatFlags flags) noexcept { - if (base == 0) + if (base == 0) { base = 10; + } char buf[128]; char* p = buf + ASMJIT_ARRAY_SIZE(buf); @@ -345,8 +366,9 @@ Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, St if (Support::test(flags, StringFormatFlags::kAlternate)) { if (base == 8) { - if (orig != 0) + if (orig != 0) { *--p = '0'; + } } if (base == 16) { *--p = 'x'; @@ -357,16 +379,20 @@ Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, St // String Width // ------------ - if (sign != 0) + if (sign != 0) { *--p = sign; + } - if (width > 256) + if (width > 256) { width = 256; + } - if (width <= numberSize) + if (width <= numberSize) { width = 0; - else + } + else { width -= numberSize; + } // Finalize // -------- @@ -374,8 +400,9 @@ Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, St size_t prefixSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberSize; char* data = prepare(op, prefixSize + width + numberSize); - if (!data) + if (!data) { return DebugUtils::errored(kErrorOutOfMemory); + } 
memcpy(data, p, prefixSize); data += prefixSize; @@ -391,23 +418,29 @@ Error String::_opHex(ModifyOp op, const void* data, size_t size, char separator) char* dst; const uint8_t* src = static_cast(data); - if (!size) + if (!size) { return kErrorOk; + } if (separator) { - if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 3)) + if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 3)) { return DebugUtils::errored(kErrorOutOfMemory); + } dst = prepare(op, size * 3 - 1); - if (ASMJIT_UNLIKELY(!dst)) + if (ASMJIT_UNLIKELY(!dst)) { return DebugUtils::errored(kErrorOutOfMemory); + } size_t i = 0; for (;;) { dst[0] = String_baseN[(src[0] >> 4) & 0xF]; dst[1] = String_baseN[(src[0] ) & 0xF]; - if (++i == size) + + if (++i == size) { break; + } + // This makes sure that the separator is only put between two hexadecimal bytes. dst[2] = separator; dst += 3; @@ -415,12 +448,14 @@ Error String::_opHex(ModifyOp op, const void* data, size_t size, char separator) } } else { - if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 2)) + if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 2)) { return DebugUtils::errored(kErrorOutOfMemory); + } dst = prepare(op, size * 2); - if (ASMJIT_UNLIKELY(!dst)) + if (ASMJIT_UNLIKELY(!dst)) { return DebugUtils::errored(kErrorOutOfMemory); + } for (size_t i = 0; i < size; i++, dst += 2, src++) { dst[0] = String_baseN[(src[0] >> 4) & 0xF]; @@ -466,16 +501,19 @@ Error String::_opVFormat(ModifyOp op, const char* fmt, va_list ap) noexcept { fmtResult = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap); outputSize = size_t(fmtResult); - if (ASMJIT_LIKELY(outputSize < ASMJIT_ARRAY_SIZE(buf))) + if (ASMJIT_LIKELY(outputSize < ASMJIT_ARRAY_SIZE(buf))) { return _opString(op, buf, outputSize); + } } - if (ASMJIT_UNLIKELY(fmtResult < 0)) + if (ASMJIT_UNLIKELY(fmtResult < 0)) { return DebugUtils::errored(kErrorInvalidState); + } char* p = prepare(op, outputSize); - if (ASMJIT_UNLIKELY(!p)) + if (ASMJIT_UNLIKELY(!p)) { return DebugUtils::errored(kErrorOutOfMemory); + } fmtResult = vsnprintf(p, outputSize + 1, fmt, 
apCopy); ASMJIT_ASSERT(size_t(fmtResult) == outputSize); @@ -509,14 +547,17 @@ bool String::equals(const char* other, size_t size) const noexcept { if (bSize == SIZE_MAX) { size_t i; - for (i = 0; i < aSize; i++) - if (aData[i] != bData[i] || bData[i] == 0) + for (i = 0; i < aSize; i++) { + if (aData[i] != bData[i] || bData[i] == 0) { return false; + } + } return bData[i] == 0; } else { - if (aSize != bSize) + if (aSize != bSize) { return false; + } return ::memcmp(aData, bData, aSize) == 0; } } diff --git a/src/asmjit/core/string.h b/src/asmjit/core/string.h index c664820..dccaf25 100644 --- a/src/asmjit/core/string.h +++ b/src/asmjit/core/string.h @@ -36,9 +36,7 @@ union FixedString { //! \{ // This cannot be constexpr as GCC 4.8 refuses constexpr members of unions. - enum : uint32_t { - kNumUInt32Words = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t)) - }; + static inline constexpr uint32_t kNumUInt32Words = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t)); //! \} @@ -53,6 +51,7 @@ union FixedString { //! \name Utilities //! \{ + [[nodiscard]] inline bool equals(const char* other) const noexcept { return strcmp(str, other) == 0; } //! \} @@ -85,18 +84,13 @@ public: }; //! \cond INTERNAL - enum : uint32_t { - kLayoutSize = 32, - kSSOCapacity = kLayoutSize - 2 - }; + static inline constexpr uint32_t kLayoutSize = 32; + static inline constexpr uint32_t kSSOCapacity = kLayoutSize - 2; - //! String type. - enum Type : uint8_t { - //! Large string (owned by String). - kTypeLarge = 0x1Fu, - //! External string (zone allocated or not owned by String). - kTypeExternal = 0x20u - }; + //! Large string (owned by String). + static inline constexpr uint8_t kTypeLarge = 0x1Fu; + //! External string (zone allocated or not owned by String). 
+ static inline constexpr uint8_t kTypeExternal = 0x20u; union Raw { uint8_t u8[kLayoutSize]; @@ -156,10 +150,16 @@ public: return *this; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator==(const char* other) const noexcept { return equals(other); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator!=(const char* other) const noexcept { return !equals(other); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator==(const String& other) const noexcept { return equals(other); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator!=(const String& other) const noexcept { return !equals(other); } //! \} @@ -167,25 +167,42 @@ public: //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return _type == kTypeExternal; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isLargeOrExternal() const noexcept { return _type >= kTypeLarge; } //! Tests whether the string is empty. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return size() == 0; } + //! Returns the size of the string. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return isLargeOrExternal() ? size_t(_large.size) : size_t(_type); } + //! Returns the capacity of the string. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return isLargeOrExternal() ? _large.capacity : size_t(kSSOCapacity); } //! Returns the data of the string. + [[nodiscard]] ASMJIT_INLINE_NODEBUG char* data() noexcept { return isLargeOrExternal() ? _large.data : _small.data; } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return isLargeOrExternal() ? 
_large.data : _small.data; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG char* start() noexcept { return data(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* start() const noexcept { return data(); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG char* end() noexcept { return data() + size(); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* end() const noexcept { return data() + size(); } //! \} @@ -201,6 +218,7 @@ public: //! Clears the content of the string. ASMJIT_API Error clear() noexcept; + [[nodiscard]] ASMJIT_API char* prepare(ModifyOp op, size_t size) noexcept; ASMJIT_API Error _opString(ModifyOp op, const char* str, size_t size = SIZE_MAX) noexcept; @@ -310,7 +328,10 @@ public: //! Truncate the string length into `newSize`. ASMJIT_API Error truncate(size_t newSize) noexcept; + [[nodiscard]] ASMJIT_API bool equals(const char* other, size_t size = SIZE_MAX) const noexcept; + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool equals(const String& other) const noexcept { return equals(other.data(), other.size()); } //! \} @@ -323,15 +344,18 @@ public: //! \note This is always called internally after an external buffer was released as it zeroes all bytes //! used by String's embedded storage. inline void _resetInternal() noexcept { - for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++) + for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++) { _raw.uptr[i] = 0; + } } inline void _setSize(size_t newSize) noexcept { - if (isLargeOrExternal()) + if (isLargeOrExternal()) { _large.size = newSize; - else + } + else { _small.type = uint8_t(newSize); + } } //! \} diff --git a/src/asmjit/core/support.h b/src/asmjit/core/support.h index c0b39ef..bf25f2b 100644 --- a/src/asmjit/core/support.h +++ b/src/asmjit/core/support.h @@ -25,9 +25,9 @@ namespace Support { // ====================== #if ASMJIT_ARCH_X86 -typedef uint8_t FastUInt8; +using FastUInt8 = uint8_t; #else -typedef uint32_t FastUInt8; +using FastUInt8 = uint32_t; #endif //! 
\cond INTERNAL @@ -53,63 +53,71 @@ namespace Internal { template struct StdInt {}; // Fail if not specialized. - template<> struct StdInt<1, 0> { typedef int8_t Type; }; - template<> struct StdInt<1, 1> { typedef uint8_t Type; }; - template<> struct StdInt<2, 0> { typedef int16_t Type; }; - template<> struct StdInt<2, 1> { typedef uint16_t Type; }; - template<> struct StdInt<4, 0> { typedef int32_t Type; }; - template<> struct StdInt<4, 1> { typedef uint32_t Type; }; - template<> struct StdInt<8, 0> { typedef int64_t Type; }; - template<> struct StdInt<8, 1> { typedef uint64_t Type; }; + template<> struct StdInt<1, 0> { using Type = int8_t; }; + template<> struct StdInt<1, 1> { using Type = uint8_t; }; + template<> struct StdInt<2, 0> { using Type = int16_t; }; + template<> struct StdInt<2, 1> { using Type = uint16_t; }; + template<> struct StdInt<4, 0> { using Type = int32_t; }; + template<> struct StdInt<4, 1> { using Type = uint32_t; }; + template<> struct StdInt<8, 0> { using Type = int64_t; }; + template<> struct StdInt<8, 1> { using Type = uint64_t; }; - template::value> + template> struct Int32Or64 : public StdInt {}; } //! \endcond -template -static ASMJIT_INLINE_NODEBUG constexpr bool isUnsigned() noexcept { return std::is_unsigned::value; } - //! Casts an integer `x` to either `int32_t` or `int64_t` depending on `T`. template -static ASMJIT_INLINE_NODEBUG constexpr typename Internal::Int32Or64::Type asInt(const T& x) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR typename Internal::Int32Or64::Type asInt(const T& x) noexcept { return (typename Internal::Int32Or64::Type)x; } //! Casts an integer `x` to either `uint32_t` or `uint64_t` depending on `T`. template -static ASMJIT_INLINE_NODEBUG constexpr typename Internal::Int32Or64::Type asUInt(const T& x) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR typename Internal::Int32Or64::Type asUInt(const T& x) noexcept { return (typename Internal::Int32Or64::Type)x; } //! 
Casts an integer `x` to either `int32_t`, uint32_t`, `int64_t`, or `uint64_t` depending on `T`. template -static ASMJIT_INLINE_NODEBUG constexpr typename Internal::Int32Or64::Type asNormalized(const T& x) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR typename Internal::Int32Or64::Type asNormalized(const T& x) noexcept { return (typename Internal::Int32Or64::Type)x; } //! Casts an integer `x` to the same type as defined by ``. template -static ASMJIT_INLINE_NODEBUG constexpr typename Internal::StdInt()>::Type asStdInt(const T& x) noexcept { - return (typename Internal::StdInt()>::Type)x; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR typename Internal::StdInt>::Type asStdInt(const T& x) noexcept { + return (typename Internal::StdInt>::Type)x; } //! A helper class that can be used to iterate over enum values. template struct EnumValues { - typedef typename std::underlying_type::type ValueType; + using ValueType = std::underlying_type_t; struct Iterator { ValueType value; + [[nodiscard]] ASMJIT_INLINE_NODEBUG T operator*() const { return (T)value; } ASMJIT_INLINE_NODEBUG void operator++() { ++value; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator==(const Iterator& other) const noexcept { return value == other.value; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator!=(const Iterator& other) const noexcept { return value != other.value; } }; + [[nodiscard]] ASMJIT_INLINE_NODEBUG Iterator begin() const noexcept { return Iterator{ValueType(from)}; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG Iterator end() const noexcept { return Iterator{ValueType(to) + 1}; } }; @@ -137,74 +145,85 @@ static ASMJIT_INLINE_NODEBUG Dst bitCast(const Src& x) noexcept { return Interna // ================ //! Storage used to store a pack of bits (should by compatible with a machine word). 
-typedef Internal::StdInt::Type BitWord; +using BitWord = Internal::StdInt::Type; template -static ASMJIT_INLINE_NODEBUG constexpr uint32_t bitSizeOf() noexcept { return uint32_t(sizeof(T) * 8u); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR uint32_t bitSizeOf() noexcept { return uint32_t(sizeof(T) * 8u); } //! Number of bits stored in a single `BitWord`. static constexpr uint32_t kBitWordSizeInBits = bitSizeOf(); //! Returns `0 - x` in a safe way (no undefined behavior), works for unsigned numbers as well. template -static ASMJIT_INLINE_NODEBUG constexpr T neg(const T& x) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T neg(const T& x) noexcept { + using U = std::make_unsigned_t; return T(U(0) - U(x)); } template -static ASMJIT_INLINE_NODEBUG constexpr T allOnes() noexcept { return neg(T(1)); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T allOnes() noexcept { return neg(T(1)); } //! Returns `x << y` (shift left logical) by explicitly casting `x` to an unsigned type and back. template -static ASMJIT_INLINE_NODEBUG constexpr X shl(const X& x, const Y& y) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR X shl(const X& x, const Y& y) noexcept { + using U = std::make_unsigned_t; return X(U(x) << y); } //! Returns `x >> y` (shift right logical) by explicitly casting `x` to an unsigned type and back. template -static ASMJIT_INLINE_NODEBUG constexpr X shr(const X& x, const Y& y) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR X shr(const X& x, const Y& y) noexcept { + using U = std::make_unsigned_t; return X(U(x) >> y); } //! Returns `x >> y` (shift right arithmetic) by explicitly casting `x` to a signed type and back. 
template -static ASMJIT_INLINE_NODEBUG constexpr X sar(const X& x, const Y& y) noexcept { - typedef typename std::make_signed::type S; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR X sar(const X& x, const Y& y) noexcept { + using S = std::make_signed_t; return X(S(x) >> y); } template -static ASMJIT_INLINE_NODEBUG constexpr X ror(const X& x, const Y& y) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR X ror(const X& x, const Y& y) noexcept { + using U = std::make_unsigned_t; return X((U(x) >> y) | (U(x) << (bitSizeOf() - U(y)))); } //! Returns `x | (x >> y)` - helper used by some bit manipulation helpers. template -static ASMJIT_INLINE_NODEBUG constexpr X or_shr(const X& x, const Y& y) noexcept { return X(x | shr(x, y)); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR X or_shr(const X& x, const Y& y) noexcept { return X(x | shr(x, y)); } //! Returns `x & -x` - extracts lowest set isolated bit (like BLSI instruction). template -static ASMJIT_INLINE_NODEBUG constexpr T blsi(T x) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T blsi(T x) noexcept { + using U = std::make_unsigned_t; return T(U(x) & neg(U(x))); } //! Tests whether the given value `x` has `n`th bit set. template -static ASMJIT_INLINE_NODEBUG constexpr bool bitTest(T x, IndexT n) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool bitTest(T x, IndexT n) noexcept { + using U = std::make_unsigned_t; return (U(x) & (U(1) << asStdInt(n))) != 0; } // Tests whether the given `value` is a consecutive mask of bits that starts at // the least significant bit. 
template -static ASMJIT_INLINE_NODEBUG constexpr bool isLsbMask(const T& value) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isLsbMask(const T& value) noexcept { + using U = std::make_unsigned_t; return value && ((U(value) + 1u) & U(value)) == 0; } @@ -214,15 +233,17 @@ static ASMJIT_INLINE_NODEBUG constexpr bool isLsbMask(const T& value) noexcept { // This function is similar to \ref isLsbMask(), but the mask doesn't have to // start at a least significant bit. template -static ASMJIT_INLINE_NODEBUG constexpr bool isConsecutiveMask(const T& value) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isConsecutiveMask(const T& value) noexcept { + using U = std::make_unsigned_t; return value && isLsbMask((U(value) - 1u) | U(value)); } //! Generates a trailing bit-mask that has `n` least significant (trailing) bits set. template -static ASMJIT_INLINE_NODEBUG constexpr T lsbMask(const CountT& n) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T lsbMask(const CountT& n) noexcept { + using U = std::make_unsigned_t; return (sizeof(U) < sizeof(uintptr_t)) // Prevent undefined behavior by using a larger type than T. ? T(U((uintptr_t(1) << n) - uintptr_t(1))) @@ -232,8 +253,9 @@ static ASMJIT_INLINE_NODEBUG constexpr T lsbMask(const CountT& n) noexcept { //! Generates a leading bit-mask that has `n` most significant (leading) bits set. template -static ASMJIT_INLINE_NODEBUG constexpr T msbMask(const CountT& n) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T msbMask(const CountT& n) noexcept { + using U = std::make_unsigned_t; return (sizeof(U) < sizeof(uintptr_t)) // Prevent undefined behavior by using a larger type than T. ? 
T(allOnes() >> (bitSizeOf() - n)) @@ -243,40 +265,52 @@ static ASMJIT_INLINE_NODEBUG constexpr T msbMask(const CountT& n) noexcept { //! Returns a bit-mask that has `x` bit set. template -static ASMJIT_INLINE_NODEBUG constexpr uint32_t bitMask(const Index& x) noexcept { return (1u << asUInt(x)); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR uint32_t bitMask(const Index& x) noexcept { return (1u << asUInt(x)); } //! Returns a bit-mask that has `x` bit set (multiple arguments). template -static ASMJIT_INLINE_NODEBUG constexpr uint32_t bitMask(const Index& x, Args... args) noexcept { return bitMask(x) | bitMask(args...); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR uint32_t bitMask(const Index& x, Args... args) noexcept { return bitMask(x) | bitMask(args...); } //! Converts a boolean value `b` to zero or full mask (all bits set). template -static ASMJIT_INLINE_NODEBUG constexpr DstT bitMaskFromBool(SrcT b) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR DstT bitMaskFromBool(SrcT b) noexcept { + using U = std::make_unsigned_t; return DstT(U(0) - U(b)); } //! Tests whether `a & b` is non-zero. template +[[nodiscard]] static inline constexpr bool test(A a, B b) noexcept { return (asUInt(a) & asUInt(b)) != 0; } //! \cond namespace Internal { // Fills all trailing bits right from the first most significant bit set. - static ASMJIT_INLINE_NODEBUG constexpr uint8_t fillTrailingBitsImpl(uint8_t x) noexcept { return or_shr(or_shr(or_shr(x, 1), 2), 4); } + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR uint8_t fillTrailingBitsImpl(uint8_t x) noexcept { return or_shr(or_shr(or_shr(x, 1), 2), 4); } + // Fills all trailing bits right from the first most significant bit set. 
- static ASMJIT_INLINE_NODEBUG constexpr uint16_t fillTrailingBitsImpl(uint16_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8); } + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR uint16_t fillTrailingBitsImpl(uint16_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8); } + // Fills all trailing bits right from the first most significant bit set. - static ASMJIT_INLINE_NODEBUG constexpr uint32_t fillTrailingBitsImpl(uint32_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8), 16); } + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR uint32_t fillTrailingBitsImpl(uint32_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8), 16); } + // Fills all trailing bits right from the first most significant bit set. - static ASMJIT_INLINE_NODEBUG constexpr uint64_t fillTrailingBitsImpl(uint64_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8), 16), 32); } + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR uint64_t fillTrailingBitsImpl(uint64_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8), 16), 32); } } //! \endcond // Fills all trailing bits right from the first most significant bit set. 
template -static ASMJIT_INLINE_NODEBUG constexpr T fillTrailingBits(const T& x) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T fillTrailingBits(const T& x) noexcept { + using U = std::make_unsigned_t; return T(Internal::fillTrailingBitsImpl(U(x))); } @@ -292,59 +326,95 @@ struct BitScanData { T x; uint32_t n; }; template struct BitScanCalc { - static ASMJIT_INLINE_NODEBUG constexpr BitScanData advanceLeft(const BitScanData& data, uint32_t n) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR BitScanData advanceLeft(const BitScanData& data, uint32_t n) noexcept { return BitScanData { data.x << n, data.n + n }; } - static ASMJIT_INLINE_NODEBUG constexpr BitScanData advanceRight(const BitScanData& data, uint32_t n) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR BitScanData advanceRight(const BitScanData& data, uint32_t n) noexcept { return BitScanData { data.x >> n, data.n + n }; } - static ASMJIT_INLINE_NODEBUG constexpr BitScanData clz(const BitScanData& data) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR BitScanData clz(const BitScanData& data) noexcept { return BitScanCalc::clz(advanceLeft(data, data.x & (allOnes() << (bitSizeOf() - N)) ? uint32_t(0) : N)); } - static ASMJIT_INLINE_NODEBUG constexpr BitScanData ctz(const BitScanData& data) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR BitScanData ctz(const BitScanData& data) noexcept { return BitScanCalc::ctz(advanceRight(data, data.x & (allOnes() >> (bitSizeOf() - N)) ? 
uint32_t(0) : N)); } }; template struct BitScanCalc { - static ASMJIT_INLINE_NODEBUG constexpr BitScanData clz(const BitScanData& ctx) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR BitScanData clz(const BitScanData& ctx) noexcept { return BitScanData { 0, ctx.n - uint32_t(ctx.x >> (bitSizeOf() - 1)) }; } - static ASMJIT_INLINE_NODEBUG constexpr BitScanData ctz(const BitScanData& ctx) noexcept { + [[nodiscard]] + static ASMJIT_INLINE_CONSTEXPR BitScanData ctz(const BitScanData& ctx) noexcept { return BitScanData { 0, ctx.n - uint32_t(ctx.x & 0x1) }; } }; template -ASMJIT_INLINE_NODEBUG constexpr uint32_t clzFallback(const T& x) noexcept { +[[nodiscard]] +ASMJIT_INLINE_CONSTEXPR uint32_t clzFallback(const T& x) noexcept { return BitScanCalc() / 2u>::clz(BitScanData{x, 1}).n; } template -ASMJIT_INLINE_NODEBUG constexpr uint32_t ctzFallback(const T& x) noexcept { +[[nodiscard]] +ASMJIT_INLINE_CONSTEXPR uint32_t ctzFallback(const T& x) noexcept { return BitScanCalc() / 2u>::ctz(BitScanData{x, 1}).n; } -template ASMJIT_INLINE_NODEBUG uint32_t clzImpl(const T& x) noexcept { return clzFallback(asUInt(x)); } -template ASMJIT_INLINE_NODEBUG uint32_t ctzImpl(const T& x) noexcept { return ctzFallback(asUInt(x)); } +template +[[nodiscard]] +ASMJIT_INLINE_NODEBUG uint32_t clzImpl(const T& x) noexcept { return clzFallback(asUInt(x)); } + +template +[[nodiscard]] +ASMJIT_INLINE_NODEBUG uint32_t ctzImpl(const T& x) noexcept { return ctzFallback(asUInt(x)); } #if !defined(ASMJIT_NO_INTRINSICS) # if defined(__GNUC__) -template<> ASMJIT_INLINE_NODEBUG uint32_t clzImpl(const uint32_t& x) noexcept { return uint32_t(__builtin_clz(x)); } -template<> ASMJIT_INLINE_NODEBUG uint32_t clzImpl(const uint64_t& x) noexcept { return uint32_t(__builtin_clzll(x)); } -template<> ASMJIT_INLINE_NODEBUG uint32_t ctzImpl(const uint32_t& x) noexcept { return uint32_t(__builtin_ctz(x)); } -template<> ASMJIT_INLINE_NODEBUG uint32_t ctzImpl(const uint64_t& x) noexcept { return 
uint32_t(__builtin_ctzll(x)); } +template<> +[[nodiscard]] +ASMJIT_INLINE_NODEBUG uint32_t clzImpl(const uint32_t& x) noexcept { return uint32_t(__builtin_clz(x)); } + +template<> +[[nodiscard]] +ASMJIT_INLINE_NODEBUG uint32_t clzImpl(const uint64_t& x) noexcept { return uint32_t(__builtin_clzll(x)); } + +template<> +[[nodiscard]] +ASMJIT_INLINE_NODEBUG uint32_t ctzImpl(const uint32_t& x) noexcept { return uint32_t(__builtin_ctz(x)); } + +template<> +[[nodiscard]] +ASMJIT_INLINE_NODEBUG uint32_t ctzImpl(const uint64_t& x) noexcept { return uint32_t(__builtin_ctzll(x)); } + # elif defined(_MSC_VER) -template<> ASMJIT_INLINE_NODEBUG uint32_t clzImpl(const uint32_t& x) noexcept { unsigned long i; _BitScanReverse(&i, x); return uint32_t(i ^ 31); } -template<> ASMJIT_INLINE_NODEBUG uint32_t ctzImpl(const uint32_t& x) noexcept { unsigned long i; _BitScanForward(&i, x); return uint32_t(i); } +template<> +[[nodiscard]] +ASMJIT_INLINE_NODEBUG uint32_t clzImpl(const uint32_t& x) noexcept { unsigned long i; _BitScanReverse(&i, x); return uint32_t(i ^ 31); } + +template<> +[[nodiscard]] +ASMJIT_INLINE_NODEBUG uint32_t ctzImpl(const uint32_t& x) noexcept { unsigned long i; _BitScanForward(&i, x); return uint32_t(i); } + # if ASMJIT_ARCH_X86 == 64 || ASMJIT_ARCH_ARM == 64 -template<> ASMJIT_INLINE_NODEBUG uint32_t clzImpl(const uint64_t& x) noexcept { unsigned long i; _BitScanReverse64(&i, x); return uint32_t(i ^ 63); } -template<> ASMJIT_INLINE_NODEBUG uint32_t ctzImpl(const uint64_t& x) noexcept { unsigned long i; _BitScanForward64(&i, x); return uint32_t(i); } +template<> +[[nodiscard]] +ASMJIT_INLINE_NODEBUG uint32_t clzImpl(const uint64_t& x) noexcept { unsigned long i; _BitScanReverse64(&i, x); return uint32_t(i ^ 63); } + +template<> +[[nodiscard]] +ASMJIT_INLINE_NODEBUG uint32_t ctzImpl(const uint64_t& x) noexcept { unsigned long i; _BitScanForward64(&i, x); return uint32_t(i); } # endif # endif #endif @@ -357,17 +427,19 @@ template<> ASMJIT_INLINE_NODEBUG uint32_t 
ctzImpl(const uint64_t& x) noexcept { //! //! \note The input MUST NOT be zero, otherwise the result is undefined. template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t clz(T x) noexcept { return Internal::clzImpl(asUInt(x)); } //! Count trailing zeros in `x` (returns a position of a first bit set in `x`). //! //! \note The input MUST NOT be zero, otherwise the result is undefined. template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t ctz(T x) noexcept { return Internal::ctzImpl(asUInt(x)); } template struct ConstCTZ { - static constexpr uint32_t value = + static inline constexpr uint32_t value = (kInput & (uint64_t(1) << 0)) ? 0 : (kInput & (uint64_t(1) << 1)) ? 1 : (kInput & (uint64_t(1) << 2)) ? 2 : @@ -450,12 +522,14 @@ struct ConstCTZ { //! \cond namespace Internal { + [[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t constPopcntImpl(uint32_t x) noexcept { x = x - ((x >> 1) & 0x55555555u); x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u); return (((x + (x >> 4)) & 0x0F0F0F0Fu) * 0x01010101u) >> 24; } + [[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t constPopcntImpl(uint64_t x) noexcept { #if ASMJIT_ARCH_BITS >= 64 x = x - ((x >> 1) & 0x5555555555555555u); @@ -467,6 +541,7 @@ namespace Internal { #endif } + [[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t popcntImpl(uint32_t x) noexcept { #if defined(__GNUC__) return uint32_t(__builtin_popcount(x)); @@ -475,6 +550,7 @@ namespace Internal { #endif } + [[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t popcntImpl(uint64_t x) noexcept { #if defined(__GNUC__) return uint32_t(__builtin_popcountll(x)); @@ -487,10 +563,12 @@ namespace Internal { //! Calculates count of bits in `x`. template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t popcnt(T x) noexcept { return Internal::popcntImpl(asUInt(x)); } //! Calculates count of bits in `x` (useful in constant expressions). 
template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t constPopcnt(T x) noexcept { return Internal::constPopcntImpl(asUInt(x)); } // Support - Min/Max @@ -501,16 +579,20 @@ static ASMJIT_INLINE_NODEBUG uint32_t constPopcnt(T x) noexcept { return Interna // a reference to `a` or `b` but it's a new value instead. template -static ASMJIT_INLINE_NODEBUG constexpr T min(const T& a, const T& b) noexcept { return b < a ? b : a; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T min(const T& a, const T& b) noexcept { return b < a ? b : a; } template -static ASMJIT_INLINE_NODEBUG constexpr T min(const T& a, const T& b, Args&&... args) noexcept { return min(min(a, b), std::forward(args)...); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T min(const T& a, const T& b, Args&&... args) noexcept { return min(min(a, b), std::forward(args)...); } template -static ASMJIT_INLINE_NODEBUG constexpr T max(const T& a, const T& b) noexcept { return a < b ? b : a; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T max(const T& a, const T& b) noexcept { return a < b ? b : a; } template -static ASMJIT_INLINE_NODEBUG constexpr T max(const T& a, const T& b, Args&&... args) noexcept { return max(max(a, b), std::forward(args)...); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T max(const T& a, const T& b, Args&&... 
args) noexcept { return max(max(a, b), std::forward(args)...); } // Support - Immediate Helpers // =========================== @@ -519,7 +601,7 @@ namespace Internal { template struct ImmConv { static ASMJIT_INLINE_NODEBUG int64_t fromT(const T& x) noexcept { return int64_t(x); } - static ASMJIT_INLINE_NODEBUG T toT(int64_t x) noexcept { return T(uint64_t(x) & Support::allOnes::type>()); } + static ASMJIT_INLINE_NODEBUG T toT(int64_t x) noexcept { return T(uint64_t(x) & Support::allOnes>()); } }; template @@ -530,10 +612,12 @@ namespace Internal { } template -static ASMJIT_INLINE_NODEBUG int64_t immediateFromT(const T& x) noexcept { return Internal::ImmConv::value>::fromT(x); } +[[nodiscard]] +static ASMJIT_INLINE_NODEBUG int64_t immediateFromT(const T& x) noexcept { return Internal::ImmConv>::fromT(x); } template -static ASMJIT_INLINE_NODEBUG T immediateToT(int64_t x) noexcept { return Internal::ImmConv::value>::toT(x); } +[[nodiscard]] +static ASMJIT_INLINE_NODEBUG T immediateToT(int64_t x) noexcept { return Internal::ImmConv>::toT(x); } // Support - Overflow Arithmetic // ============================= @@ -542,29 +626,29 @@ static ASMJIT_INLINE_NODEBUG T immediateToT(int64_t x) noexcept { return Interna namespace Internal { template inline T addOverflowFallback(T x, T y, FastUInt8* of) noexcept { - typedef typename std::make_unsigned::type U; + using U = std::make_unsigned_t; U result = U(U(x) + U(y)); - *of = FastUInt8(*of | FastUInt8(isUnsigned() ? result < U(x) : T((U(x) ^ ~U(y)) & (U(x) ^ result)) < 0)); + *of = FastUInt8(*of | FastUInt8(std::is_unsigned_v ? result < U(x) : T((U(x) ^ ~U(y)) & (U(x) ^ result)) < 0)); return T(result); } template inline T subOverflowFallback(T x, T y, FastUInt8* of) noexcept { - typedef typename std::make_unsigned::type U; + using U = std::make_unsigned_t; U result = U(x) - U(y); - *of = FastUInt8(*of | FastUInt8(isUnsigned() ? 
result > U(x) : T((U(x) ^ U(y)) & (U(x) ^ result)) < 0)); + *of = FastUInt8(*of | FastUInt8(std::is_unsigned_v ? result > U(x) : T((U(x) ^ U(y)) & (U(x) ^ result)) < 0)); return T(result); } template inline T mulOverflowFallback(T x, T y, FastUInt8* of) noexcept { - typedef typename Internal::StdInt()>::Type I; - typedef typename std::make_unsigned::type U; + using I = typename Internal::StdInt>::Type; + using U = std::make_unsigned_t; U mask = allOnes(); - if (std::is_signed::value) { + if constexpr (std::is_signed_v) { U prod = U(I(x)) * U(I(y)); *of = FastUInt8(*of | FastUInt8(I(prod) < I(std::numeric_limits::lowest()) || I(prod) > I(std::numeric_limits::max()))); return T(I(prod & mask)); @@ -596,7 +680,6 @@ namespace Internal { template inline T mulOverflowImpl(const T& x, const T& y, FastUInt8* of) noexcept { return mulOverflowFallback(x, y, of); } #if defined(__GNUC__) && !defined(ASMJIT_NO_INTRINSICS) -#if defined(__clang__) || __GNUC__ >= 5 #define ASMJIT_ARITH_OVERFLOW_SPECIALIZE(FUNC, T, RESULT_T, BUILTIN) \ template<> \ inline T FUNC(const T& x, const T& y, FastUInt8* of) noexcept { \ @@ -617,7 +700,6 @@ namespace Internal { ASMJIT_ARITH_OVERFLOW_SPECIALIZE(mulOverflowImpl, int64_t , long long , __builtin_smulll_overflow) ASMJIT_ARITH_OVERFLOW_SPECIALIZE(mulOverflowImpl, uint64_t, unsigned long long, __builtin_umulll_overflow) #undef ASMJIT_ARITH_OVERFLOW_SPECIALIZE -#endif #endif // There is a bug in MSVC that makes these specializations unusable, maybe in the future... @@ -653,53 +735,58 @@ static inline T mulOverflow(const T& x, const T& y, FastUInt8* of) noexcept { re // =================== template -static ASMJIT_INLINE_NODEBUG constexpr bool isAligned(X base, Y alignment) noexcept { - typedef typename Internal::StdInt::Type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isAligned(X base, Y alignment) noexcept { + using U = typename Internal::StdInt::Type; return ((U)base % (U)alignment) == 0; } //! 
Tests whether the `x` is a power of two (only one bit is set). template -static ASMJIT_INLINE_NODEBUG constexpr bool isPowerOf2(T x) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isPowerOf2(T x) noexcept { + using U = std::make_unsigned_t; return x && !(U(x) & (U(x) - U(1))); } template -static ASMJIT_INLINE_NODEBUG constexpr X alignUp(X x, Y alignment) noexcept { - typedef typename Internal::StdInt::Type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR X alignUp(X x, Y alignment) noexcept { + using U = typename Internal::StdInt::Type; return (X)( ((U)x + ((U)(alignment) - 1u)) & ~((U)(alignment) - 1u) ); } template -static ASMJIT_INLINE_NODEBUG constexpr T alignUpPowerOf2(T x) noexcept { - typedef typename Internal::StdInt::Type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T alignUpPowerOf2(T x) noexcept { + using U = typename Internal::StdInt::Type; return (T)(fillTrailingBits(U(x) - 1u) + 1u); } -//! Returns either zero or a positive difference between `base` and `base` when -//! aligned to `alignment`. +//! Returns either zero or a positive difference between `base` and `base` when aligned to `alignment`. template -static ASMJIT_INLINE_NODEBUG constexpr typename Internal::StdInt::Type alignUpDiff(X base, Y alignment) noexcept { - typedef typename Internal::StdInt::Type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR typename Internal::StdInt::Type alignUpDiff(X base, Y alignment) noexcept { + using U = typename Internal::StdInt::Type; return alignUp(U(base), alignment) - U(base); } template -static ASMJIT_INLINE_NODEBUG constexpr X alignDown(X x, Y alignment) noexcept { - typedef typename Internal::StdInt::Type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR X alignDown(X x, Y alignment) noexcept { + using U = typename Internal::StdInt::Type; return (X)( (U)x & ~((U)(alignment) - 1u) ); } // Support - NumGranularized // ========================= -//! 
Calculates the number of elements that would be required if `base` is -//! granularized by `granularity`. This function can be used to calculate -//! the number of BitWords to represent N bits, for example. +//! Calculates the number of elements that would be required if `base` is granularized by `granularity`. +//! This function can be used to calculate the number of BitWords to represent N bits, for example. template -static ASMJIT_INLINE_NODEBUG constexpr X numGranularized(X base, Y granularity) noexcept { - typedef typename Internal::StdInt::Type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR X numGranularized(X base, Y granularity) noexcept { + using U = typename Internal::StdInt::Type; return X((U(base) + U(granularity) - 1) / U(granularity)); } @@ -708,7 +795,8 @@ static ASMJIT_INLINE_NODEBUG constexpr X numGranularized(X base, Y granularity) //! Checks whether `x` is greater than or equal to `a` and lesser than or equal to `b`. template -static ASMJIT_INLINE_NODEBUG constexpr bool isBetween(const T& x, const T& a, const T& b) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isBetween(const T& x, const T& a, const T& b) noexcept { return x >= a && x <= b; } @@ -717,127 +805,142 @@ static ASMJIT_INLINE_NODEBUG constexpr bool isBetween(const T& x, const T& a, co //! Checks whether the given integer `x` can be casted to a 4-bit signed integer. template -static ASMJIT_INLINE_NODEBUG constexpr bool isInt4(T x) noexcept { - typedef typename std::make_signed::type S; - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt4(T x) noexcept { + using S = std::make_signed_t; + using U = std::make_unsigned_t; - return std::is_signed::value ? isBetween(S(x), -8, 7) : U(x) <= U(7u); + return std::is_signed_v ? isBetween(S(x), -8, 7) : U(x) <= U(7u); } //! Checks whether the given integer `x` can be casted to a 7-bit signed integer. 
template -static ASMJIT_INLINE_NODEBUG constexpr bool isInt7(T x) noexcept { - typedef typename std::make_signed::type S; - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt7(T x) noexcept { + using S = std::make_signed_t; + using U = std::make_unsigned_t; - return std::is_signed::value ? isBetween(S(x), -64, 63) : U(x) <= U(63u); + return std::is_signed_v ? isBetween(S(x), -64, 63) : U(x) <= U(63u); } //! Checks whether the given integer `x` can be casted to an 8-bit signed integer. template -static ASMJIT_INLINE_NODEBUG constexpr bool isInt8(T x) noexcept { - typedef typename std::make_signed::type S; - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt8(T x) noexcept { + using S = std::make_signed_t; + using U = std::make_unsigned_t; - return std::is_signed::value ? sizeof(T) <= 1 || isBetween(S(x), -128, 127) : U(x) <= U(127u); + return std::is_signed_v ? sizeof(T) <= 1 || isBetween(S(x), -128, 127) : U(x) <= U(127u); } //! Checks whether the given integer `x` can be casted to a 9-bit signed integer. template -static ASMJIT_INLINE_NODEBUG constexpr bool isInt9(T x) noexcept { - typedef typename std::make_signed::type S; - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt9(T x) noexcept { + using S = std::make_signed_t; + using U = std::make_unsigned_t; - return std::is_signed::value ? sizeof(T) <= 1 || isBetween(S(x), -256, 255) - : sizeof(T) <= 1 || U(x) <= U(255u); + return std::is_signed_v ? sizeof(T) <= 1 || isBetween(S(x), -256, 255) + : sizeof(T) <= 1 || U(x) <= U(255u); } //! Checks whether the given integer `x` can be casted to a 10-bit signed integer. 
template -static ASMJIT_INLINE_NODEBUG constexpr bool isInt10(T x) noexcept { - typedef typename std::make_signed::type S; - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt10(T x) noexcept { + using S = std::make_signed_t; + using U = std::make_unsigned_t; - return std::is_signed::value ? sizeof(T) <= 1 || isBetween(S(x), -512, 511) - : sizeof(T) <= 1 || U(x) <= U(511u); + return std::is_signed_v ? sizeof(T) <= 1 || isBetween(S(x), -512, 511) + : sizeof(T) <= 1 || U(x) <= U(511u); } //! Checks whether the given integer `x` can be casted to a 16-bit signed integer. template -static ASMJIT_INLINE_NODEBUG constexpr bool isInt16(T x) noexcept { - typedef typename std::make_signed::type S; - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt16(T x) noexcept { + using S = std::make_signed_t; + using U = std::make_unsigned_t; - return std::is_signed::value ? sizeof(T) <= 2 || isBetween(S(x), -32768, 32767) - : sizeof(T) <= 1 || U(x) <= U(32767u); + return std::is_signed_v ? sizeof(T) <= 2 || isBetween(S(x), -32768, 32767) + : sizeof(T) <= 1 || U(x) <= U(32767u); } //! Checks whether the given integer `x` can be casted to a 32-bit signed integer. template -static ASMJIT_INLINE_NODEBUG constexpr bool isInt32(T x) noexcept { - typedef typename std::make_signed::type S; - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt32(T x) noexcept { + using S = std::make_signed_t; + using U = std::make_unsigned_t; - return std::is_signed::value ? sizeof(T) <= 4 || isBetween(S(x), -2147483647 - 1, 2147483647) - : sizeof(T) <= 2 || U(x) <= U(2147483647u); + return std::is_signed_v ? sizeof(T) <= 4 || isBetween(S(x), -2147483647 - 1, 2147483647) + : sizeof(T) <= 2 || U(x) <= U(2147483647u); } //! Checks whether the given integer `x` can be casted to a 4-bit unsigned integer. 
template -static ASMJIT_INLINE_NODEBUG constexpr bool isUInt4(T x) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isUInt4(T x) noexcept { + using U = std::make_unsigned_t; - return std::is_signed::value ? x >= T(0) && x <= T(15) - : U(x) <= U(15u); + return std::is_signed_v ? x >= T(0) && x <= T(15) + : U(x) <= U(15u); } //! Checks whether the given integer `x` can be casted to an 8-bit unsigned integer. template -static ASMJIT_INLINE_NODEBUG constexpr bool isUInt8(T x) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isUInt8(T x) noexcept { + using U = std::make_unsigned_t; - return std::is_signed::value ? (sizeof(T) <= 1 || T(x) <= T(255)) && x >= T(0) - : (sizeof(T) <= 1 || U(x) <= U(255u)); + return std::is_signed_v ? (sizeof(T) <= 1 || T(x) <= T(255)) && x >= T(0) + : (sizeof(T) <= 1 || U(x) <= U(255u)); } //! Checks whether the given integer `x` can be casted to a 12-bit unsigned integer (ARM specific). template -static ASMJIT_INLINE_NODEBUG constexpr bool isUInt12(T x) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isUInt12(T x) noexcept { + using U = std::make_unsigned_t; - return std::is_signed::value ? (sizeof(T) <= 1 || T(x) <= T(4095)) && x >= T(0) - : (sizeof(T) <= 1 || U(x) <= U(4095u)); + return std::is_signed_v ? (sizeof(T) <= 1 || T(x) <= T(4095)) && x >= T(0) + : (sizeof(T) <= 1 || U(x) <= U(4095u)); } //! Checks whether the given integer `x` can be casted to a 16-bit unsigned integer. template -static ASMJIT_INLINE_NODEBUG constexpr bool isUInt16(T x) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isUInt16(T x) noexcept { + using U = std::make_unsigned_t; - return std::is_signed::value ? 
(sizeof(T) <= 2 || T(x) <= T(65535)) && x >= T(0) - : (sizeof(T) <= 2 || U(x) <= U(65535u)); + return std::is_signed_v ? (sizeof(T) <= 2 || T(x) <= T(65535)) && x >= T(0) + : (sizeof(T) <= 2 || U(x) <= U(65535u)); } //! Checks whether the given integer `x` can be casted to a 32-bit unsigned integer. template -static ASMJIT_INLINE_NODEBUG constexpr bool isUInt32(T x) noexcept { - typedef typename std::make_unsigned::type U; +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isUInt32(T x) noexcept { + using U = std::make_unsigned_t; - return std::is_signed::value ? (sizeof(T) <= 4 || T(x) <= T(4294967295u)) && x >= T(0) - : (sizeof(T) <= 4 || U(x) <= U(4294967295u)); + return std::is_signed_v ? (sizeof(T) <= 4 || T(x) <= T(4294967295u)) && x >= T(0) + : (sizeof(T) <= 4 || U(x) <= U(4294967295u)); } //! Checks whether the given integer `x` can be casted to a 32-bit unsigned integer. template -static ASMJIT_INLINE_NODEBUG constexpr bool isIntOrUInt32(T x) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isIntOrUInt32(T x) noexcept { return sizeof(T) <= 4 ? 
true : (uint32_t(uint64_t(x) >> 32) + 1u) <= 1u; } +[[nodiscard]] static bool ASMJIT_INLINE_NODEBUG isEncodableOffset32(int32_t offset, uint32_t nBits) noexcept { uint32_t nRev = 32 - nBits; return Support::sar(Support::shl(offset, nRev), nRev) == offset; } +[[nodiscard]] static bool ASMJIT_INLINE_NODEBUG isEncodableOffset64(int64_t offset, uint32_t nBits) noexcept { uint32_t nRev = 64 - nBits; return Support::sar(Support::shl(offset, nRev), nRev) == offset; @@ -846,14 +949,17 @@ static bool ASMJIT_INLINE_NODEBUG isEncodableOffset64(int64_t offset, uint32_t n // Support - ByteSwap // ================== +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint16_t byteswap16(uint16_t x) noexcept { return uint16_t(((x >> 8) & 0xFFu) | ((x & 0xFFu) << 8)); } +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t byteswap32(uint32_t x) noexcept { return (x << 24) | (x >> 24) | ((x << 8) & 0x00FF0000u) | ((x >> 8) & 0x0000FF00); } +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint64_t byteswap64(uint64_t x) noexcept { #if (defined(__GNUC__) || defined(__clang__)) && !defined(ASMJIT_NO_INTRINSICS) return uint64_t(__builtin_bswap64(uint64_t(x))); @@ -869,19 +975,24 @@ static ASMJIT_INLINE_NODEBUG uint64_t byteswap64(uint64_t x) noexcept { // =========================== //! Pack four 8-bit integer into a 32-bit integer as it is an array of `{b0,b1,b2,b3}`. -static ASMJIT_INLINE_NODEBUG constexpr uint32_t bytepack32_4x8(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR uint32_t bytepack32_4x8(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept { return ASMJIT_ARCH_LE ? (a | (b << 8) | (c << 16) | (d << 24)) : (d | (c << 8) | (b << 16) | (a << 24)); } template -static ASMJIT_INLINE_NODEBUG constexpr uint32_t unpackU32At0(T x) noexcept { return ASMJIT_ARCH_LE ? 
uint32_t(uint64_t(x) & 0xFFFFFFFFu) : uint32_t(uint64_t(x) >> 32); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR uint32_t unpackU32At0(T x) noexcept { return ASMJIT_ARCH_LE ? uint32_t(uint64_t(x) & 0xFFFFFFFFu) : uint32_t(uint64_t(x) >> 32); } + template -static ASMJIT_INLINE_NODEBUG constexpr uint32_t unpackU32At1(T x) noexcept { return ASMJIT_ARCH_BE ? uint32_t(uint64_t(x) & 0xFFFFFFFFu) : uint32_t(uint64_t(x) >> 32); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR uint32_t unpackU32At1(T x) noexcept { return ASMJIT_ARCH_BE ? uint32_t(uint64_t(x) & 0xFFFFFFFFu) : uint32_t(uint64_t(x) >> 32); } // Support - Position of byte (in bit-shift) // ========================================= +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t byteShiftOfDWordStruct(uint32_t index) noexcept { return ASMJIT_ARCH_LE ? index * 8 : (uint32_t(sizeof(uint32_t)) - 1u - index) * 8; } @@ -890,11 +1001,14 @@ static ASMJIT_INLINE_NODEBUG uint32_t byteShiftOfDWordStruct(uint32_t index) noe // ========================== template -static ASMJIT_INLINE_NODEBUG constexpr T asciiToLower(T c) noexcept { return T(c ^ T(T(c >= T('A') && c <= T('Z')) << 5)); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T asciiToLower(T c) noexcept { return T(c ^ T(T(c >= T('A') && c <= T('Z')) << 5)); } template -static ASMJIT_INLINE_NODEBUG constexpr T asciiToUpper(T c) noexcept { return T(c ^ T(T(c >= T('a') && c <= T('z')) << 5)); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR T asciiToUpper(T c) noexcept { return T(c ^ T(T(c >= T('a') && c <= T('z')) << 5)); } +[[nodiscard]] static ASMJIT_INLINE_NODEBUG size_t strLen(const char* s, size_t maxSize) noexcept { size_t i = 0; while (i < maxSize && s[i] != '\0') @@ -902,11 +1016,13 @@ static ASMJIT_INLINE_NODEBUG size_t strLen(const char* s, size_t maxSize) noexce return i; } -static ASMJIT_INLINE_NODEBUG constexpr uint32_t hashRound(uint32_t hash, uint32_t c) noexcept { return hash * 65599 + c; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR 
uint32_t hashRound(uint32_t hash, uint32_t c) noexcept { return hash * 65599 + c; } // Gets a hash of the given string `data` of size `size`. Size must be valid // as this function doesn't check for a null terminator and allows it in the // middle of the string. +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t hashString(const char* data, size_t size) noexcept { uint32_t hashCode = 0; for (uint32_t i = 0; i < size; i++) @@ -914,6 +1030,7 @@ static ASMJIT_INLINE_NODEBUG uint32_t hashString(const char* data, size_t size) return hashCode; } +[[nodiscard]] static ASMJIT_INLINE_NODEBUG const char* findPackedString(const char* p, uint32_t id) noexcept { uint32_t i = 0; while (i < id) { @@ -926,7 +1043,8 @@ static ASMJIT_INLINE_NODEBUG const char* findPackedString(const char* p, uint32_ } //! Compares two string views. -static ASMJIT_FORCE_INLINE int compareStringViews(const char* aData, size_t aSize, const char* bData, size_t bSize) noexcept { +[[nodiscard]] +static ASMJIT_INLINE int compareStringViews(const char* aData, size_t aSize, const char* bData, size_t bSize) noexcept { size_t size = Support::min(aSize, bSize); for (size_t i = 0; i < size; i++) { @@ -941,48 +1059,74 @@ static ASMJIT_FORCE_INLINE int compareStringViews(const char* aData, size_t aSiz // Support - Memory Read Access - 8 Bits // ===================================== +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint8_t readU8(const void* p) noexcept { return static_cast(p)[0]; } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int8_t readI8(const void* p) noexcept { return static_cast(p)[0]; } // Support - Memory Read Access - 16 Bits // ====================================== template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint16_t readU16x(const void* p) noexcept { - typedef typename Internal::AliasedUInt::T U16AlignedToN; + using U16AlignedToN = typename Internal::AliasedUInt::T; uint16_t x = static_cast(p)[0]; return BO == ByteOrder::kNative ? 
x : byteswap16(x); } template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint16_t readU16u(const void* p) noexcept { return readU16x(p); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint16_t readU16uLE(const void* p) noexcept { return readU16x(p); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint16_t readU16uBE(const void* p) noexcept { return readU16x(p); } +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint16_t readU16a(const void* p) noexcept { return readU16x(p); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint16_t readU16aLE(const void* p) noexcept { return readU16x(p); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint16_t readU16aBE(const void* p) noexcept { return readU16x(p); } template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int16_t readI16x(const void* p) noexcept { return int16_t(readU16x(p)); } template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int16_t readI16u(const void* p) noexcept { return int16_t(readU16x(p)); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int16_t readI16uLE(const void* p) noexcept { return int16_t(readU16x(p)); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int16_t readI16uBE(const void* p) noexcept { return int16_t(readU16x(p)); } +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int16_t readI16a(const void* p) noexcept { return int16_t(readU16x(p)); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int16_t readI16aLE(const void* p) noexcept { return int16_t(readU16x(p)); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int16_t readI16aBE(const void* p) noexcept { return int16_t(readU16x(p)); } // Support - Memory Read Access - 24 Bits // ====================================== template +[[nodiscard]] static inline uint32_t readU24u(const void* p) noexcept { uint32_t b0 = readU8(static_cast(p) + (BO == ByteOrder::kLE ? 
2u : 0u)); uint32_t b1 = readU8(static_cast(p) + 1u); @@ -990,77 +1134,124 @@ static inline uint32_t readU24u(const void* p) noexcept { return (b0 << 16) | (b1 << 8) | b2; } +[[nodiscard]] static inline uint32_t readU24uLE(const void* p) noexcept { return readU24u(p); } + +[[nodiscard]] static inline uint32_t readU24uBE(const void* p) noexcept { return readU24u(p); } // Support - Memory Read Access - 32 Bits // ====================================== template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t readU32x(const void* p) noexcept { - typedef typename Internal::AliasedUInt::T U32AlignedToN; + using U32AlignedToN = typename Internal::AliasedUInt::T; uint32_t x = static_cast(p)[0]; return BO == ByteOrder::kNative ? x : byteswap32(x); } template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t readU32u(const void* p) noexcept { return readU32x(p); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t readU32uLE(const void* p) noexcept { return readU32x(p); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t readU32uBE(const void* p) noexcept { return readU32x(p); } +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t readU32a(const void* p) noexcept { return readU32x(p); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t readU32aLE(const void* p) noexcept { return readU32x(p); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t readU32aBE(const void* p) noexcept { return readU32x(p); } template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t readI32x(const void* p) noexcept { return int32_t(readU32x(p)); } template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int32_t readI32u(const void* p) noexcept { return int32_t(readU32x(p)); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int32_t readI32uLE(const void* p) noexcept { return int32_t(readU32x(p)); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int32_t readI32uBE(const void* p) noexcept { return int32_t(readU32x(p)); } +[[nodiscard]] static 
ASMJIT_INLINE_NODEBUG int32_t readI32a(const void* p) noexcept { return int32_t(readU32x(p)); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int32_t readI32aLE(const void* p) noexcept { return int32_t(readU32x(p)); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int32_t readI32aBE(const void* p) noexcept { return int32_t(readU32x(p)); } // Support - Memory Read Access - 64 Bits // ====================================== template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint64_t readU64x(const void* p) noexcept { - typedef typename Internal::AliasedUInt::T U64AlignedToN; + using U64AlignedToN = typename Internal::AliasedUInt::T; uint64_t x = static_cast(p)[0]; return BO == ByteOrder::kNative ? x : byteswap64(x); } template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint64_t readU64u(const void* p) noexcept { return readU64x(p); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint64_t readU64uLE(const void* p) noexcept { return readU64x(p); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint64_t readU64uBE(const void* p) noexcept { return readU64x(p); } +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint64_t readU64a(const void* p) noexcept { return readU64x(p); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint64_t readU64aLE(const void* p) noexcept { return readU64x(p); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint64_t readU64aBE(const void* p) noexcept { return readU64x(p); } template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int64_t readI64x(const void* p) noexcept { return int64_t(readU64x(p)); } template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int64_t readI64u(const void* p) noexcept { return int64_t(readU64x(p)); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int64_t readI64uLE(const void* p) noexcept { return int64_t(readU64x(p)); } + template +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int64_t readI64uBE(const void* p) noexcept { return int64_t(readU64x(p)); } +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int64_t 
readI64a(const void* p) noexcept { return int64_t(readU64x(p)); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int64_t readI64aLE(const void* p) noexcept { return int64_t(readU64x(p)); } + +[[nodiscard]] static ASMJIT_INLINE_NODEBUG int64_t readI64aBE(const void* p) noexcept { return int64_t(readU64x(p)); } // Support - Memory Write Access - 8 Bits @@ -1074,7 +1265,7 @@ static ASMJIT_INLINE_NODEBUG void writeI8(void* p, int8_t x) noexcept { static_c template static ASMJIT_INLINE_NODEBUG void writeU16x(void* p, uint16_t x) noexcept { - typedef typename Internal::AliasedUInt::T U16AlignedToN; + using U16AlignedToN = typename Internal::AliasedUInt::T; static_cast(p)[0] = BO == ByteOrder::kNative ? x : byteswap16(x); } @@ -1118,7 +1309,7 @@ static inline void writeU24uBE(void* p, uint32_t v) noexcept { writeU24u static ASMJIT_INLINE_NODEBUG void writeU32x(void* p, uint32_t x) noexcept { - typedef typename Internal::AliasedUInt::T U32AlignedToN; + using U32AlignedToN = typename Internal::AliasedUInt::T; static_cast(p)[0] = (BO == ByteOrder::kNative) ? x : Support::byteswap32(x); } @@ -1152,7 +1343,7 @@ static ASMJIT_INLINE_NODEBUG void writeI32aBE(void* p, int32_t x) noexcept { wri template static ASMJIT_INLINE_NODEBUG void writeU64x(void* p, uint64_t x) noexcept { - typedef typename Internal::AliasedUInt::T U64AlignedToN; + using U64AlignedToN = typename Internal::AliasedUInt::T; static_cast(p)[0] = BO == ByteOrder::kNative ? x : byteswap64(x); } @@ -1221,9 +1412,12 @@ public: : _bitWord(bitWord) {} ASMJIT_INLINE_NODEBUG void init(T bitWord) noexcept { _bitWord = bitWord; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasNext() const noexcept { return _bitWord != 0; } - ASMJIT_FORCE_INLINE uint32_t next() noexcept { + [[nodiscard]] + ASMJIT_INLINE uint32_t next() noexcept { ASMJIT_ASSERT(_bitWord != 0); uint32_t index = ctz(_bitWord); _bitWord &= T(_bitWord - 1); @@ -1239,9 +1433,10 @@ public: //! 
\cond namespace Internal { template - static ASMJIT_FORCE_INLINE void bitVectorOp(T* buf, size_t index, size_t count) noexcept { - if (count == 0) + static ASMJIT_INLINE void bitVectorOp(T* buf, size_t index, size_t count) noexcept { + if (count == 0) { return; + } const size_t kTSizeInBits = bitSizeOf(); size_t vecIndex = index / kTSizeInBits; // T[] @@ -1265,8 +1460,9 @@ namespace Internal { } // The last BitWord requires special handling as well - if (count) + if (count) { buf[0] = OperatorT::op(buf[0], kFillMask >> (kTSizeInBits - count)); + } } } //! \endcond @@ -1291,10 +1487,12 @@ static ASMJIT_INLINE_NODEBUG void bitVectorSetBit(T* buf, size_t index, bool val size_t bitIndex = index % kTSizeInBits; T bitMask = T(1u) << bitIndex; - if (value) + if (value) { buf[vecIndex] |= bitMask; - else + } + else { buf[vecIndex] &= ~bitMask; + } } //! Sets bit in a bit-vector `buf` at `index` to `value`. @@ -1318,7 +1516,7 @@ template static ASMJIT_INLINE_NODEBUG void bitVectorClear(T* buf, size_t index, size_t count) noexcept { Internal::bitVectorOp(buf, index, count); } template -static ASMJIT_FORCE_INLINE size_t bitVectorIndexOf(T* buf, size_t start, bool value) noexcept { +static ASMJIT_INLINE size_t bitVectorIndexOf(T* buf, size_t start, bool value) noexcept { const size_t kTSizeInBits = bitSizeOf(); size_t vecIndex = start / kTSizeInBits; // T[] size_t bitIndex = start % kTSizeInBits; // T[][] @@ -1332,8 +1530,9 @@ static ASMJIT_FORCE_INLINE size_t bitVectorIndexOf(T* buf, size_t start, bool va // The first BitWord requires special handling as there are some bits we want to ignore. 
T bits = (*p ^ kFlipMask) & (kFillMask << bitIndex); for (;;) { - if (bits) + if (bits) { return (size_t)(p - buf) * kTSizeInBits + ctz(bits); + } bits = *++p ^ kFlipMask; } } @@ -1355,7 +1554,7 @@ public: init(data, numBitWords, start); } - ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords, size_t start = 0) noexcept { + ASMJIT_INLINE void init(const T* data, size_t numBitWords, size_t start = 0) noexcept { const T* ptr = data + (start / bitSizeOf()); size_t idx = alignDown(start, bitSizeOf()); size_t end = numBitWords * bitSizeOf(); @@ -1363,8 +1562,9 @@ public: T bitWord = T(0); if (idx < end) { bitWord = *ptr++ & (allOnes() << (start % bitSizeOf())); - while (!bitWord && (idx += bitSizeOf()) < end) + while (!bitWord && (idx += bitSizeOf()) < end) { bitWord = *ptr++; + } } _ptr = ptr; @@ -1373,11 +1573,13 @@ public: _current = bitWord; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasNext() const noexcept { return _current != T(0); } - ASMJIT_FORCE_INLINE size_t next() noexcept { + [[nodiscard]] + ASMJIT_INLINE size_t next() noexcept { T bitWord = _current; ASMJIT_ASSERT(bitWord != T(0)); @@ -1385,14 +1587,16 @@ public: bitWord &= T(bitWord - 1u); size_t n = _idx + bit; - while (!bitWord && (_idx += bitSizeOf()) < _end) + while (!bitWord && (_idx += bitSizeOf()) < _end) { bitWord = *_ptr++; + } _current = bitWord; return n; } - ASMJIT_FORCE_INLINE size_t peekNext() const noexcept { + [[nodiscard]] + ASMJIT_INLINE size_t peekNext() const noexcept { ASMJIT_ASSERT(_current != T(0)); return _idx + ctz(_current); } @@ -1404,9 +1608,7 @@ public: template class BitVectorOpIterator { public: - enum : uint32_t { - kTSizeInBits = bitSizeOf() - }; + static inline constexpr uint32_t kTSizeInBits = bitSizeOf(); const T* _aPtr; const T* _bPtr; @@ -1418,7 +1620,7 @@ public: init(aData, bData, numBitWords, start); } - ASMJIT_FORCE_INLINE void init(const T* aData, const T* bData, size_t numBitWords, size_t start = 0) noexcept { + ASMJIT_INLINE void init(const T* 
aData, const T* bData, size_t numBitWords, size_t start = 0) noexcept { const T* aPtr = aData + (start / bitSizeOf()); const T* bPtr = bData + (start / bitSizeOf()); size_t idx = alignDown(start, bitSizeOf()); @@ -1427,8 +1629,9 @@ public: T bitWord = T(0); if (idx < end) { bitWord = OperatorT::op(*aPtr++, *bPtr++) & (allOnes() << (start % bitSizeOf())); - while (!bitWord && (idx += kTSizeInBits) < end) + while (!bitWord && (idx += kTSizeInBits) < end) { bitWord = OperatorT::op(*aPtr++, *bPtr++); + } } _aPtr = aPtr; @@ -1438,11 +1641,13 @@ public: _current = bitWord; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasNext() noexcept { return _current != T(0); } - ASMJIT_FORCE_INLINE size_t next() noexcept { + [[nodiscard]] + ASMJIT_INLINE size_t next() noexcept { T bitWord = _current; ASMJIT_ASSERT(bitWord != T(0)); @@ -1450,8 +1655,9 @@ public: bitWord &= T(bitWord - 1u); size_t n = _idx + bit; - while (!bitWord && (_idx += kTSizeInBits) < _end) + while (!bitWord && (_idx += kTSizeInBits) < _end) { bitWord = OperatorT::op(*_aPtr++, *_bPtr++); + } _current = bitWord; return n; @@ -1482,9 +1688,11 @@ struct Compare { //! Insertion sort. template> static inline void iSort(T* base, size_t size, const CompareT& cmp = CompareT()) noexcept { - for (T* pm = base + 1; pm < base + size; pm++) - for (T* pl = pm; pl > base && cmp(pl[-1], pl[0]) > 0; pl--) + for (T* pm = base + 1; pm < base + size; pm++) { + for (T* pl = pm; pl > base && cmp(pl[-1], pl[0]) > 0; pl--) { std::swap(pl[-1], pl[0]); + } + } } //! \cond @@ -1492,10 +1700,8 @@ namespace Internal { //! Quick-sort implementation. template struct QSortImpl { - enum : size_t { - kStackSize = 64 * 2, - kISortThreshold = 7 - }; + static inline constexpr size_t kStackSize = 64u * 2u; + static inline constexpr size_t kISortThreshold = 7u; // Based on "PDCLib - Public Domain C Library" and rewritten to C++. 
static void sort(T* base, size_t size, const CompareT& cmp) noexcept { @@ -1510,16 +1716,18 @@ namespace Internal { T* pj = end - 1; std::swap(base[(size_t)(end - base) / 2], base[0]); - if (cmp(*pi , *pj ) > 0) std::swap(*pi , *pj ); - if (cmp(*base, *pj ) > 0) std::swap(*base, *pj ); - if (cmp(*pi , *base) > 0) std::swap(*pi , *base); + if (cmp(*pi , *pj ) > 0) { std::swap(*pi , *pj ); } + if (cmp(*base, *pj ) > 0) { std::swap(*base, *pj ); } + if (cmp(*pi , *base) > 0) { std::swap(*pi , *base); } // Now we have the median for pivot element, entering main loop. for (;;) { while (pi < pj && cmp(*++pi, *base) < 0) continue; // Move `i` right until `*i >= pivot`. while (pj > base && cmp(*--pj, *base) > 0) continue; // Move `j` left until `*j <= pivot`. - if (pi > pj) break; + if (pi > pj) { + break; + } std::swap(*pi, *pj); } @@ -1543,11 +1751,13 @@ namespace Internal { } else { // UB sanitizer doesn't like applying offset to a nullptr base. - if (base != end) + if (base != end) { iSort(base, (size_t)(end - base), cmp); + } - if (stackptr == stack) + if (stackptr == stack) { break; + } end = *--stackptr; base = *--stackptr; @@ -1585,9 +1795,9 @@ public: //! \name Construction & Destruction //! \{ - ASMJIT_INLINE_NODEBUG constexpr ArrayReverseIterator() noexcept = default; - ASMJIT_INLINE_NODEBUG constexpr ArrayReverseIterator(const ArrayReverseIterator& other) noexcept = default; - ASMJIT_INLINE_NODEBUG constexpr ArrayReverseIterator(T* ptr) noexcept : _ptr(ptr) {} + ASMJIT_INLINE_CONSTEXPR ArrayReverseIterator() noexcept = default; + ASMJIT_INLINE_CONSTEXPR ArrayReverseIterator(const ArrayReverseIterator& other) noexcept = default; + ASMJIT_INLINE_CONSTEXPR ArrayReverseIterator(T* ptr) noexcept : _ptr(ptr) {} //! 
\} @@ -1626,8 +1836,8 @@ public: template ASMJIT_INLINE_NODEBUG ArrayReverseIterator& operator+=(const Diff& n) noexcept { _ptr -= n; return *this; } template ASMJIT_INLINE_NODEBUG ArrayReverseIterator& operator-=(const Diff& n) noexcept { _ptr += n; return *this; } - ASMJIT_INLINE_NODEBUG constexpr T& operator*() const noexcept { return _ptr[-1]; } - ASMJIT_INLINE_NODEBUG constexpr T* operator->() const noexcept { return &_ptr[-1]; } + ASMJIT_INLINE_CONSTEXPR T& operator*() const noexcept { return _ptr[-1]; } + ASMJIT_INLINE_CONSTEXPR T* operator->() const noexcept { return &_ptr[-1]; } template ASMJIT_INLINE_NODEBUG T& operator[](const Diff& n) noexcept { return *(_ptr - n - 1); } @@ -1654,18 +1864,18 @@ struct Array { //! \cond // std compatibility. - typedef T value_type; - typedef size_t size_type; - typedef ptrdiff_t difference_type; + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; - typedef value_type& reference; - typedef const value_type& const_reference; + using reference = value_type&; + using const_reference = const value_type&; - typedef value_type* pointer; - typedef const value_type* const_pointer; + using pointer = value_type*; + using const_pointer = const value_type*; - typedef pointer iterator; - typedef const_pointer const_iterator; + using iterator = pointer; + using const_iterator = const_pointer; //! \endcond //! 
\name Overloaded Operators @@ -1673,26 +1883,28 @@ struct Array { template inline T& operator[](const Index& index) noexcept { - typedef typename Internal::StdInt::Type U; + using U = typename Internal::StdInt::Type; ASMJIT_ASSERT(U(index) < N); return _data[U(index)]; } template inline const T& operator[](const Index& index) const noexcept { - typedef typename Internal::StdInt::Type U; + using U = typename Internal::StdInt::Type; ASMJIT_ASSERT(U(index) < N); return _data[U(index)]; } - inline bool operator==(const Array& other) const noexcept { - for (size_t i = 0; i < N; i++) - if (_data[i] != other._data[i]) + constexpr inline bool operator==(const Array& other) const noexcept { + for (size_t i = 0; i < N; i++) { + if (_data[i] != other._data[i]) { return false; + } + } return true; } - inline bool operator!=(const Array& other) const noexcept { + ASMJIT_INLINE_CONSTEXPR bool operator!=(const Array& other) const noexcept { return !operator==(other); } @@ -1701,26 +1913,47 @@ struct Array { //! \name Accessors //! 
\{ - ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return false; } - ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return N; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool empty() const noexcept { return false; } - ASMJIT_INLINE_NODEBUG T* data() noexcept { return _data; } - ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return _data; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR size_t size() const noexcept { return N; } - ASMJIT_INLINE_NODEBUG T& front() noexcept { return _data[0]; } - ASMJIT_INLINE_NODEBUG const T& front() const noexcept { return _data[0]; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR T* data() noexcept { return _data; } - ASMJIT_INLINE_NODEBUG T& back() noexcept { return _data[N - 1]; } - ASMJIT_INLINE_NODEBUG const T& back() const noexcept { return _data[N - 1]; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR const T* data() const noexcept { return _data; } - ASMJIT_INLINE_NODEBUG T* begin() noexcept { return _data; } - ASMJIT_INLINE_NODEBUG T* end() noexcept { return _data + N; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR T& front() noexcept { return _data[0]; } - ASMJIT_INLINE_NODEBUG const T* begin() const noexcept { return _data; } - ASMJIT_INLINE_NODEBUG const T* end() const noexcept { return _data + N; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR const T& front() const noexcept { return _data[0]; } - ASMJIT_INLINE_NODEBUG const T* cbegin() const noexcept { return _data; } - ASMJIT_INLINE_NODEBUG const T* cend() const noexcept { return _data + N; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR T& back() noexcept { return _data[N - 1]; } + + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR const T& back() const noexcept { return _data[N - 1]; } + + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR T* begin() noexcept { return _data; } + + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR T* end() noexcept { return _data + N; } + + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR const T* begin() const noexcept { return _data; } + + [[nodiscard]] + 
ASMJIT_INLINE_CONSTEXPR const T* end() const noexcept { return _data + N; } + + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR const T* cbegin() const noexcept { return _data; } + + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR const T* cend() const noexcept { return _data + N; } //! \} @@ -1728,38 +1961,44 @@ struct Array { //! \{ inline void swap(Array& other) noexcept { - for (size_t i = 0; i < N; i++) + for (size_t i = 0; i < N; i++) { std::swap(_data[i], other._data[i]); + } } inline void fill(const T& value) noexcept { - for (size_t i = 0; i < N; i++) + for (size_t i = 0; i < N; i++) { _data[i] = value; + } } inline void copyFrom(const Array& other) noexcept { - for (size_t i = 0; i < N; i++) + for (size_t i = 0; i < N; i++) { _data[i] = other._data[i]; + } } template inline void combine(const Array& other) noexcept { - for (size_t i = 0; i < N; i++) + for (size_t i = 0; i < N; i++) { _data[i] = Operator::op(_data[i], other._data[i]); + } } template inline T aggregate(T initialValue = T()) const noexcept { T value = initialValue; - for (size_t i = 0; i < N; i++) + for (size_t i = 0; i < N; i++) { value = Operator::op(value, _data[i]); + } return value; } template inline void forEach(Fn&& fn) noexcept { - for (size_t i = 0; i < N; i++) + for (size_t i = 0; i < N; i++) { fn(_data[i]); + } } //! \} }; @@ -1783,8 +2022,8 @@ struct Temporary { //! \name Construction & Destruction //! \{ - ASMJIT_INLINE_NODEBUG constexpr Temporary(const Temporary& other) noexcept = default; - ASMJIT_INLINE_NODEBUG constexpr Temporary(void* data, size_t size) noexcept + ASMJIT_INLINE_CONSTEXPR Temporary(const Temporary& other) noexcept = default; + ASMJIT_INLINE_CONSTEXPR Temporary(void* data, size_t size) noexcept : _data(data), _size(size) {} @@ -1802,9 +2041,12 @@ struct Temporary { //! Returns the data storage. 
template - ASMJIT_INLINE_NODEBUG constexpr T* data() const noexcept { return static_cast(_data); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR T* data() const noexcept { return static_cast(_data); } + //! Returns the data storage size in bytes. - ASMJIT_INLINE_NODEBUG constexpr size_t size() const noexcept { return _size; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR size_t size() const noexcept { return _size; } //! \} }; diff --git a/src/asmjit/core/target.h b/src/asmjit/core/target.h index 4365be1..85df976 100644 --- a/src/asmjit/core/target.h +++ b/src/asmjit/core/target.h @@ -40,12 +40,18 @@ public: //! \{ //! Returns target's environment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; } + //! Returns the target architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _environment.arch(); } + //! Returns the target sub-architecture. + [[nodiscard]] ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _environment.subArch(); } + [[nodiscard]] //! Returns target CPU features. ASMJIT_INLINE_NODEBUG const CpuFeatures& cpuFeatures() const noexcept { return _cpuFeatures; } diff --git a/src/asmjit/core/type.cpp b/src/asmjit/core/type.cpp index 536fb88..837cecf 100644 --- a/src/asmjit/core/type.cpp +++ b/src/asmjit/core/type.cpp @@ -13,50 +13,46 @@ namespace TypeUtils { template struct ScalarOfTypeId { - enum : uint32_t { - kTypeId = uint32_t( - isScalar(TypeId(Index)) ? TypeId(Index) : - isMask8 (TypeId(Index)) ? TypeId::kUInt8 : - isMask16(TypeId(Index)) ? TypeId::kUInt16 : - isMask32(TypeId(Index)) ? TypeId::kUInt32 : - isMask64(TypeId(Index)) ? TypeId::kUInt64 : - isMmx32 (TypeId(Index)) ? TypeId::kUInt32 : - isMmx64 (TypeId(Index)) ? TypeId::kUInt64 : - isVec32 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec32Start ) + uint32_t(TypeId::kInt8)) & 0xFF) : - isVec64 (TypeId(Index)) ? 
TypeId((Index - uint32_t(TypeId::_kVec64Start ) + uint32_t(TypeId::kInt8)) & 0xFF) : - isVec128(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec128Start) + uint32_t(TypeId::kInt8)) & 0xFF) : - isVec256(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec256Start) + uint32_t(TypeId::kInt8)) & 0xFF) : - isVec512(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec512Start) + uint32_t(TypeId::kInt8)) & 0xFF) : TypeId::kVoid) - }; + static inline constexpr uint32_t kTypeId = uint32_t( + isScalar(TypeId(Index)) ? TypeId(Index) : + isMask8 (TypeId(Index)) ? TypeId::kUInt8 : + isMask16(TypeId(Index)) ? TypeId::kUInt16 : + isMask32(TypeId(Index)) ? TypeId::kUInt32 : + isMask64(TypeId(Index)) ? TypeId::kUInt64 : + isMmx32 (TypeId(Index)) ? TypeId::kUInt32 : + isMmx64 (TypeId(Index)) ? TypeId::kUInt64 : + isVec32 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec32Start ) + uint32_t(TypeId::kInt8)) & 0xFF) : + isVec64 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec64Start ) + uint32_t(TypeId::kInt8)) & 0xFF) : + isVec128(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec128Start) + uint32_t(TypeId::kInt8)) & 0xFF) : + isVec256(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec256Start) + uint32_t(TypeId::kInt8)) & 0xFF) : + isVec512(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec512Start) + uint32_t(TypeId::kInt8)) & 0xFF) : TypeId::kVoid); }; template struct SizeOfTypeId { - enum : uint32_t { - kTypeSize = - isInt8 (TypeId(Index)) ? 1 : - isUInt8 (TypeId(Index)) ? 1 : - isInt16 (TypeId(Index)) ? 2 : - isUInt16 (TypeId(Index)) ? 2 : - isInt32 (TypeId(Index)) ? 4 : - isUInt32 (TypeId(Index)) ? 4 : - isInt64 (TypeId(Index)) ? 8 : - isUInt64 (TypeId(Index)) ? 8 : - isFloat32(TypeId(Index)) ? 4 : - isFloat64(TypeId(Index)) ? 8 : - isFloat80(TypeId(Index)) ? 10 : - isMask8 (TypeId(Index)) ? 1 : - isMask16 (TypeId(Index)) ? 2 : - isMask32 (TypeId(Index)) ? 4 : - isMask64 (TypeId(Index)) ? 8 : - isMmx32 (TypeId(Index)) ? 
4 : - isMmx64 (TypeId(Index)) ? 8 : - isVec32 (TypeId(Index)) ? 4 : - isVec64 (TypeId(Index)) ? 8 : - isVec128 (TypeId(Index)) ? 16 : - isVec256 (TypeId(Index)) ? 32 : - isVec512 (TypeId(Index)) ? 64 : 0 - }; + static inline constexpr uint32_t kTypeSize = + isInt8 (TypeId(Index)) ? 1 : + isUInt8 (TypeId(Index)) ? 1 : + isInt16 (TypeId(Index)) ? 2 : + isUInt16 (TypeId(Index)) ? 2 : + isInt32 (TypeId(Index)) ? 4 : + isUInt32 (TypeId(Index)) ? 4 : + isInt64 (TypeId(Index)) ? 8 : + isUInt64 (TypeId(Index)) ? 8 : + isFloat32(TypeId(Index)) ? 4 : + isFloat64(TypeId(Index)) ? 8 : + isFloat80(TypeId(Index)) ? 10 : + isMask8 (TypeId(Index)) ? 1 : + isMask16 (TypeId(Index)) ? 2 : + isMask32 (TypeId(Index)) ? 4 : + isMask64 (TypeId(Index)) ? 8 : + isMmx32 (TypeId(Index)) ? 4 : + isMmx64 (TypeId(Index)) ? 8 : + isVec32 (TypeId(Index)) ? 4 : + isVec64 (TypeId(Index)) ? 8 : + isVec128 (TypeId(Index)) ? 16 : + isVec256 (TypeId(Index)) ? 32 : + isVec512 (TypeId(Index)) ? 64 : 0; }; const TypeData _typeData = { diff --git a/src/asmjit/core/type.h b/src/asmjit/core/type.h index 415fe0a..7b84d48 100644 --- a/src/asmjit/core/type.h +++ b/src/asmjit/core/type.h @@ -164,98 +164,164 @@ struct TypeData { ASMJIT_VARAPI const TypeData _typeData; //! Returns the scalar type of `typeId`. +[[nodiscard]] static ASMJIT_INLINE_NODEBUG TypeId scalarOf(TypeId typeId) noexcept { return _typeData.scalarOf[uint32_t(typeId)]; } //! Returns the size [in bytes] of `typeId`. +[[nodiscard]] static ASMJIT_INLINE_NODEBUG uint32_t sizeOf(TypeId typeId) noexcept { return _typeData.sizeOf[uint32_t(typeId)]; } //! Tests whether a given type `typeId` is between `a` and `b`. -static ASMJIT_INLINE_NODEBUG constexpr bool isBetween(TypeId typeId, TypeId a, TypeId b) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isBetween(TypeId typeId, TypeId a, TypeId b) noexcept { return Support::isBetween(uint32_t(typeId), uint32_t(a), uint32_t(b)); } //! Tests whether a given type `typeId` is \ref TypeId::kVoid. 
-static ASMJIT_INLINE_NODEBUG constexpr bool isVoid(TypeId typeId) noexcept { return typeId == TypeId::kVoid; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isVoid(TypeId typeId) noexcept { return typeId == TypeId::kVoid; } + //! Tests whether a given type `typeId` is a valid non-void type. -static ASMJIT_INLINE_NODEBUG constexpr bool isValid(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kVec512End); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isValid(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kVec512End); } + //! Tests whether a given type `typeId` is scalar (has no vector part). -static ASMJIT_INLINE_NODEBUG constexpr bool isScalar(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kBaseStart, TypeId::_kBaseEnd); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isScalar(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kBaseStart, TypeId::_kBaseEnd); } + //! Tests whether a given type `typeId` is abstract, which means that its size depends on register size. -static ASMJIT_INLINE_NODEBUG constexpr bool isAbstract(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kIntPtr, TypeId::kUIntPtr); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isAbstract(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kIntPtr, TypeId::kUIntPtr); } //! Tests whether a given type is a scalar integer (signed or unsigned) of any size. -static ASMJIT_INLINE_NODEBUG constexpr bool isInt(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kIntEnd); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kIntEnd); } + //! Tests whether a given type is a scalar 8-bit integer (signed). 
-static ASMJIT_INLINE_NODEBUG constexpr bool isInt8(TypeId typeId) noexcept { return typeId == TypeId::kInt8; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt8(TypeId typeId) noexcept { return typeId == TypeId::kInt8; } + //! Tests whether a given type is a scalar 8-bit integer (unsigned). -static ASMJIT_INLINE_NODEBUG constexpr bool isUInt8(TypeId typeId) noexcept { return typeId == TypeId::kUInt8; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isUInt8(TypeId typeId) noexcept { return typeId == TypeId::kUInt8; } + //! Tests whether a given type is a scalar 16-bit integer (signed). -static ASMJIT_INLINE_NODEBUG constexpr bool isInt16(TypeId typeId) noexcept { return typeId == TypeId::kInt16; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt16(TypeId typeId) noexcept { return typeId == TypeId::kInt16; } + //! Tests whether a given type is a scalar 16-bit integer (unsigned). -static ASMJIT_INLINE_NODEBUG constexpr bool isUInt16(TypeId typeId) noexcept { return typeId == TypeId::kUInt16; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isUInt16(TypeId typeId) noexcept { return typeId == TypeId::kUInt16; } + //! Tests whether a given type is a scalar 32-bit integer (signed). -static ASMJIT_INLINE_NODEBUG constexpr bool isInt32(TypeId typeId) noexcept { return typeId == TypeId::kInt32; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt32(TypeId typeId) noexcept { return typeId == TypeId::kInt32; } + //! Tests whether a given type is a scalar 32-bit integer (unsigned). -static ASMJIT_INLINE_NODEBUG constexpr bool isUInt32(TypeId typeId) noexcept { return typeId == TypeId::kUInt32; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isUInt32(TypeId typeId) noexcept { return typeId == TypeId::kUInt32; } + //! Tests whether a given type is a scalar 64-bit integer (signed). 
-static ASMJIT_INLINE_NODEBUG constexpr bool isInt64(TypeId typeId) noexcept { return typeId == TypeId::kInt64; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isInt64(TypeId typeId) noexcept { return typeId == TypeId::kInt64; } + //! Tests whether a given type is a scalar 64-bit integer (unsigned). -static ASMJIT_INLINE_NODEBUG constexpr bool isUInt64(TypeId typeId) noexcept { return typeId == TypeId::kUInt64; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isUInt64(TypeId typeId) noexcept { return typeId == TypeId::kUInt64; } //! Tests whether a given type is an 8-bit general purpose register representing either signed or unsigned 8-bit integer. -static ASMJIT_INLINE_NODEBUG constexpr bool isGp8(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt8, TypeId::kUInt8); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isGp8(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt8, TypeId::kUInt8); } + //! Tests whether a given type is a 16-bit general purpose register representing either signed or unsigned 16-bit integer -static ASMJIT_INLINE_NODEBUG constexpr bool isGp16(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt16, TypeId::kUInt16); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isGp16(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt16, TypeId::kUInt16); } + //! Tests whether a given type is a 32-bit general purpose register representing either signed or unsigned 32-bit integer -static ASMJIT_INLINE_NODEBUG constexpr bool isGp32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt32, TypeId::kUInt32); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isGp32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt32, TypeId::kUInt32); } + //! 
Tests whether a given type is a 64-bit general purpose register representing either signed or unsigned 64-bit integer -static ASMJIT_INLINE_NODEBUG constexpr bool isGp64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt64, TypeId::kUInt64); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isGp64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt64, TypeId::kUInt64); } //! Tests whether a given type is a scalar floating point of any size. -static ASMJIT_INLINE_NODEBUG constexpr bool isFloat(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kFloatStart, TypeId::_kFloatEnd); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isFloat(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kFloatStart, TypeId::_kFloatEnd); } + //! Tests whether a given type is a scalar 32-bit float. -static ASMJIT_INLINE_NODEBUG constexpr bool isFloat32(TypeId typeId) noexcept { return typeId == TypeId::kFloat32; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isFloat32(TypeId typeId) noexcept { return typeId == TypeId::kFloat32; } + //! Tests whether a given type is a scalar 64-bit float. -static ASMJIT_INLINE_NODEBUG constexpr bool isFloat64(TypeId typeId) noexcept { return typeId == TypeId::kFloat64; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isFloat64(TypeId typeId) noexcept { return typeId == TypeId::kFloat64; } + //! Tests whether a given type is a scalar 80-bit float. -static ASMJIT_INLINE_NODEBUG constexpr bool isFloat80(TypeId typeId) noexcept { return typeId == TypeId::kFloat80; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isFloat80(TypeId typeId) noexcept { return typeId == TypeId::kFloat80; } //! Tests whether a given type is a mask register of any size. 
-static ASMJIT_INLINE_NODEBUG constexpr bool isMask(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMaskStart, TypeId::_kMaskEnd); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isMask(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMaskStart, TypeId::_kMaskEnd); } + //! Tests whether a given type is an 8-bit mask register. -static ASMJIT_INLINE_NODEBUG constexpr bool isMask8(TypeId typeId) noexcept { return typeId == TypeId::kMask8; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isMask8(TypeId typeId) noexcept { return typeId == TypeId::kMask8; } + //! Tests whether a given type is an 16-bit mask register. -static ASMJIT_INLINE_NODEBUG constexpr bool isMask16(TypeId typeId) noexcept { return typeId == TypeId::kMask16; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isMask16(TypeId typeId) noexcept { return typeId == TypeId::kMask16; } + //! Tests whether a given type is an 32-bit mask register. -static ASMJIT_INLINE_NODEBUG constexpr bool isMask32(TypeId typeId) noexcept { return typeId == TypeId::kMask32; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isMask32(TypeId typeId) noexcept { return typeId == TypeId::kMask32; } + //! Tests whether a given type is an 64-bit mask register. -static ASMJIT_INLINE_NODEBUG constexpr bool isMask64(TypeId typeId) noexcept { return typeId == TypeId::kMask64; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isMask64(TypeId typeId) noexcept { return typeId == TypeId::kMask64; } //! Tests whether a given type is an MMX register. //! //! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness. -static ASMJIT_INLINE_NODEBUG constexpr bool isMmx(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMmxStart, TypeId::_kMmxEnd); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isMmx(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMmxStart, TypeId::_kMmxEnd); } + //! 
Tests whether a given type is an MMX register, which only uses the low 32 bits of data (only specific cases). //! //! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness. -static ASMJIT_INLINE_NODEBUG constexpr bool isMmx32(TypeId typeId) noexcept { return typeId == TypeId::kMmx32; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isMmx32(TypeId typeId) noexcept { return typeId == TypeId::kMmx32; } + //! Tests whether a given type is an MMX register, which uses 64 bits of data (default). //! //! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness. -static ASMJIT_INLINE_NODEBUG constexpr bool isMmx64(TypeId typeId) noexcept { return typeId == TypeId::kMmx64; } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isMmx64(TypeId typeId) noexcept { return typeId == TypeId::kMmx64; } //! Tests whether a given type is a vector register of any size. -static ASMJIT_INLINE_NODEBUG constexpr bool isVec(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec512End); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isVec(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec512End); } + //! Tests whether a given type is a 32-bit or 32-bit view of a vector register. -static ASMJIT_INLINE_NODEBUG constexpr bool isVec32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec32End); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isVec32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec32End); } + //! Tests whether a given type is a 64-bit or 64-bit view of a vector register. 
-static ASMJIT_INLINE_NODEBUG constexpr bool isVec64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec64Start, TypeId::_kVec64End); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isVec64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec64Start, TypeId::_kVec64End); } + //! Tests whether a given type is a 128-bit or 128-bit view of a vector register. -static ASMJIT_INLINE_NODEBUG constexpr bool isVec128(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec128Start, TypeId::_kVec128End); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isVec128(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec128Start, TypeId::_kVec128End); } + //! Tests whether a given type is a 256-bit or 256-bit view of a vector register. -static ASMJIT_INLINE_NODEBUG constexpr bool isVec256(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec256Start, TypeId::_kVec256End); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isVec256(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec256Start, TypeId::_kVec256End); } + //! Tests whether a given type is a 512-bit or 512-bit view of a vector register. -static ASMJIT_INLINE_NODEBUG constexpr bool isVec512(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec512Start, TypeId::_kVec512End); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR bool isVec512(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec512Start, TypeId::_kVec512End); } //! \cond enum TypeCategory : uint32_t { @@ -271,38 +337,32 @@ struct TypeIdOfT_ByCategory {}; // Fails if not specialized. template struct TypeIdOfT_ByCategory { - enum : uint32_t { - kTypeId = uint32_t( - (sizeof(T) == 1 && std::is_signed::value) ? TypeId::kInt8 : - (sizeof(T) == 1 && !std::is_signed::value) ? TypeId::kUInt8 : - (sizeof(T) == 2 && std::is_signed::value) ? TypeId::kInt16 : - (sizeof(T) == 2 && !std::is_signed::value) ? TypeId::kUInt16 : - (sizeof(T) == 4 && std::is_signed::value) ? 
TypeId::kInt32 : - (sizeof(T) == 4 && !std::is_signed::value) ? TypeId::kUInt32 : - (sizeof(T) == 8 && std::is_signed::value) ? TypeId::kInt64 : - (sizeof(T) == 8 && !std::is_signed::value) ? TypeId::kUInt64 : TypeId::kVoid) - }; + static inline constexpr uint32_t kTypeId = uint32_t( + (sizeof(T) == 1 && std::is_signed_v) ? TypeId::kInt8 : + (sizeof(T) == 1 && !std::is_signed_v) ? TypeId::kUInt8 : + (sizeof(T) == 2 && std::is_signed_v) ? TypeId::kInt16 : + (sizeof(T) == 2 && !std::is_signed_v) ? TypeId::kUInt16 : + (sizeof(T) == 4 && std::is_signed_v) ? TypeId::kInt32 : + (sizeof(T) == 4 && !std::is_signed_v) ? TypeId::kUInt32 : + (sizeof(T) == 8 && std::is_signed_v) ? TypeId::kInt64 : + (sizeof(T) == 8 && !std::is_signed_v) ? TypeId::kUInt64 : TypeId::kVoid); }; template struct TypeIdOfT_ByCategory { - enum : uint32_t { - kTypeId = uint32_t( - (sizeof(T) == 4 ) ? TypeId::kFloat32 : - (sizeof(T) == 8 ) ? TypeId::kFloat64 : - (sizeof(T) >= 10) ? TypeId::kFloat80 : TypeId::kVoid) - }; + static inline constexpr uint32_t kTypeId = uint32_t( + (sizeof(T) == 4 ) ? TypeId::kFloat32 : + (sizeof(T) == 8 ) ? TypeId::kFloat64 : + (sizeof(T) >= 10) ? TypeId::kFloat80 : TypeId::kVoid); }; template struct TypeIdOfT_ByCategory - : public TypeIdOfT_ByCategory::type, kTypeCategoryIntegral> {}; + : public TypeIdOfT_ByCategory, kTypeCategoryIntegral> {}; template struct TypeIdOfT_ByCategory { - enum : uint32_t { - kTypeId = uint32_t(TypeId::kUIntPtr) - }; + static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kUIntPtr); }; //! \endcond @@ -311,37 +371,33 @@ struct TypeIdOfT_ByCategory { template struct TypeIdOfT { //! TypeId of C++ type `T`. - static constexpr TypeId kTypeId = _TypeIdDeducedAtCompileTime_; + static inline constexpr TypeId kTypeId = _TypeIdDeducedAtCompileTime_; }; #else template struct TypeIdOfT : public TypeIdOfT_ByCategory::value ? kTypeCategoryEnum : - std::is_integral::value ? kTypeCategoryIntegral : - std::is_floating_point::value ? 
kTypeCategoryFloatingPoint : - std::is_function::value ? kTypeCategoryFunction : kTypeCategoryUnknown> {}; + std::is_enum_v ? kTypeCategoryEnum : + std::is_integral_v ? kTypeCategoryIntegral : + std::is_floating_point_v ? kTypeCategoryFloatingPoint : + std::is_function_v ? kTypeCategoryFunction : kTypeCategoryUnknown> {}; #endif //! \cond template struct TypeIdOfT { - enum : uint32_t { - kTypeId = uint32_t(TypeId::kUIntPtr) - }; + static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kUIntPtr); }; template struct TypeIdOfT { - enum : uint32_t { - kTypeId = uint32_t(TypeId::kUIntPtr) - }; + static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kUIntPtr); }; //! \endcond //! Returns a corresponding \ref TypeId of `T` type. template -static ASMJIT_INLINE_NODEBUG constexpr TypeId typeIdOfT() noexcept { return TypeId(TypeIdOfT::kTypeId); } +static ASMJIT_INLINE_CONSTEXPR TypeId typeIdOfT() noexcept { return TypeId(TypeIdOfT::kTypeId); } //! Returns offset needed to convert a `kIntPtr` and `kUIntPtr` TypeId into a type that matches `registerSize` //! (general-purpose register size). If you find such TypeId it's then only about adding the offset to it. @@ -360,18 +416,21 @@ static ASMJIT_INLINE_NODEBUG constexpr TypeId typeIdOfT() noexcept { return Type //! // The same, but by using TypeUtils::deabstract() function. //! typeId = TypeUtils::deabstract(typeId, deabstractDelta); //! ``` -static ASMJIT_INLINE_NODEBUG constexpr uint32_t deabstractDeltaOfSize(uint32_t registerSize) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR uint32_t deabstractDeltaOfSize(uint32_t registerSize) noexcept { return registerSize >= 8 ? uint32_t(TypeId::kInt64) - uint32_t(TypeId::kIntPtr) : uint32_t(TypeId::kInt32) - uint32_t(TypeId::kIntPtr); } //! Deabstracts a given `typeId` into a native type by using `deabstractDelta`, which was previously //! calculated by calling \ref deabstractDeltaOfSize() with a target native register size. 
-static ASMJIT_INLINE_NODEBUG constexpr TypeId deabstract(TypeId typeId, uint32_t deabstractDelta) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR TypeId deabstract(TypeId typeId, uint32_t deabstractDelta) noexcept { return isAbstract(typeId) ? TypeId(uint32_t(typeId) + deabstractDelta) : typeId; } -static ASMJIT_INLINE_NODEBUG constexpr TypeId scalarToVector(TypeId scalarTypeId, TypeId vecStartId) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR TypeId scalarToVector(TypeId scalarTypeId, TypeId vecStartId) noexcept { return TypeId(uint32_t(vecStartId) + uint32_t(scalarTypeId) - uint32_t(TypeId::kInt8)); } @@ -410,14 +469,12 @@ struct Float64 {}; } // {Type} //! \cond -#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \ -namespace TypeUtils { \ - template<> \ - struct TypeIdOfT { \ - enum : uint32_t { \ - kTypeId = uint32_t(TYPE_ID) \ - }; \ - }; \ +#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \ +namespace TypeUtils { \ + template<> \ + struct TypeIdOfT { \ + static inline constexpr uint32_t kTypeId = uint32_t(TYPE_ID); \ + }; \ } ASMJIT_DEFINE_TYPE_ID(void , TypeId::kVoid); diff --git a/src/asmjit/core/virtmem.cpp b/src/asmjit/core/virtmem.cpp index 7438477..c1b9523 100644 --- a/src/asmjit/core/virtmem.cpp +++ b/src/asmjit/core/virtmem.cpp @@ -146,7 +146,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(VirtMem) // Virtual Memory Utilities // ======================== -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static const constexpr MemoryFlags dualMappingFilter[2] = { MemoryFlags::kAccessWrite | MemoryFlags::kMMapMaxAccessWrite, MemoryFlags::kAccessExecute | MemoryFlags::kMMapMaxAccessExecute @@ -162,8 +162,9 @@ struct ScopedHandle { : value(nullptr) {} inline ~ScopedHandle() noexcept { - if (value != nullptr) + if (value != nullptr) { ::CloseHandle(value); + } } HANDLE value; @@ -191,12 +192,15 @@ static DWORD protectFlagsFromMemoryFlags(MemoryFlags memoryFlags) noexcept { DWORD protectFlags; // READ|WRITE|EXECUTE. 
- if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) + if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) { protectFlags = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ; - else if (Support::test(memoryFlags, MemoryFlags::kAccessRW)) + } + else if (Support::test(memoryFlags, MemoryFlags::kAccessRW)) { protectFlags = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? PAGE_READWRITE : PAGE_READONLY; - else + } + else { protectFlags = PAGE_NOACCESS; + } // Any other flags to consider? return protectFlags; @@ -204,24 +208,28 @@ static DWORD protectFlagsFromMemoryFlags(MemoryFlags memoryFlags) noexcept { static DWORD desiredAccessFromMemoryFlags(MemoryFlags memoryFlags) noexcept { DWORD access = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? FILE_MAP_WRITE : FILE_MAP_READ; - if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) + if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) { access |= FILE_MAP_EXECUTE; + } return access; } static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept { HardenedRuntimeFlags flags = HardenedRuntimeFlags::kNone; - if (hasDualMappingSupport()) + if (hasDualMappingSupport()) { flags |= HardenedRuntimeFlags::kDualMapping; + } return flags; } Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept { *p = nullptr; - if (size == 0) + + if (size == 0) { return DebugUtils::errored(kErrorInvalidArgument); + } DWORD allocationType = MEM_COMMIT | MEM_RESERVE; DWORD protectFlags = protectFlagsFromMemoryFlags(memoryFlags); @@ -230,18 +238,21 @@ Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept { size_t lpSize = largePageSize(); // Does it make sense to call VirtualAlloc() if we failed to query large page size? 
- if (lpSize == 0) + if (lpSize == 0) { return DebugUtils::errored(kErrorFeatureNotEnabled); + } - if (!Support::isAligned(size, lpSize)) + if (!Support::isAligned(size, lpSize)) { return DebugUtils::errored(kErrorInvalidArgument); + } allocationType |= MEM_LARGE_PAGES; } void* result = ::VirtualAlloc(nullptr, size, allocationType, protectFlags); - if (!result) + if (!result) { return DebugUtils::errored(kErrorOutOfMemory); + } *p = result; return kErrorOk; @@ -249,10 +260,14 @@ Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept { Error release(void* p, size_t size) noexcept { DebugUtils::unused(size); + // NOTE: If the `dwFreeType` parameter is MEM_RELEASE, `size` parameter must be zero. constexpr DWORD dwFreeType = MEM_RELEASE; - if (ASMJIT_UNLIKELY(!::VirtualFree(p, 0, dwFreeType))) + + if (ASMJIT_UNLIKELY(!::VirtualFree(p, 0, dwFreeType))) { return DebugUtils::errored(kErrorInvalidArgument); + + } return kErrorOk; } @@ -260,8 +275,9 @@ Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept { DWORD protectFlags = protectFlagsFromMemoryFlags(memoryFlags); DWORD oldFlags; - if (::VirtualProtect(p, size, protectFlags, &oldFlags)) + if (::VirtualProtect(p, size, protectFlags, &oldFlags)) { return kErrorOk; + } return DebugUtils::errored(kErrorInvalidArgument); } @@ -270,8 +286,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no dm->rx = nullptr; dm->rw = nullptr; - if (size == 0) + if (size == 0) { return DebugUtils::errored(kErrorInvalidArgument); + } ScopedHandle handle; handle.value = ::CreateFileMappingW( @@ -282,8 +299,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no (DWORD)(size & 0xFFFFFFFFu), nullptr); - if (ASMJIT_UNLIKELY(!handle.value)) + if (ASMJIT_UNLIKELY(!handle.value)) { return DebugUtils::errored(kErrorOutOfMemory); + } void* ptr[2]; for (uint32_t i = 0; i < 2; i++) { @@ -292,8 +310,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, 
MemoryFlags memoryFlags) no ptr[i] = ::MapViewOfFile(handle.value, desiredAccess, 0, 0, size); if (ptr[i] == nullptr) { - if (i == 1u) + if (i == 1u) { ::UnmapViewOfFile(ptr[0]); + } return DebugUtils::errored(kErrorOutOfMemory); } } @@ -307,14 +326,17 @@ Error releaseDualMapping(DualMapping* dm, size_t size) noexcept { DebugUtils::unused(size); bool failed = false; - if (!::UnmapViewOfFile(dm->rx)) + if (!::UnmapViewOfFile(dm->rx)) { failed = true; + } - if (dm->rx != dm->rw && !UnmapViewOfFile(dm->rw)) + if (dm->rx != dm->rw && !UnmapViewOfFile(dm->rw)) { failed = true; + } - if (failed) + if (failed) { return DebugUtils::errored(kErrorInvalidArgument); + } dm->rx = nullptr; dm->rw = nullptr; @@ -342,7 +364,7 @@ struct KernelVersion { inline bool ge(long major, long minor) const noexcept { return ver[0] > major || (ver[0] == major && ver[1] >= minor); } }; -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static KernelVersion getKernelVersion() noexcept { KernelVersion out {}; struct utsname buf {}; @@ -368,7 +390,7 @@ static KernelVersion getKernelVersion() noexcept { #endif // getKernelVersion // Translates libc errors specific to VirtualMemory mapping to `asmjit::Error`. 
-ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static Error asmjitErrorFromErrno(int e) noexcept { switch (e) { case EACCES: @@ -391,20 +413,20 @@ static Error asmjitErrorFromErrno(int e) noexcept { } } -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static MemoryFlags maxAccessFlagsToRegularAccessFlags(MemoryFlags memoryFlags) noexcept { static constexpr uint32_t kMaxProtShift = Support::ConstCTZ::value; return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kMMapMaxAccessRWX) >> kMaxProtShift); } -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static MemoryFlags regularAccessFlagsToMaxAccessFlags(MemoryFlags memoryFlags) noexcept { static constexpr uint32_t kMaxProtShift = Support::ConstCTZ::value; return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kAccessRWX) << kMaxProtShift); } // Returns `mmap()` protection flags from \ref MemoryFlags. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static int mmProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept { int protection = 0; if (Support::test(memoryFlags, MemoryFlags::kAccessRead)) protection |= PROT_READ; @@ -418,7 +440,7 @@ static int mmProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept { // Uses: // - `PROT_MPROTECT()` on NetBSD. // - `PROT_MAX()` when available on other BSDs. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline int mmMaxProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept { MemoryFlags acc = maxAccessFlagsToRegularAccessFlags(memoryFlags); if (acc != MemoryFlags::kNone) { @@ -450,8 +472,10 @@ static size_t detectLargePageSize() noexcept { return (getpagesizes(pageSize.data(), 2) < 2) ? 0 : uint32_t(pageSize[1]); #elif defined(__linux__) StringTmp<128> storage; - if (OSUtils::readFile("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", storage, 16) != kErrorOk || storage.empty()) + + if (OSUtils::readFile("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", storage, 16) != kErrorOk || storage.empty()) { return 0u; + } // The first value should be the size of the page (hpage_pmd_size). 
size_t largePageSize = 0; @@ -461,8 +485,9 @@ static size_t detectLargePageSize() noexcept { for (size_t i = 0; i < bufSize; i++) { uint32_t digit = uint32_t(uint8_t(buf[i]) - uint8_t('0')); - if (digit >= 10u) + if (digit >= 10u) { break; + } largePageSize = largePageSize * 10 + digit; } @@ -514,7 +539,7 @@ static uint32_t getMfdExecFlag() noexcept { // It's not fully random, just to avoid collisions when opening TMP or SHM file. -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static uint64_t generateRandomBits(uintptr_t stackPtr, uint32_t attempt) noexcept { static std::atomic internalCounter; @@ -573,14 +598,17 @@ public: if (!memfd_create_not_supported) { _fd = (int)syscall(__NR_memfd_create, "vmem", MFD_CLOEXEC | getMfdExecFlag()); - if (ASMJIT_LIKELY(_fd >= 0)) + if (ASMJIT_LIKELY(_fd >= 0)) { return kErrorOk; + } int e = errno; - if (e == ENOSYS) + if (e == ENOSYS) { memfd_create_not_supported = 1; - else + } + else { return DebugUtils::errored(asmjitErrorFromErrno(e)); + } } #endif // __linux__ && __NR_memfd_create @@ -589,10 +617,12 @@ public: DebugUtils::unused(preferTmpOverDevShm); _fd = ::shm_open(SHM_ANON, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR); - if (ASMJIT_LIKELY(_fd >= 0)) + if (ASMJIT_LIKELY(_fd >= 0)) { return kErrorOk; - else + } + else { return DebugUtils::errored(asmjitErrorFromErrno(errno)); + } #else // POSIX API. We have to generate somehow a unique name, so use `generateRandomBits()` helper. To prevent // having file collisions we use `shm_open()` with flags that require creation of the file so we never open @@ -625,8 +655,9 @@ public: #endif int e = errno; - if (e != EEXIST) + if (e != EEXIST) { return DebugUtils::errored(asmjitErrorFromErrno(e)); + } } return DebugUtils::errored(kErrorFailedToOpenAnonymousMemory); @@ -659,8 +690,9 @@ public: Error allocate(size_t size) noexcept { // TODO: Improve this by using `posix_fallocate()` when available. 
- if (ASMJIT_FILE64_API(ftruncate)(_fd, off_t(size)) != 0) + if (ASMJIT_FILE64_API(ftruncate)(_fd, off_t(size)) != 0) { return DebugUtils::errored(asmjitErrorFromErrno(errno)); + } return kErrorOk; } @@ -725,11 +757,9 @@ static bool hasHardenedRuntime() noexcept { #else static std::atomic cachedHardenedFlag; - enum HardenedFlag : uint32_t { - kHardenedFlagUnknown = 0, - kHardenedFlagDisabled = 1, - kHardenedFlagEnabled = 2 - }; + constexpr uint32_t kHardenedFlagUnknown = 0; + constexpr uint32_t kHardenedFlagDisabled = 1; + constexpr uint32_t kHardenedFlagEnabled = 2; uint32_t flag = cachedHardenedFlag.load(); if (flag == kHardenedFlagUnknown) { @@ -784,12 +814,14 @@ static inline int mmMapJitFromMemoryFlags(MemoryFlags memoryFlags) noexcept { // // MAP_JIT is not required when dual-mapping memory and is incompatible with MAP_SHARED, so it will not be // added when the latter is enabled. - bool useMapJit = (Support::test(memoryFlags, MemoryFlags::kMMapEnableMapJit) || hasHardenedRuntime()) - && !Support::test(memoryFlags, MemoryFlags::kMapShared); - if (useMapJit) + bool useMapJit = (Support::test(memoryFlags, MemoryFlags::kMMapEnableMapJit) || hasHardenedRuntime()) && + !Support::test(memoryFlags, MemoryFlags::kMapShared); + if (useMapJit) { return hasMapJitSupport() ? 
int(MAP_JIT) : 0; - else + } + else { return 0; + } #else DebugUtils::unused(memoryFlags); return 0; @@ -807,40 +839,48 @@ static inline bool hasDualMappingSupport() noexcept { static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept { HardenedRuntimeFlags flags = HardenedRuntimeFlags::kNone; - if (hasHardenedRuntime()) + if (hasHardenedRuntime()) { flags |= HardenedRuntimeFlags::kEnabled; + } - if (hasMapJitSupport()) + if (hasMapJitSupport()) { flags |= HardenedRuntimeFlags::kMapJit; + } - if (hasDualMappingSupport()) + if (hasDualMappingSupport()) { flags |= HardenedRuntimeFlags::kDualMapping; + } return flags; } static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd = -1, off_t offset = 0) noexcept { *p = nullptr; - if (size == 0) + + if (size == 0) { return DebugUtils::errored(kErrorInvalidArgument); + } int protection = mmProtFromMemoryFlags(memoryFlags) | mmMaxProtFromMemoryFlags(memoryFlags); int mmFlags = mmMapJitFromMemoryFlags(memoryFlags); mmFlags |= Support::test(memoryFlags, MemoryFlags::kMapShared) ? 
MAP_SHARED : MAP_PRIVATE; - if (fd == -1) + if (fd == -1) { mmFlags |= MAP_ANONYMOUS; + } bool useLargePages = Support::test(memoryFlags, VirtMem::MemoryFlags::kMMapLargePages); if (useLargePages) { #if defined(__linux__) size_t lpSize = largePageSize(); - if (lpSize == 0) + if (lpSize == 0) { return DebugUtils::errored(kErrorFeatureNotEnabled); + } - if (!Support::isAligned(size, lpSize)) + if (!Support::isAligned(size, lpSize)) { return DebugUtils::errored(kErrorInvalidArgument); + } unsigned lpSizeLog2 = Support::ctz(lpSize); mmFlags |= int(unsigned(MAP_HUGETLB) | (lpSizeLog2 << MAP_HUGE_SHIFT)); @@ -850,8 +890,9 @@ static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd = } void* ptr = mmap(nullptr, size, protection, mmFlags, fd, offset); - if (ptr == MAP_FAILED) + if (ptr == MAP_FAILED) { return DebugUtils::errored(asmjitErrorFromErrno(errno)); + } #if defined(MADV_HUGEPAGE) if (useLargePages) { @@ -864,8 +905,9 @@ static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd = } static Error unmapMemory(void* p, size_t size) noexcept { - if (ASMJIT_UNLIKELY(munmap(p, size) != 0)) + if (ASMJIT_UNLIKELY(munmap(p, size) != 0)) { return DebugUtils::errored(asmjitErrorFromErrno(errno)); + } return kErrorOk; } @@ -880,9 +922,9 @@ Error release(void* p, size_t size) noexcept { Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept { int protection = mmProtFromMemoryFlags(memoryFlags); - if (mprotect(p, size, protection) == 0) + if (mprotect(p, size, protection) == 0) { return kErrorOk; - + } return DebugUtils::errored(asmjitErrorFromErrno(errno)); } @@ -894,12 +936,14 @@ static Error unmapDualMapping(DualMapping* dm, size_t size) noexcept { Error err1 = unmapMemory(dm->rx, size); Error err2 = kErrorOk; - if (dm->rx != dm->rw) + if (dm->rx != dm->rw) { err2 = unmapMemory(dm->rw, size); + } // We can report only one error, so report the first... 
- if (err1 || err2) + if (err1 || err2) { return DebugUtils::errored(err1 ? err1 : err2); + } dm->rx = nullptr; dm->rw = nullptr; @@ -964,8 +1008,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M int rwProtectFlags = VM_PROT_READ | VM_PROT_WRITE; int rxProtectFlags = VM_PROT_READ; - if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) + if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) { rxProtectFlags |= VM_PROT_EXECUTE; + } kern_return_t result {}; do { @@ -992,8 +1037,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M &maxProt, // max_protection VM_INHERIT_DEFAULT); // inheritance - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { break; + } dm.rw = (void*)remappedAddr; @@ -1011,8 +1057,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M setMaximum, // set_maximum rxProtectFlags); // new_protection - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { break; + } result = vm_protect(task, // target_task (vm_address_t)dm.rw, // address @@ -1020,8 +1067,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M setMaximum, // set_maximum rwProtectFlags); // new_protection - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { break; + } } } while (0); @@ -1053,8 +1101,9 @@ static Error allocDualMappingUsingFile(DualMapping* dm, size_t size, MemoryFlags MemoryFlags restrictedMemoryFlags = memoryFlags & ~dualMappingFilter[i]; Error err = mapMemory(&ptr[i], size, restrictedMemoryFlags | MemoryFlags::kMapShared, anonMem.fd(), 0); if (err != kErrorOk) { - if (i == 1) + if (i == 1) { unmapMemory(ptr[0], size); + } return err; } } @@ -1073,8 +1122,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no DebugUtils::unused(size, memoryFlags); return DebugUtils::errored(kErrorFeatureNotEnabled); #else - if (off_t(size) <= 0) + if (off_t(size) <= 0) { return 
DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge); + } #if defined(ASMJIT_ANONYMOUS_MEMORY_USE_REMAPDUP) return allocDualMappingUsingRemapdup(dm, size, memoryFlags); @@ -1143,11 +1193,13 @@ size_t largePageSize() noexcept { static constexpr size_t kNotAvailable = 1; size_t size = largePageSize.load(); - if (ASMJIT_LIKELY(size > kNotAvailable)) + if (ASMJIT_LIKELY(size > kNotAvailable)) { return size; + } - if (size == kNotAvailable) + if (size == kNotAvailable) { return 0; + } size = detectLargePageSize(); largePageSize.store(size != 0 ? size : kNotAvailable); diff --git a/src/asmjit/core/virtmem.h b/src/asmjit/core/virtmem.h index 17996dc..3520b19 100644 --- a/src/asmjit/core/virtmem.h +++ b/src/asmjit/core/virtmem.h @@ -50,6 +50,7 @@ struct Info { }; //! Returns virtual memory information, see `VirtMem::Info` for more details. +[[nodiscard]] ASMJIT_API Info info() noexcept; //! Returns the size of the smallest large page supported. @@ -59,6 +60,7 @@ ASMJIT_API Info info() noexcept; //! //! Returns either the detected large page size or 0, if large page support is either not supported by AsmJit //! or not accessible to the process. +[[nodiscard]] ASMJIT_API size_t largePageSize() noexcept; //! Virtual memory access and mmap-specific flags. @@ -164,15 +166,18 @@ ASMJIT_DEFINE_ENUM_FLAGS(MemoryFlags) //! //! \note `size` should be aligned to page size, use \ref VirtMem::info() to obtain it. Invalid size will not be //! corrected by the implementation and the allocation would not succeed in such case. +[[nodiscard]] ASMJIT_API Error alloc(void** p, size_t size, MemoryFlags flags) noexcept; //! Releases virtual memory previously allocated by \ref VirtMem::alloc(). //! //! \note The size must be the same as used by \ref VirtMem::alloc(). If the size is not the same value the call //! will fail on any POSIX system, but pass on Windows, because it's implemented differently. +[[nodiscard]] ASMJIT_API Error release(void* p, size_t size) noexcept; //! 
A cross-platform wrapper around `mprotect()` (POSIX) and `VirtualProtect()` (Windows). +[[nodiscard]] ASMJIT_API Error protect(void* p, size_t size, MemoryFlags flags) noexcept; //! Dual memory mapping used to map an anonymous memory into two memory regions where one region is read-only, but @@ -195,11 +200,13 @@ struct DualMapping { //! release the memory returned by `allocDualMapping()` as that would fail on Windows. //! //! \remarks Both pointers in `dm` would be set to `nullptr` if the function fails. +[[nodiscard]] ASMJIT_API Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags flags) noexcept; //! Releases virtual memory mapping previously allocated by \ref VirtMem::allocDualMapping(). //! //! \remarks Both pointers in `dm` would be set to `nullptr` if the function succeeds. +[[nodiscard]] ASMJIT_API Error releaseDualMapping(DualMapping* dm, size_t size) noexcept; //! Hardened runtime flags. @@ -238,12 +245,14 @@ struct HardenedRuntimeInfo { //! \{ //! Tests whether the hardened runtime `flag` is set. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(HardenedRuntimeFlags flag) const noexcept { return Support::test(flags, flag); } //! \} }; //! Returns runtime features provided by the OS. +[[nodiscard]] ASMJIT_API HardenedRuntimeInfo hardenedRuntimeInfo() noexcept; //! Values that can be used with `protectJitMemory()` function. @@ -296,10 +305,11 @@ public: //! \{ //! Makes the given memory block RW protected. - ASMJIT_FORCE_INLINE ProtectJitReadWriteScope( + ASMJIT_INLINE ProtectJitReadWriteScope( void* rxPtr, size_t size, - CachePolicy policy = CachePolicy::kDefault) noexcept + CachePolicy policy = CachePolicy::kDefault + ) noexcept : _rxPtr(rxPtr), _size(size), _policy(policy) { @@ -307,11 +317,12 @@ public: } //! Makes the memory block RX protected again and flushes instruction cache. 
- ASMJIT_FORCE_INLINE ~ProtectJitReadWriteScope() noexcept { + ASMJIT_INLINE ~ProtectJitReadWriteScope() noexcept { protectJitMemory(ProtectJitAccess::kReadExecute); - if (_policy != CachePolicy::kNeverFlush) + if (_policy != CachePolicy::kNeverFlush) { flushInstructionCache(_rxPtr, _size); + } } //! \} diff --git a/src/asmjit/core/zone.cpp b/src/asmjit/core/zone.cpp index 24454c0..ef48335 100644 --- a/src/asmjit/core/zone.cpp +++ b/src/asmjit/core/zone.cpp @@ -210,28 +210,36 @@ void* Zone::_alloc(size_t size, size_t alignment) noexcept { void* Zone::allocZeroed(size_t size, size_t alignment) noexcept { void* p = alloc(size, alignment); - if (ASMJIT_UNLIKELY(!p)) + if (ASMJIT_UNLIKELY(!p)) { return p; + } return memset(p, 0, size); } void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept { - if (ASMJIT_UNLIKELY(!data || !size)) + if (ASMJIT_UNLIKELY(!data || !size)) { return nullptr; + } ASMJIT_ASSERT(size != SIZE_MAX); + uint8_t* m = allocT(size + nullTerminate); - if (ASMJIT_UNLIKELY(!m)) return nullptr; + if (ASMJIT_UNLIKELY(!m)) { + return nullptr; + } memcpy(m, data, size); - if (nullTerminate) m[size] = '\0'; + if (nullTerminate) { + m[size] = '\0'; + } return static_cast(m); } char* Zone::sformat(const char* fmt, ...) noexcept { - if (ASMJIT_UNLIKELY(!fmt)) + if (ASMJIT_UNLIKELY(!fmt)) { return nullptr; + } char buf[512]; size_t size; @@ -252,8 +260,9 @@ char* Zone::sformat(const char* fmt, ...) noexcept { static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::DynamicBlock* block) noexcept { ZoneAllocator::DynamicBlock* cur = self->_dynamicBlocks; while (cur) { - if (cur == block) + if (cur == block) { return true; + } cur = cur->next; } return false; @@ -334,8 +343,9 @@ void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept { size_t blockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment; // Handle a possible overflow. 
- if (ASMJIT_UNLIKELY(blockOverhead >= SIZE_MAX - size)) + if (ASMJIT_UNLIKELY(blockOverhead >= SIZE_MAX - size)) { return nullptr; + } void* p = ::malloc(size + blockOverhead); if (ASMJIT_UNLIKELY(!p)) { @@ -347,8 +357,9 @@ void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept { DynamicBlock* block = static_cast(p); DynamicBlock* next = _dynamicBlocks; - if (next) + if (next) { next->prev = block; + } block->prev = nullptr; block->next = next; @@ -368,7 +379,9 @@ void* ZoneAllocator::_allocZeroed(size_t size, size_t& allocatedSize) noexcept { ASMJIT_ASSERT(isInitialized()); void* p = _alloc(size, allocatedSize); - if (ASMJIT_UNLIKELY(!p)) return p; + if (ASMJIT_UNLIKELY(!p)) { + return p; + } return memset(p, 0, allocatedSize); } @@ -384,13 +397,16 @@ void ZoneAllocator::_releaseDynamic(void* p, size_t size) noexcept { DynamicBlock* prev = block->prev; DynamicBlock* next = block->next; - if (prev) + if (prev) { prev->next = next; - else + } + else { _dynamicBlocks = next; + } - if (next) + if (next) { next->prev = prev; + } ::free(block); } diff --git a/src/asmjit/core/zone.h b/src/asmjit/core/zone.h index 062a7b6..03b651a 100644 --- a/src/asmjit/core/zone.h +++ b/src/asmjit/core/zone.h @@ -41,16 +41,21 @@ public: size_t size; }; - enum Limits : size_t { - kMinBlockSize = 256, // The number is ridiculously small, but still possible. - kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 1), + static inline constexpr size_t kMinBlockSize = 256; // The number is ridiculously small, but still possible. 
+ static inline constexpr size_t kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 1); - kMinAlignment = 1, - kMaxAlignment = 64, + static inline constexpr size_t kMinAlignment = 1; + static inline constexpr size_t kMaxAlignment = 64; - kBlockSize = sizeof(Block), - kBlockOverhead = kBlockSize + Globals::kAllocOverhead - }; + static inline constexpr size_t kBlockSize = sizeof(Block); + static inline constexpr size_t kBlockOverhead = kBlockSize + Globals::kAllocOverhead; + + static ASMJIT_API const Block _zeroBlock; + + //! \endcond + + //! \name Members + //! \{ //! Pointer in the current block. uint8_t* _ptr; @@ -72,9 +77,7 @@ public: //! Count of allocated blocks. size_t _blockCount; - static ASMJIT_API const Block _zeroBlock; - - //! \endcond + //! \} //! \name Construction & Destruction //! \{ @@ -141,25 +144,35 @@ public: //! \{ //! Returns the default block alignment. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t blockAlignment() const noexcept { return size_t(1) << _blockAlignmentShift; } + //! Returns a minimum block size. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t minimumBlockSize() const noexcept { return size_t(1) << _minimumBlockSizeShift; } + //! Returns a maximum block size. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t maximumBlockSize() const noexcept { return size_t(1) << _maximumBlockSizeShift; } + //! Tests whether this `Zone` is actually a `ZoneTmp` that uses temporary memory. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint8_t hasStaticBlock() const noexcept { return _hasStaticBlock; } //! Returns remaining size of the current block. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t remainingSize() const noexcept { return (size_t)(_end - _ptr); } //! Returns the current zone cursor (dangerous). //! //! This is a function that can be used to get exclusive access to the current block's memory buffer. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* ptr() noexcept { return reinterpret_cast(_ptr); } //! 
Returns the end of the current zone block, only useful if you use `ptr()`. template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* end() noexcept { return reinterpret_cast(_end); } //! Sets the current zone pointer to `ptr` (must be within the current block). @@ -209,11 +222,14 @@ public: //! //! \note This function doesn't respect any alignment. If you need to ensure there is enough room for an aligned //! allocation you need to call `align()` before calling `ensure()`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Error ensure(size_t size) noexcept { - if (ASMJIT_LIKELY(size <= remainingSize())) + if (ASMJIT_LIKELY(size <= remainingSize())) { return kErrorOk; - else + } + else { return _alloc(0, 1) ? kErrorOk : DebugUtils::errored(kErrorOutOfMemory); + } } //! \} @@ -252,6 +268,7 @@ public: //! // Reset or destroy `Zone`. //! zone.reset(); //! ``` + [[nodiscard]] inline void* alloc(size_t size) noexcept { if (ASMJIT_UNLIKELY(size > remainingSize())) return _alloc(size, 1); @@ -262,6 +279,7 @@ public: } //! Allocates the requested memory specified by `size` and `alignment`. + [[nodiscard]] inline void* alloc(size_t size, size_t alignment) noexcept { ASMJIT_ASSERT(Support::isPowerOf2(alignment)); uint8_t* ptr = Support::alignUp(_ptr, alignment); @@ -276,6 +294,7 @@ public: //! Allocates the requested memory specified by `size` without doing any checks. //! //! Can only be called if `remainingSize()` returns size at least equal to `size`. + [[nodiscard]] inline void* allocNoCheck(size_t size) noexcept { ASMJIT_ASSERT(remainingSize() >= size); @@ -287,6 +306,7 @@ public: //! Allocates the requested memory specified by `size` and `alignment` without doing any checks. //! //! Performs the same operation as `Zone::allocNoCheck(size)` with `alignment` applied. + [[nodiscard]] inline void* allocNoCheck(size_t size, size_t alignment) noexcept { ASMJIT_ASSERT(Support::isPowerOf2(alignment)); @@ -298,28 +318,33 @@ public: } //! Allocates `size` bytes of zeroed memory. 
See `alloc()` for more details. + [[nodiscard]] ASMJIT_API void* allocZeroed(size_t size, size_t alignment = 1) noexcept; //! Like `alloc()`, but the return pointer is casted to `T*`. template + [[nodiscard]] inline T* allocT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept { return static_cast(alloc(size, alignment)); } //! Like `allocNoCheck()`, but the return pointer is casted to `T*`. template + [[nodiscard]] inline T* allocNoCheckT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept { return static_cast(allocNoCheck(size, alignment)); } //! Like `allocZeroed()`, but the return pointer is casted to `T*`. template + [[nodiscard]] inline T* allocZeroedT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept { return static_cast(allocZeroed(size, alignment)); } //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`. template + [[nodiscard]] inline T* newT() noexcept { void* p = alloc(sizeof(T), alignof(T)); if (ASMJIT_UNLIKELY(!p)) @@ -329,6 +354,7 @@ public: //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`. template + [[nodiscard]] inline T* newT(Args&&... args) noexcept { void* p = alloc(sizeof(T), alignof(T)); if (ASMJIT_UNLIKELY(!p)) @@ -339,28 +365,32 @@ public: //! \cond INTERNAL //! //! Internal alloc function used by other inlines. + [[nodiscard]] ASMJIT_API void* _alloc(size_t size, size_t alignment) noexcept; //! \endcond //! Helper to duplicate data. + [[nodiscard]] ASMJIT_API void* dup(const void* data, size_t size, bool nullTerminate = false) noexcept; //! Helper to duplicate data. + [[nodiscard]] inline void* dupAligned(const void* data, size_t size, size_t alignment, bool nullTerminate = false) noexcept { align(alignment); return dup(data, size, nullTerminate); } //! Helper to duplicate a formatted string, maximum size is 256 bytes. + [[nodiscard]] ASMJIT_API char* sformat(const char* str, ...) noexcept; //! 
\} #if !defined(ASMJIT_NO_DEPRECATED) - ASMJIT_DEPRECATED("Use Zone::minimumBlockSize() instead of Zone::blockSize()") + [[deprecated("Use Zone::minimumBlockSize() instead of Zone::blockSize()")]] ASMJIT_INLINE_NODEBUG size_t blockSize() const noexcept { return minimumBlockSize(); } - ASMJIT_DEPRECATED("Use Zone::hasStaticBlock() instead of Zone::isTemporary()") + [[deprecated("Use Zone::hasStaticBlock() instead of Zone::isTemporary()")]] ASMJIT_INLINE_NODEBUG bool isTemporary() const noexcept { return hasStaticBlock() != 0u; } #endif }; @@ -402,24 +432,22 @@ public: // In short, we pool chunks of these sizes: // [32, 64, 96, 128, 192, 256, 320, 384, 448, 512] - enum : uint32_t { - //! How many bytes per a low granularity pool (has to be at least 16). - kLoGranularity = 32, - //! Number of slots of a low granularity pool. - kLoCount = 4, - //! Maximum size of a block that can be allocated in a low granularity pool. - kLoMaxSize = kLoGranularity * kLoCount, + //! How many bytes per a low granularity pool (has to be at least 16). + static inline constexpr uint32_t kLoGranularity = 32; + //! Number of slots of a low granularity pool. + static inline constexpr uint32_t kLoCount = 4; + //! Maximum size of a block that can be allocated in a low granularity pool. + static inline constexpr uint32_t kLoMaxSize = kLoGranularity * kLoCount; - //! How many bytes per a high granularity pool. - kHiGranularity = 64, - //! Number of slots of a high granularity pool. - kHiCount = 6, - //! Maximum size of a block that can be allocated in a high granularity pool. - kHiMaxSize = kLoMaxSize + kHiGranularity * kHiCount, + //! How many bytes per a high granularity pool. + static inline constexpr uint32_t kHiGranularity = 64; + //! Number of slots of a high granularity pool. + static inline constexpr uint32_t kHiCount = 6; + //! Maximum size of a block that can be allocated in a high granularity pool. 
+ static inline constexpr uint32_t kHiMaxSize = kLoMaxSize + kHiGranularity * kHiCount; - //! Alignment of every pointer returned by `alloc()`. - kBlockAlignment = kLoGranularity - }; + //! Alignment of every pointer returned by `alloc()`. + static inline constexpr uint32_t kBlockAlignment = kLoGranularity; //! Single-linked list used to store unused chunks. struct Slot { @@ -482,6 +510,7 @@ public: //! \{ //! Returns the assigned `Zone` of this allocator or null if this `ZoneAllocator` is not initialized. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Zone* zone() const noexcept { return _zone; } //! \} @@ -493,24 +522,30 @@ public: //! Returns the slot index to be used for `size`. Returns `true` if a valid slot has been written to `slot` and //! `allocatedSize` has been filled with slot exact size (`allocatedSize` can be equal or slightly greater than //! `size`). + [[nodiscard]] static inline bool _getSlotIndex(size_t size, uint32_t& slot) noexcept { ASMJIT_ASSERT(size > 0); - if (size > kHiMaxSize) + if (size > kHiMaxSize) { return false; + } - if (size <= kLoMaxSize) + if (size <= kLoMaxSize) { slot = uint32_t((size - 1) / kLoGranularity); - else + } + else { slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount; + } return true; } //! \overload + [[nodiscard]] static inline bool _getSlotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) noexcept { ASMJIT_ASSERT(size > 0); - if (size > kHiMaxSize) + if (size > kHiMaxSize) { return false; + } if (size <= kLoMaxSize) { slot = uint32_t((size - 1) / kLoGranularity); @@ -531,14 +566,19 @@ public: //! \{ //! \cond INTERNAL + [[nodiscard]] ASMJIT_API void* _alloc(size_t size, size_t& allocatedSize) noexcept; + + [[nodiscard]] ASMJIT_API void* _allocZeroed(size_t size, size_t& allocatedSize) noexcept; + ASMJIT_API void _releaseDynamic(void* p, size_t size) noexcept; //! \endcond //! Allocates `size` bytes of memory, ideally from an available pool. //! //! 
\note `size` can't be zero, it will assert in debug mode in such case. + [[nodiscard]] inline void* alloc(size_t size) noexcept { ASMJIT_ASSERT(isInitialized()); size_t allocatedSize; @@ -547,6 +587,7 @@ public: //! Like `alloc(size)`, but provides a second argument `allocatedSize` that provides a way to know how big //! the block returned actually is. This is useful for containers to prevent growing too early. + [[nodiscard]] inline void* alloc(size_t size, size_t& allocatedSize) noexcept { ASMJIT_ASSERT(isInitialized()); return _alloc(size, allocatedSize); @@ -554,11 +595,13 @@ public: //! Like `alloc()`, but the return pointer is casted to `T*`. template + [[nodiscard]] inline T* allocT(size_t size = sizeof(T)) noexcept { return static_cast(alloc(size)); } //! Like `alloc(size)`, but returns zeroed memory. + [[nodiscard]] inline void* allocZeroed(size_t size) noexcept { ASMJIT_ASSERT(isInitialized()); size_t allocatedSize; @@ -566,6 +609,7 @@ public: } //! Like `alloc(size, allocatedSize)`, but returns zeroed memory. + [[nodiscard]] inline void* allocZeroed(size_t size, size_t& allocatedSize) noexcept { ASMJIT_ASSERT(isInitialized()); return _allocZeroed(size, allocatedSize); @@ -573,24 +617,29 @@ public: //! Like `allocZeroed()`, but the return pointer is casted to `T*`. template + [[nodiscard]] inline T* allocZeroedT(size_t size = sizeof(T)) noexcept { return static_cast(allocZeroed(size)); } //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`. template + [[nodiscard]] inline T* newT() noexcept { void* p = allocT(); - if (ASMJIT_UNLIKELY(!p)) + if (ASMJIT_UNLIKELY(!p)) { return nullptr; + } return new(Support::PlacementNew{p}) T(); } //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`. template + [[nodiscard]] inline T* newT(Args&&... 
args) noexcept { void* p = allocT(); - if (ASMJIT_UNLIKELY(!p)) + if (ASMJIT_UNLIKELY(!p)) { return nullptr; + } return new(Support::PlacementNew{p}) T(std::forward(args)...); } diff --git a/src/asmjit/core/zonehash.cpp b/src/asmjit/core/zonehash.cpp index 578b083..40fc8ef 100644 --- a/src/asmjit/core/zonehash.cpp +++ b/src/asmjit/core/zonehash.cpp @@ -172,12 +172,12 @@ void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexce uint32_t newCount = ZoneHash_primeArray[primeIndex].prime; ZoneHashNode** oldData = _data; - ZoneHashNode** newData = reinterpret_cast( - allocator->allocZeroed(size_t(newCount) * sizeof(ZoneHashNode*))); + ZoneHashNode** newData = reinterpret_cast(allocator->allocZeroed(size_t(newCount) * sizeof(ZoneHashNode*))); // We can still store nodes into the table, but it will degrade. - if (ASMJIT_UNLIKELY(newData == nullptr)) + if (ASMJIT_UNLIKELY(newData == nullptr)) { return; + } uint32_t i; uint32_t oldCount = _bucketsCount; @@ -201,8 +201,9 @@ void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexce } } - if (oldData != _embedded) + if (oldData != _embedded) { allocator->release(oldData, oldCount * sizeof(ZoneHashNode*)); + } } // ZoneHashBase - Operations @@ -217,8 +218,9 @@ ZoneHashNode* ZoneHashBase::_insert(ZoneAllocator* allocator, ZoneHashNode* node if (++_size > _bucketsGrow) { uint32_t primeIndex = Support::min(_primeIndex + 2, ASMJIT_ARRAY_SIZE(ZoneHash_primeArray) - 1); - if (primeIndex > _primeIndex) + if (primeIndex > _primeIndex) { _rehash(allocator, primeIndex); + } } return node; diff --git a/src/asmjit/core/zonehash.h b/src/asmjit/core/zonehash.h index d6cd2e3..0d85143 100644 --- a/src/asmjit/core/zonehash.h +++ b/src/asmjit/core/zonehash.h @@ -73,7 +73,9 @@ public: _primeIndex = other._primeIndex; _embedded[0] = other._embedded[0]; - if (_data == other._embedded) _data = _embedded; + if (_data == other._embedded) { + _data = _embedded; + } } inline void reset() noexcept { @@ -89,8 
+91,9 @@ public: inline void release(ZoneAllocator* allocator) noexcept { ZoneHashNode** oldData = _data; - if (oldData != _embedded) + if (oldData != _embedded) { allocator->release(oldData, _bucketsCount * sizeof(ZoneHashNode*)); + } reset(); } @@ -99,7 +102,10 @@ public: //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; } //! \} @@ -117,8 +123,13 @@ public: std::swap(_primeIndex, other._primeIndex); std::swap(_embedded[0], other._embedded[0]); - if (_data == other._embedded) _data = _embedded; - if (other._data == _embedded) other._data = other._embedded; + if (_data == other._embedded) { + _data = _embedded; + } + + if (other._data == _embedded) { + other._data = other._embedded; + } } //! \cond INTERNAL @@ -145,7 +156,7 @@ class ZoneHash : public ZoneHashBase { public: ASMJIT_NONCOPYABLE(ZoneHash) - typedef NodeT Node; + using Node = NodeT; //! \name Construction & Destruction //! \{ @@ -164,12 +175,14 @@ public: ASMJIT_INLINE_NODEBUG void swap(ZoneHash& other) noexcept { ZoneHashBase::_swap(other); } template + [[nodiscard]] inline NodeT* get(const KeyT& key) const noexcept { uint32_t hashMod = _calcMod(key.hashCode()); NodeT* node = static_cast(_data[hashMod]); - while (node && !key.matches(node)) + while (node && !key.matches(node)) { node = static_cast(node->_hashNext); + } return node; } diff --git a/src/asmjit/core/zonelist.h b/src/asmjit/core/zonelist.h index 8980240..0ba3d97 100644 --- a/src/asmjit/core/zonelist.h +++ b/src/asmjit/core/zonelist.h @@ -22,10 +22,8 @@ public: //! \name Constants //! \{ - enum : size_t { - kNodeIndexPrev = 0, - kNodeIndexNext = 1 - }; + static inline constexpr size_t kNodeIndexPrev = 0; + static inline constexpr size_t kNodeIndexNext = 1; //! \} @@ -50,10 +48,16 @@ public: //! \name Accessors //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasPrev() const noexcept { return _listNodes[kNodeIndexPrev] != nullptr; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasNext() const noexcept { return _listNodes[kNodeIndexNext] != nullptr; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeT* prev() const noexcept { return _listNodes[kNodeIndexPrev]; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeT* next() const noexcept { return _listNodes[kNodeIndexNext]; } //! \} @@ -68,10 +72,8 @@ public: //! \name Constants //! \{ - enum : size_t { - kNodeIndexFirst = 0, - kNodeIndexLast = 1 - }; + static inline constexpr size_t kNodeIndexFirst = 0; + static inline constexpr size_t kNodeIndexLast = 1; //! \} @@ -100,8 +102,13 @@ public: //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _nodes[0] == nullptr; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeT* first() const noexcept { return _nodes[kNodeIndexFirst]; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeT* last() const noexcept { return _nodes[kNodeIndexLast]; } //! \} @@ -120,10 +127,12 @@ public: node->_listNodes[!dir] = prev; _nodes[dir] = node; - if (prev) + if (prev) { prev->_listNodes[dir] = node; - else + } + else { _nodes[!dir] = node; + } } // Can be used to both append and prepend. 
@@ -134,10 +143,12 @@ public: NodeT* next = ref->_listNodes[dir]; prev->_listNodes[dir] = node; - if (next) + if (next) { next->_listNodes[!dir] = node; - else + } + else { _nodes[dir] = node; + } node->_listNodes[!dir] = prev; node->_listNodes[ dir] = next; @@ -162,6 +173,7 @@ public: return node; } + [[nodiscard]] inline NodeT* popFirst() noexcept { NodeT* node = _nodes[0]; ASMJIT_ASSERT(node != nullptr); @@ -180,6 +192,7 @@ public: return node; } + [[nodiscard]] inline NodeT* pop() noexcept { NodeT* node = _nodes[1]; ASMJIT_ASSERT(node != nullptr); diff --git a/src/asmjit/core/zonestack.cpp b/src/asmjit/core/zonestack.cpp index e913a51..1da3664 100644 --- a/src/asmjit/core/zonestack.cpp +++ b/src/asmjit/core/zonestack.cpp @@ -30,8 +30,9 @@ Error ZoneStackBase::_init(ZoneAllocator* allocator, size_t middleIndex) noexcep if (allocator) { Block* block = static_cast(allocator->alloc(kBlockSize)); - if (ASMJIT_UNLIKELY(!block)) + if (ASMJIT_UNLIKELY(!block)) { return DebugUtils::errored(kErrorOutOfMemory); + } block->_link[kBlockIndexPrev] = nullptr; block->_link[kBlockIndexNext] = nullptr; @@ -56,8 +57,9 @@ Error ZoneStackBase::_prepareBlock(uint32_t side, size_t initialIndex) noexcept ASMJIT_ASSERT(!prev->empty()); Block* block = _allocator->allocT(kBlockSize); - if (ASMJIT_UNLIKELY(!block)) + if (ASMJIT_UNLIKELY(!block)) { return DebugUtils::errored(kErrorOutOfMemory); + } block->_link[ side] = nullptr; block->_link[!side] = prev; diff --git a/src/asmjit/core/zonestack.h b/src/asmjit/core/zonestack.h index 16d5d09..39cf2a8 100644 --- a/src/asmjit/core/zonestack.h +++ b/src/asmjit/core/zonestack.h @@ -21,15 +21,13 @@ public: //! \name Constants //! 
\{ - enum : size_t { - kBlockIndexPrev = 0, - kBlockIndexNext = 1, + static inline constexpr size_t kBlockIndexPrev = 0; + static inline constexpr size_t kBlockIndexNext = 1; - kBlockIndexFirst = 0, - kBlockIndexLast = 1, + static inline constexpr size_t kBlockIndexFirst = 0; + static inline constexpr size_t kBlockIndexLast = 1; - kBlockSize = ZoneAllocator::kHiMaxSize - }; + static inline constexpr size_t kBlockSize = ZoneAllocator::kHiMaxSize; //! \} @@ -44,32 +42,46 @@ public: //! Pointer to the end of the array. void* _end; + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _start == _end; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG Block* prev() const noexcept { return _link[kBlockIndexPrev]; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG Block* next() const noexcept { return _link[kBlockIndexNext]; } ASMJIT_INLINE_NODEBUG void setPrev(Block* block) noexcept { _link[kBlockIndexPrev] = block; } ASMJIT_INLINE_NODEBUG void setNext(Block* block) noexcept { _link[kBlockIndexNext] = block; } template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* start() const noexcept { return static_cast(_start); } + template ASMJIT_INLINE_NODEBUG void setStart(T* start) noexcept { _start = static_cast(start); } template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* end() const noexcept { return (T*)_end; } + template ASMJIT_INLINE_NODEBUG void setEnd(T* end) noexcept { _end = (void*)end; } template + [[nodiscard]] ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return (const T*)((const uint8_t*)(this) + sizeof(Block)); } + template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* data() noexcept { return (T*)((uint8_t*)(this) + sizeof(Block)); } template + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool canPrepend() const noexcept { return _start > data(); } template + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool canAppend() const noexcept { size_t kNumBlockItems = (kBlockSize - sizeof(Block)) / sizeof(T); size_t kStartBlockIndex = sizeof(Block); @@ -107,8 +119,10 @@ public: //! \{ //! 
Returns `ZoneAllocator` attached to this container. + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return _allocator; } + [[nodiscard]] inline bool empty() const noexcept { ASMJIT_ASSERT(isInitialized()); return _block[0]->start() == _block[1]->end(); @@ -120,7 +134,9 @@ public: //! \name Internal //! \{ + [[nodiscard]] ASMJIT_API Error _prepareBlock(uint32_t side, size_t initialIndex) noexcept; + ASMJIT_API void _cleanupBlock(uint32_t side, size_t middleIndex) noexcept; //! \} @@ -136,12 +152,10 @@ public: //! \name Constants //! \{ - enum : uint32_t { - kNumBlockItems = uint32_t((kBlockSize - sizeof(Block)) / sizeof(T)), - kStartBlockIndex = uint32_t(sizeof(Block)), - kMidBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems / 2) * sizeof(T)), - kEndBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems ) * sizeof(T)) - }; + static inline constexpr uint32_t kNumBlockItems = uint32_t((kBlockSize - sizeof(Block)) / sizeof(T)); + static inline constexpr uint32_t kStartBlockIndex = uint32_t(sizeof(Block)); + static inline constexpr uint32_t kMidBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems / 2) * sizeof(T)); + static inline constexpr uint32_t kEndBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems ) * sizeof(T)); //! 
\} @@ -191,6 +205,7 @@ public: return kErrorOk; } + [[nodiscard]] inline T popFirst() noexcept { ASMJIT_ASSERT(isInitialized()); ASMJIT_ASSERT(!empty()); @@ -202,12 +217,14 @@ public: T item = *ptr++; block->setStart(ptr); - if (block->empty()) + if (block->empty()) { _cleanupBlock(kBlockIndexFirst, kMidBlockIndex); + } return item; } + [[nodiscard]] inline T pop() noexcept { ASMJIT_ASSERT(isInitialized()); ASMJIT_ASSERT(!empty()); @@ -221,8 +238,9 @@ public: ASMJIT_ASSERT(ptr >= block->start()); block->setEnd(ptr); - if (block->empty()) + if (block->empty()) { _cleanupBlock(kBlockIndexLast, kMidBlockIndex); + } return item; } diff --git a/src/asmjit/core/zonestring.h b/src/asmjit/core/zonestring.h index e62ac50..872fcc0 100644 --- a/src/asmjit/core/zonestring.h +++ b/src/asmjit/core/zonestring.h @@ -63,10 +63,8 @@ public: //! \name Constants //! \{ - enum : uint32_t { - kWholeSize = (N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase)), - kMaxEmbeddedSize = kWholeSize - 5 - }; + static inline constexpr uint32_t kWholeSize = (N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase)); + static inline constexpr uint32_t kMaxEmbeddedSize = kWholeSize - 5; //! \} @@ -92,14 +90,19 @@ public: //! \{ //! Tests whether the string is empty. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _base._size == 0; } //! Returns the string data. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return _base._size <= kMaxEmbeddedSize ? _base._embedded : _base._external; } + //! Returns the string size. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _base._size; } //! Tests whether the string is embedded (e.g. no dynamically allocated). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEmbedded() const noexcept { return _base._size <= kMaxEmbeddedSize; } //! Copies a new `data` of the given `size` to the string. 
diff --git a/src/asmjit/core/zonetree.cpp b/src/asmjit/core/zonetree.cpp index e8a0e11..3bcce88 100644 --- a/src/asmjit/core/zonetree.cpp +++ b/src/asmjit/core/zonetree.cpp @@ -16,7 +16,7 @@ ASMJIT_BEGIN_NAMESPACE #if defined(ASMJIT_TEST) template struct ZoneRBUnit { - typedef ZoneTree Tree; + using Tree = ZoneTree; static void verifyTree(Tree& tree) noexcept { EXPECT_GT(checkHeight(static_cast(tree._root)), 0); diff --git a/src/asmjit/core/zonetree.h b/src/asmjit/core/zonetree.h index ffeb674..2b047d1 100644 --- a/src/asmjit/core/zonetree.h +++ b/src/asmjit/core/zonetree.h @@ -25,10 +25,8 @@ public: //! \name Constants //! \{ - enum : uintptr_t { - kRedMask = 0x1, - kPtrMask = ~kRedMask - }; + static inline constexpr uintptr_t kRedMask = 0x1; + static inline constexpr uintptr_t kPtrMask = ~kRedMask; //! \} @@ -49,17 +47,28 @@ public: //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isRed() const noexcept { return static_cast(_rbNodeData[0] & kRedMask); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasChild(size_t i) const noexcept { return _rbNodeData[i] > kRedMask; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasLeft() const noexcept { return _rbNodeData[0] > kRedMask; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRight() const noexcept { return _rbNodeData[1] != 0; } template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* child(size_t i) const noexcept { return static_cast(_getChild(i)); } + template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* left() const noexcept { return static_cast(_getLeft()); } + template + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* right() const noexcept { return static_cast(_getRight()); } //! \} @@ -68,8 +77,13 @@ public: //! \name Internal //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getChild(size_t i) const noexcept { return (ZoneTreeNode*)(_rbNodeData[i] & kPtrMask); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getLeft() const noexcept { return (ZoneTreeNode*)(_rbNodeData[0] & kPtrMask); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getRight() const noexcept { return (ZoneTreeNode*)(_rbNodeData[1]); } ASMJIT_INLINE_NODEBUG void _setChild(size_t i, ZoneTreeNode* node) noexcept { _rbNodeData[i] = (_rbNodeData[i] & kRedMask) | (uintptr_t)node; } @@ -80,6 +94,7 @@ public: ASMJIT_INLINE_NODEBUG void _makeBlack() noexcept { _rbNodeData[0] &= kPtrMask; } //! Tests whether the node is RED (RED node must be non-null and must have RED flag set). + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool _isValidRed(ZoneTreeNode* node) noexcept { return node && node->isRed(); } //! \} @@ -103,8 +118,13 @@ public: //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeT* child(size_t i) const noexcept { return static_cast(_getChild(i)); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeT* left() const noexcept { return static_cast(_getLeft()); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeT* right() const noexcept { return static_cast(_getRight()); } //! \} @@ -116,7 +136,7 @@ class ZoneTree { public: ASMJIT_NONCOPYABLE(ZoneTree) - typedef NodeT Node; + using Node = NodeT; NodeT* _root {}; //! \name Construction & Destruction @@ -132,7 +152,10 @@ public: //! \name Accessors //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _root == nullptr; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG NodeT* root() const noexcept { return static_cast(_root); } //! \} @@ -192,14 +215,17 @@ public: } // Stop if found. - if (q == node) + if (q == node) { break; + } last = dir; dir = cmp(*static_cast(q), *static_cast(node)) < 0; // Update helpers. - if (g) t = g; + if (g) { + t = g; + } g = p; p = q; @@ -319,15 +345,20 @@ public: // Update root and make it black. 
_root = static_cast(head._getRight()); - if (_root) _root->_makeBlack(); + if (_root) { + _root->_makeBlack(); + } } template> + [[nodiscard]] inline NodeT* get(const KeyT& key, const CompareT& cmp = CompareT()) const noexcept { ZoneTreeNode* node = _root; while (node) { auto result = cmp(*static_cast(node), key); - if (result == 0) break; + if (result == 0) { + break; + } // Go left or right depending on the `result`. node = node->_getChild(result < 0); diff --git a/src/asmjit/core/zonevector.cpp b/src/asmjit/core/zonevector.cpp index 383e7a2..4eb7499 100644 --- a/src/asmjit/core/zonevector.cpp +++ b/src/asmjit/core/zonevector.cpp @@ -16,7 +16,7 @@ ASMJIT_BEGIN_NAMESPACE // ZoneVector is used as an array to hold short-lived data structures used during code generation. The growing // strategy is simple - use small capacity at the beginning (very good for ZoneAllocator) and then grow quicker // to prevent successive reallocations. -static ASMJIT_FORCE_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, uint32_t growMinimum, uint32_t sizeOfT) noexcept { +static ASMJIT_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, uint32_t growMinimum, uint32_t sizeOfT) noexcept { static constexpr size_t kGrowThreshold = Globals::kGrowThreshold; size_t byteSize = size_t(current) * sizeOfT; @@ -53,8 +53,9 @@ static ASMJIT_FORCE_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, ui // Bail to `growMinimum` in case of overflow - should never happen as it's unlikely we would hit this on a 32-bit // machine (consecutive near 4GiB allocation is impossible, and this should never happen on 64-bit machine as we // use 32-bit size & capacity, so overflow of 64 bit integer is not possible. Added just as an extreme measure. 
- if (byteSize < minimumByteSize) + if (byteSize < minimumByteSize) { return growMinimum; + } } } @@ -62,41 +63,48 @@ static ASMJIT_FORCE_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, ui return uint32_t(Support::min(n, 0xFFFFFFFFu)); } -static ASMJIT_FORCE_INLINE bool ZoneVector_byteSizeIsSafe(size_t nBytes, uint32_t n) noexcept { - if (sizeof(uint32_t) < sizeof(size_t)) +static ASMJIT_INLINE bool ZoneVector_byteSizeIsSafe(size_t nBytes, uint32_t n) noexcept { + if constexpr (sizeof(uint32_t) < sizeof(size_t)) { return true; // there is no problem when running on a 64-bit machine. - else + } + else { return nBytes >= size_t(n); + } }; Error ZoneVectorBase::_grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept { uint32_t capacity = _capacity; uint32_t after = _size; - if (ASMJIT_UNLIKELY(std::numeric_limits::max() - n < after)) + if (ASMJIT_UNLIKELY(std::numeric_limits::max() - n < after)) { return DebugUtils::errored(kErrorOutOfMemory); + } after += n; - if (capacity >= after) + if (capacity >= after) { return kErrorOk; + } return _reserve(allocator, sizeOfT, ZoneVector_growCapacity(capacity, after, sizeOfT)); } Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept { uint32_t oldCapacity = _capacity; - if (oldCapacity >= n) + if (oldCapacity >= n) { return kErrorOk; + } size_t nBytes = size_t(n) * sizeOfT; - if (ASMJIT_UNLIKELY(!ZoneVector_byteSizeIsSafe(nBytes, n))) + if (ASMJIT_UNLIKELY(!ZoneVector_byteSizeIsSafe(nBytes, n))) { return DebugUtils::errored(kErrorOutOfMemory); + } size_t allocatedBytes; uint8_t* newData = static_cast(allocator->alloc(nBytes, allocatedBytes)); - if (ASMJIT_UNLIKELY(!newData)) + if (ASMJIT_UNLIKELY(!newData)) { return DebugUtils::errored(kErrorOutOfMemory); + } uint32_t newCapacity = uint32_t(allocatedBytes / sizeOfT); ASMJIT_ASSERT(newCapacity >= n); @@ -115,8 +123,9 @@ Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint3 Error 
ZoneVectorBase::_growingReserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept { uint32_t capacity = _capacity; - if (capacity >= n) + if (capacity >= n) { return kErrorOk; + } return _reserve(allocator, sizeOfT, ZoneVector_growCapacity(capacity, n, sizeOfT)); } @@ -128,8 +137,9 @@ Error ZoneVectorBase::_resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32 ASMJIT_ASSERT(_capacity >= n); } - if (size < n) + if (size < n) { memset(static_cast(_data) + size_t(size) * sizeOfT, 0, size_t(n - size) * sizeOfT); + } _size = n; return kErrorOk; @@ -150,16 +160,18 @@ Error ZoneBitVector::copyFrom(ZoneAllocator* allocator, const ZoneBitVector& oth if (newSize > _capacity) { // Realloc needed... Calculate the minimum capacity (in bytes) required. uint32_t minimumCapacityInBits = Support::alignUp(newSize, kBitWordSizeInBits); - if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize)) + if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize)) { return DebugUtils::errored(kErrorOutOfMemory); + } // Normalize to bytes. uint32_t minimumCapacity = minimumCapacityInBits / 8; size_t allocatedCapacity; BitWord* newData = static_cast(allocator->alloc(minimumCapacity, allocatedCapacity)); - if (ASMJIT_UNLIKELY(!newData)) + if (ASMJIT_UNLIKELY(!newData)) { return DebugUtils::errored(kErrorOutOfMemory); + } // `allocatedCapacity` now contains number in bytes, we need bits. size_t allocatedCapacityInBits = allocatedCapacity * 8; @@ -167,11 +179,13 @@ Error ZoneBitVector::copyFrom(ZoneAllocator* allocator, const ZoneBitVector& oth // Arithmetic overflow should normally not happen. If it happens we just // change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as // this value is still safe to be used to call `_allocator->release(...)`. 
- if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) + if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) { allocatedCapacityInBits = minimumCapacityInBits; + } - if (data) + if (data) { allocator->release(data, _capacity / 8); + } data = newData; _data = data; @@ -197,8 +211,9 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_ // happens when `newSize` is a multiply of `kBitWordSizeInBits` like 64, 128, // and so on. In that case don't change anything as that would mean settings // bits outside of the `_size`. - if (bit) + if (bit) { _data[idx] &= (BitWord(1) << bit) - 1u; + } _size = newSize; return kErrorOk; @@ -211,16 +226,18 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_ // Realloc needed, calculate the minimum capacity (in bytes) required. uint32_t minimumCapacityInBits = Support::alignUp(idealCapacity, kBitWordSizeInBits); - if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize)) + if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize)) { return DebugUtils::errored(kErrorOutOfMemory); + } // Normalize to bytes. uint32_t minimumCapacity = minimumCapacityInBits / 8; size_t allocatedCapacity; BitWord* newData = static_cast(allocator->alloc(minimumCapacity, allocatedCapacity)); - if (ASMJIT_UNLIKELY(!newData)) + if (ASMJIT_UNLIKELY(!newData)) { return DebugUtils::errored(kErrorOutOfMemory); + } // `allocatedCapacity` now contains number in bytes, we need bits. size_t allocatedCapacityInBits = allocatedCapacity * 8; @@ -228,13 +245,15 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_ // Arithmetic overflow should normally not happen. If it happens we just // change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as // this value is still safe to be used to call `_allocator->release(...)`. 
- if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) + if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) { allocatedCapacityInBits = minimumCapacityInBits; + } _copyBits(newData, data, _wordsPerBits(oldSize)); - if (data) + if (data) { allocator->release(data, _capacity / 8); + } data = newData; _data = data; @@ -274,8 +293,9 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_ while (idx < endIdx) data[idx++] = pattern; // Clear unused bits of the last bit-word. - if (endBit) + if (endBit) { data[endIdx - 1] = pattern & ((BitWord(1) << endBit) - 1); + } _size = newSize; return kErrorOk; @@ -286,16 +306,20 @@ Error ZoneBitVector::_append(ZoneAllocator* allocator, bool value) noexcept { uint32_t newSize = _size + 1; uint32_t idealCapacity = _capacity; - if (idealCapacity < 128) + if (idealCapacity < 128) { idealCapacity = 128; - else if (idealCapacity <= kThreshold) + } + else if (idealCapacity <= kThreshold) { idealCapacity *= 2; - else + } + else { idealCapacity += kThreshold; + } if (ASMJIT_UNLIKELY(idealCapacity < _capacity)) { - if (ASMJIT_UNLIKELY(_size == std::numeric_limits::max())) + if (ASMJIT_UNLIKELY(_size == std::numeric_limits::max())) { return DebugUtils::errored(kErrorOutOfMemory); + } idealCapacity = newSize; } @@ -356,7 +380,7 @@ static void test_zone_vector(ZoneAllocator* allocator, const char* typeName) { EXPECT_EQ(fsum, rsum); vec.release(allocator); - INFO("ZoneBitVector::growingReserve()"); + INFO("ZoneVector<%s>::growingReserve()", typeName); for (uint32_t j = 0; j < 40 / sizeof(T); j += 8) { EXPECT_EQ(vec.growingReserve(allocator, j * kMiB), kErrorOk); EXPECT_GE(vec.capacity(), j * kMiB); @@ -379,15 +403,17 @@ static void test_zone_bitvector(ZoneAllocator* allocator) { EXPECT_EQ(vec.resize(allocator, count, false), kErrorOk); EXPECT_EQ(vec.size(), count); - for (i = 0; i < count; i++) + for (i = 0; i < count; i++) { EXPECT_FALSE(vec.bitAt(i)); + } vec.clear(); 
EXPECT_EQ(vec.resize(allocator, count, true), kErrorOk); EXPECT_EQ(vec.size(), count); - for (i = 0; i < count; i++) + for (i = 0; i < count; i++) { EXPECT_TRUE(vec.bitAt(i)); + } } INFO("ZoneBitVector::fillBits() / clearBits()"); @@ -398,10 +424,12 @@ static void test_zone_bitvector(ZoneAllocator* allocator) { for (i = 0; i < (count + 1) / 2; i++) { bool value = bool(i & 1); - if (value) + if (value) { vec.fillBits(i, count - i * 2); - else + } + else { vec.clearBits(i, count - i * 2); + } } for (i = 0; i < count; i++) { diff --git a/src/asmjit/core/zonevector.h b/src/asmjit/core/zonevector.h index 7607613..c468b96 100644 --- a/src/asmjit/core/zonevector.h +++ b/src/asmjit/core/zonevector.h @@ -19,9 +19,16 @@ class ZoneVectorBase { public: ASMJIT_NONCOPYABLE(ZoneVectorBase) - // STL compatibility; - typedef uint32_t size_type; - typedef ptrdiff_t difference_type; + //! \name Types (C++ compatibility) + //! \{ + + using size_type = uint32_t; + using difference_type = ptrdiff_t; + + //! \} + + //! \name Members + //! \{ //! Vector data (untyped). void* _data = nullptr; @@ -30,6 +37,8 @@ public: //! Capacity of the vector. size_type _capacity = 0; + //! \} + protected: //! \name Construction & Destruction //! \{ @@ -74,10 +83,15 @@ public: //! \{ //! Tests whether the vector is empty. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; } + //! Returns the vector size. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_type size() const noexcept { return _size; } + //! Returns the vector capacity. + [[nodiscard]] ASMJIT_INLINE_NODEBUG size_type capacity() const noexcept { return _capacity; } //! \} @@ -120,17 +134,21 @@ class ZoneVector : public ZoneVectorBase { public: ASMJIT_NONCOPYABLE(ZoneVector) - // STL compatibility; - typedef T value_type; - typedef T* pointer; - typedef const T* const_pointer; - typedef T& reference; - typedef const T& const_reference; + //! \name Types (C++ compatibility) + //! 
\{ - typedef T* iterator; - typedef const T* const_iterator; - typedef Support::ArrayReverseIterator reverse_iterator; - typedef Support::ArrayReverseIterator const_reverse_iterator; + using value_type = T; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + + using iterator = T*; + using const_iterator = const T*; + using reverse_iterator = Support::ArrayReverseIterator; + using const_reverse_iterator = Support::ArrayReverseIterator; + + //! \} //! \name Construction & Destruction //! \{ @@ -144,11 +162,15 @@ public: //! \{ //! Returns vector data. + [[nodiscard]] ASMJIT_INLINE_NODEBUG T* data() noexcept { return static_cast(_data); } + //! Returns vector data (const) + [[nodiscard]] ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return static_cast(_data); } //! Returns item at the given index `i` (const). + [[nodiscard]] inline const T& at(size_t i) const noexcept { ASMJIT_ASSERT(i < _size); return data()[i]; @@ -164,22 +186,40 @@ public: //! \name STL Compatibility (Iterators) //! 
\{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG iterator begin() noexcept { return iterator(data()); }; + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const_iterator begin() const noexcept { return const_iterator(data()); }; + [[nodiscard]] ASMJIT_INLINE_NODEBUG iterator end() noexcept { return iterator(data() + _size); }; + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const_iterator end() const noexcept { return const_iterator(data() + _size); }; + [[nodiscard]] ASMJIT_INLINE_NODEBUG reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }; + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); }; + [[nodiscard]] ASMJIT_INLINE_NODEBUG reverse_iterator rend() noexcept { return reverse_iterator(begin()); }; + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); }; + [[nodiscard]] ASMJIT_INLINE_NODEBUG const_iterator cbegin() const noexcept { return const_iterator(data()); }; + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const_iterator cend() const noexcept { return const_iterator(data() + _size); }; + [[nodiscard]] ASMJIT_INLINE_NODEBUG const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(cend()); }; + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const_reverse_iterator crend() const noexcept { return const_reverse_iterator(cbegin()); }; //! \} @@ -188,12 +228,13 @@ public: //! \{ //! Swaps this vector with `other`. - ASMJIT_FORCE_INLINE void swap(ZoneVector& other) noexcept { _swap(other); } + ASMJIT_INLINE void swap(ZoneVector& other) noexcept { _swap(other); } //! Prepends `item` to the vector. 
- ASMJIT_FORCE_INLINE Error prepend(ZoneAllocator* allocator, const T& item) noexcept { - if (ASMJIT_UNLIKELY(_size == _capacity)) + ASMJIT_INLINE Error prepend(ZoneAllocator* allocator, const T& item) noexcept { + if (ASMJIT_UNLIKELY(_size == _capacity)) { ASMJIT_PROPAGATE(grow(allocator, 1)); + } memmove(static_cast(static_cast(_data) + 1), static_cast(_data), @@ -208,11 +249,12 @@ public: } //! Inserts an `item` at the specified `index`. - ASMJIT_FORCE_INLINE Error insert(ZoneAllocator* allocator, size_t index, const T& item) noexcept { + ASMJIT_INLINE Error insert(ZoneAllocator* allocator, size_t index, const T& item) noexcept { ASMJIT_ASSERT(index <= _size); - if (ASMJIT_UNLIKELY(_size == _capacity)) + if (ASMJIT_UNLIKELY(_size == _capacity)) { ASMJIT_PROPAGATE(grow(allocator, 1)); + } T* dst = static_cast(_data) + index; memmove(static_cast(dst + 1), @@ -228,9 +270,10 @@ public: } //! Appends `item` to the vector. - ASMJIT_FORCE_INLINE Error append(ZoneAllocator* allocator, const T& item) noexcept { - if (ASMJIT_UNLIKELY(_size == _capacity)) + ASMJIT_INLINE Error append(ZoneAllocator* allocator, const T& item) noexcept { + if (ASMJIT_UNLIKELY(_size == _capacity)) { ASMJIT_PROPAGATE(grow(allocator, 1)); + } memcpy(static_cast(static_cast(_data) + _size), static_cast(&item), @@ -241,10 +284,11 @@ public: } //! Appends `other` vector at the end of this vector. - ASMJIT_FORCE_INLINE Error concat(ZoneAllocator* allocator, const ZoneVector& other) noexcept { + ASMJIT_INLINE Error concat(ZoneAllocator* allocator, const ZoneVector& other) noexcept { uint32_t size = other._size; - if (_capacity - _size < size) + if (_capacity - _size < size) { ASMJIT_PROPAGATE(grow(allocator, size)); + } if (size) { memcpy(static_cast(static_cast(_data) + _size), @@ -260,7 +304,7 @@ public: //! //! Can only be used together with `willGrow()`. If `willGrow(N)` returns `kErrorOk` then N elements //! can be added to the vector without checking if there is a place for them. 
Used mostly internally. - ASMJIT_FORCE_INLINE void prependUnsafe(const T& item) noexcept { + ASMJIT_INLINE void prependUnsafe(const T& item) noexcept { ASMJIT_ASSERT(_size < _capacity); T* data = static_cast(_data); @@ -280,7 +324,7 @@ public: //! //! Can only be used together with `willGrow()`. If `willGrow(N)` returns `kErrorOk` then N elements //! can be added to the vector without checking if there is a place for them. Used mostly internally. - ASMJIT_FORCE_INLINE void appendUnsafe(const T& item) noexcept { + ASMJIT_INLINE void appendUnsafe(const T& item) noexcept { ASMJIT_ASSERT(_size < _capacity); memcpy(static_cast(static_cast(_data) + _size), @@ -290,7 +334,7 @@ public: } //! Inserts an `item` at the specified `index` (unsafe case). - ASMJIT_FORCE_INLINE void insertUnsafe(size_t index, const T& item) noexcept { + ASMJIT_INLINE void insertUnsafe(size_t index, const T& item) noexcept { ASMJIT_ASSERT(_size < _capacity); ASMJIT_ASSERT(index <= _size); @@ -307,7 +351,7 @@ public: } //! Concatenates all items of `other` at the end of the vector. - ASMJIT_FORCE_INLINE void concatUnsafe(const ZoneVector& other) noexcept { + ASMJIT_INLINE void concatUnsafe(const ZoneVector& other) noexcept { uint32_t size = other._size; ASMJIT_ASSERT(_capacity - _size >= size); @@ -320,7 +364,7 @@ public: } //! Returns index of the given `val` or `Globals::kNotFound` if it doesn't exist. - ASMJIT_FORCE_INLINE uint32_t indexOf(const T& val) const noexcept { + ASMJIT_INLINE uint32_t indexOf(const T& val) const noexcept { const T* data = static_cast(_data); uint32_t size = _size; @@ -350,6 +394,7 @@ public: } //! Pops the last element from the vector and returns it. + [[nodiscard]] inline T pop() noexcept { ASMJIT_ASSERT(_size > 0); @@ -363,12 +408,14 @@ public: } //! Returns item at index `i`. + [[nodiscard]] inline T& operator[](size_t i) noexcept { ASMJIT_ASSERT(i < _size); return data()[i]; } //! Returns item at index `i`. 
+ [[nodiscard]] inline const T& operator[](size_t i) const noexcept { ASMJIT_ASSERT(i < _size); return data()[i]; @@ -378,16 +425,22 @@ public: //! //! \note The vector must have at least one element. Attempting to use `first()` on empty vector will trigger //! an assertion failure in debug builds. + [[nodiscard]] ASMJIT_INLINE_NODEBUG T& first() noexcept { return operator[](0); } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const T& first() const noexcept { return operator[](0); } //! Returns a reference to the last element of the vector. //! //! \note The vector must have at least one element. Attempting to use `last()` on empty vector will trigger //! an assertion failure in debug builds. + [[nodiscard]] inline T& last() noexcept { return operator[](_size - 1); } + //! \overload + [[nodiscard]] inline const T& last() const noexcept { return operator[](_size - 1); } //! \} @@ -401,6 +454,7 @@ public: } //! Called to grow the buffer to fit at least `n` elements more. + [[nodiscard]] inline Error grow(ZoneAllocator* allocator, uint32_t n) noexcept { return ZoneVectorBase::_grow(allocator, sizeof(T), n); } @@ -409,27 +463,34 @@ public: //! //! If `n` is greater than the current size then the additional elements' content will be initialized to zero. //! If `n` is less than the current size then the vector will be truncated to exactly `n` elements. + [[nodiscard]] inline Error resize(ZoneAllocator* allocator, uint32_t n) noexcept { return ZoneVectorBase::_resize(allocator, sizeof(T), n); } //! Reallocates the internal array to fit at least `n` items. + [[nodiscard]] inline Error reserve(ZoneAllocator* allocator, uint32_t n) noexcept { - if (ASMJIT_UNLIKELY(n > _capacity)) + if (ASMJIT_UNLIKELY(n > _capacity)) { return ZoneVectorBase::_reserve(allocator, sizeof(T), n); - else + } + else { return Error(kErrorOk); + } } //! Reallocates the internal array to fit at least `n` items with growing semantics. //! //! 
If the vector is smaller than `n` the same growing calculations will be used as if N items were appended //! to an empty vector, which means reserving additional space for more append operations that could follow. + [[nodiscard]] inline Error growingReserve(ZoneAllocator* allocator, uint32_t n) noexcept { - if (ASMJIT_UNLIKELY(n > _capacity)) + if (ASMJIT_UNLIKELY(n > _capacity)) { return ZoneVectorBase::_growingReserve(allocator, sizeof(T), n); - else + } + else { return Error(kErrorOk); + } } inline Error willGrow(ZoneAllocator* allocator, uint32_t n = 1) noexcept { @@ -442,16 +503,19 @@ public: //! Zone-allocated bit vector. class ZoneBitVector { public: - typedef Support::BitWord BitWord; - ASMJIT_NONCOPYABLE(ZoneBitVector) + //! \name Types + //! \{ + + using BitWord = Support::BitWord; + + //! \} + //! \name Constants //! \{ - enum : uint32_t { - kBitWordSizeInBits = Support::kBitWordSizeInBits - }; + static inline constexpr uint32_t kBitWordSizeInBits = Support::kBitWordSizeInBits; //! \} @@ -476,18 +540,21 @@ public: } static ASMJIT_INLINE_NODEBUG void _zeroBits(BitWord* dst, uint32_t nBitWords) noexcept { - for (uint32_t i = 0; i < nBitWords; i++) + for (uint32_t i = 0; i < nBitWords; i++) { dst[i] = 0; + } } static ASMJIT_INLINE_NODEBUG void _fillBits(BitWord* dst, uint32_t nBitWords) noexcept { - for (uint32_t i = 0; i < nBitWords; i++) + for (uint32_t i = 0; i < nBitWords; i++) { dst[i] = ~BitWord(0); + } } static ASMJIT_INLINE_NODEBUG void _copyBits(BitWord* dst, const BitWord* src, uint32_t nBitWords) noexcept { - for (uint32_t i = 0; i < nBitWords; i++) + for (uint32_t i = 0; i < nBitWords; i++) { dst[i] = src[i]; + } } //! \} @@ -508,7 +575,10 @@ public: //! \name Overloaded Operators //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator==(const ZoneBitVector& other) const noexcept { return equals(other); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool operator!=(const ZoneBitVector& other) const noexcept { return !equals(other); } //! 
\} @@ -517,20 +587,31 @@ public: //! \{ //! Tests whether the bit-vector is empty (has no bits). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; } + //! Returns the size of this bit-vector (in bits). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _size; } + //! Returns the capacity of this bit-vector (in bits). + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t capacity() const noexcept { return _capacity; } //! Returns the size of the `BitWord[]` array in `BitWord` units. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t sizeInBitWords() const noexcept { return _wordsPerBits(_size); } + //! Returns the capacity of the `BitWord[]` array in `BitWord` units. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t capacityInBitWords() const noexcept { return _wordsPerBits(_capacity); } //! Returns bit-vector data as `BitWord[]`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG BitWord* data() noexcept { return _data; } + //! \overload + [[nodiscard]] ASMJIT_INLINE_NODEBUG const BitWord* data() const noexcept { return _data; } //! 
\} @@ -559,6 +640,7 @@ public: _clearUnusedBits(); } + [[nodiscard]] inline bool bitAt(uint32_t index) const noexcept { ASMJIT_ASSERT(index < _size); return Support::bitVectorGetBit(_data, index); @@ -574,7 +656,7 @@ public: Support::bitVectorFlipBit(_data, index); } - ASMJIT_FORCE_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept { + ASMJIT_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept { uint32_t index = _size; if (ASMJIT_UNLIKELY(index >= _capacity)) return _append(allocator, value); @@ -593,23 +675,23 @@ public: ASMJIT_API Error copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept; - ASMJIT_FORCE_INLINE void clearAll() noexcept { + ASMJIT_INLINE void clearAll() noexcept { _zeroBits(_data, _wordsPerBits(_size)); } - ASMJIT_FORCE_INLINE void fillAll() noexcept { + ASMJIT_INLINE void fillAll() noexcept { _fillBits(_data, _wordsPerBits(_size)); _clearUnusedBits(); } - ASMJIT_FORCE_INLINE void clearBits(uint32_t start, uint32_t count) noexcept { + ASMJIT_INLINE void clearBits(uint32_t start, uint32_t count) noexcept { ASMJIT_ASSERT(start <= _size); ASMJIT_ASSERT(_size - start >= count); Support::bitVectorClear(_data, start, count); } - ASMJIT_FORCE_INLINE void fillBits(uint32_t start, uint32_t count) noexcept { + ASMJIT_INLINE void fillBits(uint32_t start, uint32_t count) noexcept { ASMJIT_ASSERT(start <= _size); ASMJIT_ASSERT(_size - start >= count); @@ -620,7 +702,7 @@ public: //! bits than `this` then all remaining bits are set to zero. //! //! \note The size of the BitVector is unaffected by this operation. - ASMJIT_FORCE_INLINE void and_(const ZoneBitVector& other) noexcept { + ASMJIT_INLINE void and_(const ZoneBitVector& other) noexcept { BitWord* dst = _data; const BitWord* src = other._data; @@ -644,49 +726,57 @@ public: //! has less bits than `this` then all remaining bits are kept intact. //! //! \note The size of the BitVector is unaffected by this operation. 
- ASMJIT_FORCE_INLINE void andNot(const ZoneBitVector& other) noexcept { + ASMJIT_INLINE void andNot(const ZoneBitVector& other) noexcept { BitWord* dst = _data; const BitWord* src = other._data; uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size)); - for (uint32_t i = 0; i < commonBitWordCount; i++) + for (uint32_t i = 0; i < commonBitWordCount; i++) { dst[i] = dst[i] & ~src[i]; + } } //! Performs a logical bitwise OP between bits specified in this array and bits in `other`. If `other` has less //! bits than `this` then all remaining bits are kept intact. //! //! \note The size of the BitVector is unaffected by this operation. - ASMJIT_FORCE_INLINE void or_(const ZoneBitVector& other) noexcept { + ASMJIT_INLINE void or_(const ZoneBitVector& other) noexcept { BitWord* dst = _data; const BitWord* src = other._data; uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size)); - for (uint32_t i = 0; i < commonBitWordCount; i++) + for (uint32_t i = 0; i < commonBitWordCount; i++) { dst[i] = dst[i] | src[i]; + } _clearUnusedBits(); } - ASMJIT_FORCE_INLINE void _clearUnusedBits() noexcept { + ASMJIT_INLINE void _clearUnusedBits() noexcept { uint32_t idx = _size / kBitWordSizeInBits; uint32_t bit = _size % kBitWordSizeInBits; - if (!bit) + if (!bit) { return; + } + _data[idx] &= (BitWord(1) << bit) - 1u; } - ASMJIT_FORCE_INLINE bool equals(const ZoneBitVector& other) const noexcept { - if (_size != other._size) + [[nodiscard]] + ASMJIT_INLINE bool equals(const ZoneBitVector& other) const noexcept { + if (_size != other._size) { return false; + } const BitWord* aData = _data; const BitWord* bData = other._data; uint32_t numBitWords = _wordsPerBits(_size); - for (uint32_t i = 0; i < numBitWords; i++) - if (aData[i] != bData[i]) + for (uint32_t i = 0; i < numBitWords; i++) { + if (aData[i] != bData[i]) { return false; + } + } return true; } @@ -696,12 +786,15 @@ public: //! 
\{ inline void release(ZoneAllocator* allocator) noexcept { - if (!_data) + if (!_data) { return; + } + allocator->release(_data, _capacity / 8); reset(); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG Error resize(ZoneAllocator* allocator, uint32_t newSize, bool newBitsValue = false) noexcept { return _resize(allocator, newSize, newSize, newBitsValue); } diff --git a/src/asmjit/x86/x86archtraits_p.h b/src/asmjit/x86/x86archtraits_p.h index 90ae5d5..f05c7a5 100644 --- a/src/asmjit/x86/x86archtraits_p.h +++ b/src/asmjit/x86/x86archtraits_p.h @@ -118,8 +118,8 @@ static const constexpr ArchTraits x64ArchTraits = { index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt32) ? RegType::kX86_Gpd : \ index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt64) ? RegType::kX86_Gpq : \ index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt64) ? RegType::kX86_Gpq : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kIntPtr) ? RegType::kX86_Gpd : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUIntPtr) ? RegType::kX86_Gpd : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kIntPtr) ? RegType::kX86_Gpq : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUIntPtr) ? RegType::kX86_Gpq : \ index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat32) ? RegType::kX86_Xmm : \ index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat64) ? RegType::kX86_Xmm : \ index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMask8) ? 
RegType::kX86_KReg : \ diff --git a/src/asmjit/x86/x86assembler.cpp b/src/asmjit/x86/x86assembler.cpp index 2478c6d..c7fd495 100644 --- a/src/asmjit/x86/x86assembler.cpp +++ b/src/asmjit/x86/x86assembler.cpp @@ -23,7 +23,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) -typedef Support::FastUInt8 FastUInt8; +using FastUInt8 = Support::FastUInt8; // x86::Assembler - Constants // ========================== @@ -168,34 +168,35 @@ enum X86MemInfo_Enum { template struct X86MemInfo_T { - enum : uint32_t { - B = (X ) & 0x1F, - I = (X >> 5) & 0x1F, + static inline constexpr uint32_t B = (X ) & 0x1F; + static inline constexpr uint32_t I = (X >> 5) & 0x1F; - kBase = (B >= uint32_t(RegType::kX86_Gpw) && B <= uint32_t(RegType::kX86_Gpq)) ? kX86MemInfo_BaseGp : - (B == uint32_t(RegType::kX86_Rip) ) ? kX86MemInfo_BaseRip : - (B == uint32_t(RegType::kLabelTag) ) ? kX86MemInfo_BaseLabel : 0, + static inline constexpr uint32_t kBase = + (B >= uint32_t(RegType::kX86_Gpw) && B <= uint32_t(RegType::kX86_Gpq)) ? kX86MemInfo_BaseGp : + (B == uint32_t(RegType::kX86_Rip) ) ? kX86MemInfo_BaseRip : + (B == uint32_t(RegType::kLabelTag) ) ? kX86MemInfo_BaseLabel : 0; - kIndex = (I >= uint32_t(RegType::kX86_Gpw) && I <= uint32_t(RegType::kX86_Gpq)) ? kX86MemInfo_Index : - (I >= uint32_t(RegType::kX86_Xmm) && I <= uint32_t(RegType::kX86_Zmm)) ? kX86MemInfo_Index : 0, + static inline constexpr uint32_t kIndex = + (I >= uint32_t(RegType::kX86_Gpw) && I <= uint32_t(RegType::kX86_Gpq)) ? kX86MemInfo_Index : + (I >= uint32_t(RegType::kX86_Xmm) && I <= uint32_t(RegType::kX86_Zmm)) ? kX86MemInfo_Index : 0; - k67H = (B == uint32_t(RegType::kX86_Gpw) && I == uint32_t(RegType::kNone) ) ? kX86MemInfo_67H_X86 : - (B == uint32_t(RegType::kX86_Gpd) && I == uint32_t(RegType::kNone) ) ? kX86MemInfo_67H_X64 : - (B == uint32_t(RegType::kNone) && I == uint32_t(RegType::kX86_Gpw)) ? kX86MemInfo_67H_X86 : - (B == uint32_t(RegType::kNone) && I == uint32_t(RegType::kX86_Gpd)) ? 
kX86MemInfo_67H_X64 : - (B == uint32_t(RegType::kX86_Gpw) && I == uint32_t(RegType::kX86_Gpw)) ? kX86MemInfo_67H_X86 : - (B == uint32_t(RegType::kX86_Gpd) && I == uint32_t(RegType::kX86_Gpd)) ? kX86MemInfo_67H_X64 : - (B == uint32_t(RegType::kX86_Gpw) && I == uint32_t(RegType::kX86_Xmm)) ? kX86MemInfo_67H_X86 : - (B == uint32_t(RegType::kX86_Gpd) && I == uint32_t(RegType::kX86_Xmm)) ? kX86MemInfo_67H_X64 : - (B == uint32_t(RegType::kX86_Gpw) && I == uint32_t(RegType::kX86_Ymm)) ? kX86MemInfo_67H_X86 : - (B == uint32_t(RegType::kX86_Gpd) && I == uint32_t(RegType::kX86_Ymm)) ? kX86MemInfo_67H_X64 : - (B == uint32_t(RegType::kX86_Gpw) && I == uint32_t(RegType::kX86_Zmm)) ? kX86MemInfo_67H_X86 : - (B == uint32_t(RegType::kX86_Gpd) && I == uint32_t(RegType::kX86_Zmm)) ? kX86MemInfo_67H_X64 : - (B == uint32_t(RegType::kLabelTag) && I == uint32_t(RegType::kX86_Gpw)) ? kX86MemInfo_67H_X86 : - (B == uint32_t(RegType::kLabelTag) && I == uint32_t(RegType::kX86_Gpd)) ? kX86MemInfo_67H_X64 : 0, + static inline constexpr uint32_t k67H = + (B == uint32_t(RegType::kX86_Gpw) && I == uint32_t(RegType::kNone) ) ? kX86MemInfo_67H_X86 : + (B == uint32_t(RegType::kX86_Gpd) && I == uint32_t(RegType::kNone) ) ? kX86MemInfo_67H_X64 : + (B == uint32_t(RegType::kNone) && I == uint32_t(RegType::kX86_Gpw)) ? kX86MemInfo_67H_X86 : + (B == uint32_t(RegType::kNone) && I == uint32_t(RegType::kX86_Gpd)) ? kX86MemInfo_67H_X64 : + (B == uint32_t(RegType::kX86_Gpw) && I == uint32_t(RegType::kX86_Gpw)) ? kX86MemInfo_67H_X86 : + (B == uint32_t(RegType::kX86_Gpd) && I == uint32_t(RegType::kX86_Gpd)) ? kX86MemInfo_67H_X64 : + (B == uint32_t(RegType::kX86_Gpw) && I == uint32_t(RegType::kX86_Xmm)) ? kX86MemInfo_67H_X86 : + (B == uint32_t(RegType::kX86_Gpd) && I == uint32_t(RegType::kX86_Xmm)) ? kX86MemInfo_67H_X64 : + (B == uint32_t(RegType::kX86_Gpw) && I == uint32_t(RegType::kX86_Ymm)) ? kX86MemInfo_67H_X86 : + (B == uint32_t(RegType::kX86_Gpd) && I == uint32_t(RegType::kX86_Ymm)) ? 
kX86MemInfo_67H_X64 : + (B == uint32_t(RegType::kX86_Gpw) && I == uint32_t(RegType::kX86_Zmm)) ? kX86MemInfo_67H_X86 : + (B == uint32_t(RegType::kX86_Gpd) && I == uint32_t(RegType::kX86_Zmm)) ? kX86MemInfo_67H_X64 : + (B == uint32_t(RegType::kLabelTag) && I == uint32_t(RegType::kX86_Gpw)) ? kX86MemInfo_67H_X86 : + (B == uint32_t(RegType::kLabelTag) && I == uint32_t(RegType::kX86_Gpd)) ? kX86MemInfo_67H_X64 : 0; - kValue = kBase | kIndex | k67H | 0x04 | 0x08 - }; + static inline constexpr uint32_t kValue = kBase | kIndex | k67H | 0x04u | 0x08u; }; // The result stored in the LUT is a combination of @@ -236,16 +237,15 @@ static const uint32_t x86LLByRegType[] = { ASMJIT_LOOKUP_TABLE_16(VALUE, 0) }; // all EVEX encoded instructions. template struct X86CDisp8SHL_T { - enum { - TT = (X >> 3) << Opcode::kCDTT_Shift, - LL = (X >> 0) & 0x3, - W = (X >> 2) & 0x1, + static inline constexpr uint32_t TT = (X >> 3) << Opcode::kCDTT_Shift; + static inline constexpr uint32_t LL = (X >> 0) & 0x3; + static inline constexpr uint32_t W = (X >> 2) & 0x1; - kValue = (TT == Opcode::kCDTT_None ? ((LL==0) ? 0 : (LL==1) ? 0 : 0 ) : - TT == Opcode::kCDTT_ByLL ? ((LL==0) ? 0 : (LL==1) ? 1 : 2 ) : - TT == Opcode::kCDTT_T1W ? ((LL==0) ? W : (LL==1) ? 1+W : 2+W) : - TT == Opcode::kCDTT_DUP ? ((LL==0) ? 0 : (LL==1) ? 2 : 3 ) : 0) << Opcode::kCDSHL_Shift - }; + static inline constexpr uint32_t kValue = ( + TT == Opcode::kCDTT_None ? ((LL==0) ? 0 : (LL==1) ? 0 : 0 ) : + TT == Opcode::kCDTT_ByLL ? ((LL==0) ? 0 : (LL==1) ? 1 : 2 ) : + TT == Opcode::kCDTT_T1W ? ((LL==0) ? W : (LL==1) ? 1+W : 2+W) : + TT == Opcode::kCDTT_DUP ? ((LL==0) ? 0 : (LL==1) ? 2 : 3 ) : 0) << Opcode::kCDSHL_Shift; }; #define VALUE(x) X86CDisp8SHL_T::kValue @@ -269,15 +269,14 @@ static const uint8_t x86Mod16BaseTable[8] = { // 0xFF == Invalid. 
template struct X86Mod16BaseIndexTable_T { - enum { - B = X >> 3, - I = X & 0x7, + static inline constexpr uint32_t B = X >> 3; + static inline constexpr uint32_t I = X & 0x7u; - kValue = ((B == Gp::kIdBx && I == Gp::kIdSi) || (B == Gp::kIdSi && I == Gp::kIdBx)) ? 0x00 : - ((B == Gp::kIdBx && I == Gp::kIdDi) || (B == Gp::kIdDi && I == Gp::kIdBx)) ? 0x01 : - ((B == Gp::kIdBp && I == Gp::kIdSi) || (B == Gp::kIdSi && I == Gp::kIdBp)) ? 0x02 : - ((B == Gp::kIdBp && I == Gp::kIdDi) || (B == Gp::kIdDi && I == Gp::kIdBp)) ? 0x03 : 0xFF - }; + static inline constexpr uint32_t kValue = + ((B == Gp::kIdBx && I == Gp::kIdSi) || (B == Gp::kIdSi && I == Gp::kIdBx)) ? 0x00u : + ((B == Gp::kIdBx && I == Gp::kIdDi) || (B == Gp::kIdDi && I == Gp::kIdBx)) ? 0x01u : + ((B == Gp::kIdBp && I == Gp::kIdSi) || (B == Gp::kIdSi && I == Gp::kIdBp)) ? 0x02u : + ((B == Gp::kIdBp && I == Gp::kIdDi) || (B == Gp::kIdDi && I == Gp::kIdBp)) ? 0x03u : 0xFFu; }; #define VALUE(x) X86Mod16BaseIndexTable_T::kValue @@ -287,29 +286,29 @@ static const uint8_t x86Mod16BaseIndexTable[] = { ASMJIT_LOOKUP_TABLE_64(VALUE, // x86::Assembler - Helpers // ======================== -static ASMJIT_FORCE_INLINE bool x86IsJmpOrCall(InstId instId) noexcept { +static ASMJIT_INLINE bool x86IsJmpOrCall(InstId instId) noexcept { return instId == Inst::kIdJmp || instId == Inst::kIdCall; } -static ASMJIT_FORCE_INLINE bool x86IsImplicitMem(const Operand_& op, uint32_t base) noexcept { +static ASMJIT_INLINE bool x86IsImplicitMem(const Operand_& op, uint32_t base) noexcept { return op.isMem() && op.as().baseId() == base && !op.as().hasOffset(); } //! Combine `regId` and `vvvvvId` into a single value (used by AVX and AVX-512). 
-static ASMJIT_FORCE_INLINE uint32_t x86PackRegAndVvvvv(uint32_t regId, uint32_t vvvvvId) noexcept { +static ASMJIT_INLINE uint32_t x86PackRegAndVvvvv(uint32_t regId, uint32_t vvvvvId) noexcept { return regId + (vvvvvId << kVexVVVVVShift); } -static ASMJIT_FORCE_INLINE uint32_t x86OpcodeLByVMem(const Operand_& op) noexcept { +static ASMJIT_INLINE uint32_t x86OpcodeLByVMem(const Operand_& op) noexcept { return x86LLByRegType[size_t(op.as().indexType())]; } -static ASMJIT_FORCE_INLINE uint32_t x86OpcodeLBySize(uint32_t size) noexcept { +static ASMJIT_INLINE uint32_t x86OpcodeLBySize(uint32_t size) noexcept { return x86LLBySizeDiv16[size / 16]; } //! Encode MOD byte. -static ASMJIT_FORCE_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept { +static ASMJIT_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept { ASMJIT_ASSERT(m <= 3); ASMJIT_ASSERT(o <= 7); ASMJIT_ASSERT(rm <= 7); @@ -317,14 +316,14 @@ static ASMJIT_FORCE_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_ } //! Encode SIB byte. -static ASMJIT_FORCE_INLINE uint32_t x86EncodeSib(uint32_t s, uint32_t i, uint32_t b) noexcept { +static ASMJIT_INLINE uint32_t x86EncodeSib(uint32_t s, uint32_t i, uint32_t b) noexcept { ASMJIT_ASSERT(s <= 3); ASMJIT_ASSERT(i <= 7); ASMJIT_ASSERT(b <= 7); return (s << 6) + (i << 3) + b; } -static ASMJIT_FORCE_INLINE bool x86IsRexInvalid(uint32_t rex) noexcept { +static ASMJIT_INLINE bool x86IsRexInvalid(uint32_t rex) noexcept { // Validates the following possibilities: // REX == 0x00 -> OKAY (X86_32 / X86_64). // REX == 0x40-0x4F -> OKAY (X86_64). 
@@ -333,19 +332,19 @@ static ASMJIT_FORCE_INLINE bool x86IsRexInvalid(uint32_t rex) noexcept { return rex > kX86ByteInvalidRex; } -static ASMJIT_FORCE_INLINE uint32_t x86GetForceEvex3MaskInLastBit(InstOptions options) noexcept { +static ASMJIT_INLINE uint32_t x86GetForceEvex3MaskInLastBit(InstOptions options) noexcept { constexpr uint32_t kVex3Bit = Support::ConstCTZ::value; return uint32_t(options & InstOptions::kX86_Vex3) << (31 - kVex3Bit); } template -static ASMJIT_FORCE_INLINE constexpr T x86SignExtendI32(T imm) noexcept { return T(int64_t(int32_t(imm & T(0xFFFFFFFF)))); } +static ASMJIT_INLINE_CONSTEXPR T x86SignExtendI32(T imm) noexcept { return T(int64_t(int32_t(imm & T(0xFFFFFFFF)))); } -static ASMJIT_FORCE_INLINE uint32_t x86AltOpcodeOf(const InstDB::InstInfo* info) noexcept { +static ASMJIT_INLINE uint32_t x86AltOpcodeOf(const InstDB::InstInfo* info) noexcept { return InstDB::_altOpcodeTable[info->_altOpcodeIndex]; } -static ASMJIT_FORCE_INLINE bool x86IsMmxOrXmm(const Reg& reg) noexcept { +static ASMJIT_INLINE bool x86IsMmxOrXmm(const Reg& reg) noexcept { return reg.type() == RegType::kX86_Mm || reg.type() == RegType::kX86_Xmm; } @@ -354,16 +353,16 @@ static ASMJIT_FORCE_INLINE bool x86IsMmxOrXmm(const Reg& reg) noexcept { class X86BufferWriter : public CodeWriter { public: - ASMJIT_FORCE_INLINE explicit X86BufferWriter(Assembler* a) noexcept + ASMJIT_INLINE explicit X86BufferWriter(Assembler* a) noexcept : CodeWriter(a) {} - ASMJIT_FORCE_INLINE void emitPP(uint32_t opcode) noexcept { + ASMJIT_INLINE void emitPP(uint32_t opcode) noexcept { uint32_t ppIndex = (opcode >> Opcode::kPP_Shift) & (Opcode::kPP_FPUMask >> Opcode::kPP_Shift) ; emit8If(x86OpcodePP[ppIndex], ppIndex != 0); } - ASMJIT_FORCE_INLINE void emitMMAndOpcode(uint32_t opcode) noexcept { + ASMJIT_INLINE void emitMMAndOpcode(uint32_t opcode) noexcept { uint32_t mmIndex = (opcode & Opcode::kMM_Mask) >> Opcode::kMM_Shift; const X86OpcodeMM& mmCode = x86OpcodeMM[mmIndex]; @@ -372,7 +371,7 @@ 
public: emit8(opcode); } - ASMJIT_FORCE_INLINE void emitSegmentOverride(uint32_t segmentId) noexcept { + ASMJIT_INLINE void emitSegmentOverride(uint32_t segmentId) noexcept { ASMJIT_ASSERT(segmentId < ASMJIT_ARRAY_SIZE(x86SegmentPrefix)); FastUInt8 prefix = x86SegmentPrefix[segmentId]; @@ -380,13 +379,14 @@ public: } template - ASMJIT_FORCE_INLINE void emitAddressOverride(CondT condition) noexcept { + ASMJIT_INLINE void emitAddressOverride(CondT condition) noexcept { emit8If(0x67, condition); } - ASMJIT_FORCE_INLINE void emitImmByteOrDWord(uint64_t immValue, FastUInt8 immSize) noexcept { - if (!immSize) + ASMJIT_INLINE void emitImmByteOrDWord(uint64_t immValue, FastUInt8 immSize) noexcept { + if (!immSize) { return; + } ASMJIT_ASSERT(immSize == 1 || immSize == 4); @@ -408,7 +408,7 @@ public: emit8(imm & 0xFFu); } - ASMJIT_FORCE_INLINE void emitImmediate(uint64_t immValue, FastUInt8 immSize) noexcept { + ASMJIT_INLINE void emitImmediate(uint64_t immValue, FastUInt8 immSize) noexcept { #if ASMJIT_ARCH_BITS >= 64 uint64_t imm = immValue; if (immSize >= 4) { @@ -425,23 +425,27 @@ public: } #endif - if (!immSize) + if (!immSize) { return; + } emit8(imm & 0xFFu); imm >>= 8; - if (--immSize == 0) + if (--immSize == 0) { return; + } emit8(imm & 0xFFu); imm >>= 8; - if (--immSize == 0) + if (--immSize == 0) { return; + } emit8(imm & 0xFFu); imm >>= 8; - if (--immSize == 0) + if (--immSize == 0) { return; + } emit8(imm & 0xFFu); } }; @@ -484,7 +488,7 @@ public: // x86::Assembler - Movabs Heuristics // ================================== -static ASMJIT_FORCE_INLINE uint32_t x86GetMovAbsInstSize64Bit(uint32_t regSize, InstOptions options, const Mem& rmRel) noexcept { +static ASMJIT_INLINE uint32_t x86GetMovAbsInstSize64Bit(uint32_t regSize, InstOptions options, const Mem& rmRel) noexcept { uint32_t segmentPrefixSize = rmRel.segmentId() != 0; uint32_t _66hPrefixSize = regSize == 2; uint32_t rexPrefixSize = regSize == 8 || Support::test(options, InstOptions::kX86_Rex); @@ -494,15 
+498,16 @@ static ASMJIT_FORCE_INLINE uint32_t x86GetMovAbsInstSize64Bit(uint32_t regSize, return segmentPrefixSize + _66hPrefixSize + rexPrefixSize + opCodeByteSize + immediateSize; } -static ASMJIT_FORCE_INLINE bool x86ShouldUseMovabs(Assembler* self, X86BufferWriter& writer, uint32_t regSize, InstOptions options, const Mem& rmRel) noexcept { +static ASMJIT_INLINE bool x86ShouldUseMovabs(Assembler* self, X86BufferWriter& writer, uint32_t regSize, InstOptions options, const Mem& rmRel) noexcept { if (self->is32Bit()) { // There is no relative addressing, just decide whether to use MOV encoded with MOD R/M or absolute. return !Support::test(options, InstOptions::kX86_ModMR | InstOptions::kX86_ModRM); } else { // If the addressing type is REL or MOD R/M was specified then absolute mov won't be used. - if (rmRel.addrType() == Mem::AddrType::kRel || Support::test(options, InstOptions::kX86_ModMR | InstOptions::kX86_ModRM)) + if (rmRel.addrType() == Mem::AddrType::kRel || Support::test(options, InstOptions::kX86_ModMR | InstOptions::kX86_ModRM)) { return false; + } int64_t addrValue = rmRel.offset(); uint64_t baseAddress = self->code()->baseAddress(); @@ -515,12 +520,14 @@ static ASMJIT_FORCE_INLINE bool x86ShouldUseMovabs(Assembler* self, X86BufferWri uint64_t rip64 = baseAddress + self->_section->offset() + virtualOffset + instructionSize; uint64_t rel64 = uint64_t(addrValue) - rip64; - if (Support::isInt32(int64_t(rel64))) + if (Support::isInt32(int64_t(rel64))) { return false; + } } else { - if (Support::isInt32(addrValue)) + if (Support::isInt32(addrValue)) { return false; + } } return uint64_t(addrValue) > 0xFFFFFFFFu; @@ -533,8 +540,9 @@ static ASMJIT_FORCE_INLINE bool x86ShouldUseMovabs(Assembler* self, X86BufferWri Assembler::Assembler(CodeHolder* code) noexcept : BaseAssembler() { _archMask = (uint64_t(1) << uint32_t(Arch::kX86)) | (uint64_t(1) << uint32_t(Arch::kX64)) ; - if (code) + if (code) { code->attach(this); + } } Assembler::~Assembler() noexcept {} 
@@ -577,8 +585,9 @@ ASMJIT_FAVOR_SPEED Error Assembler::_emit(InstId instId, const Operand_& o0, con X86BufferWriter writer(this); - if (instId >= Inst::_kIdCount) + if (instId >= Inst::_kIdCount) { instId = 0; + } const InstDB::InstInfo* instInfo = &InstDB::_instInfoTable[instId]; const InstDB::CommonInfo* commonInfo = &instInfo->commonInfo(); @@ -594,17 +603,20 @@ ASMJIT_FAVOR_SPEED Error Assembler::_emit(InstId instId, const Operand_& o0, con // Handle failure and rare cases first. if (ASMJIT_UNLIKELY(Support::test(options, kRequiresSpecialHandling))) { - if (ASMJIT_UNLIKELY(!_code)) + if (ASMJIT_UNLIKELY(!_code)) { return reportError(DebugUtils::errored(kErrorNotInitialized)); + } // Unknown instruction. - if (ASMJIT_UNLIKELY(instId == 0)) + if (ASMJIT_UNLIKELY(instId == 0)) { goto InvalidInstruction; + } // Grow request, happens rarely. err = writer.ensureSpace(this, 16); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { goto Failed; + } #ifndef ASMJIT_NO_VALIDATION // Strict validation. 
@@ -613,8 +625,9 @@ ASMJIT_FAVOR_SPEED Error Assembler::_emit(InstId instId, const Operand_& o0, con EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt); err = _funcs.validate(BaseInst(instId, options, _extraReg), opArray, Globals::kMaxOpCount, ValidationFlags::kNone); - if (ASMJIT_UNLIKELY(err)) + if (ASMJIT_UNLIKELY(err)) { goto Failed; + } } #endif @@ -624,15 +637,18 @@ ASMJIT_FAVOR_SPEED Error Assembler::_emit(InstId instId, const Operand_& o0, con if (Support::test(options, InstOptions::kX86_Lock)) { bool xAcqRel = Support::test(options, InstOptions::kX86_XAcquire | InstOptions::kX86_XRelease); - if (ASMJIT_UNLIKELY(!Support::test(iFlags, InstDB::InstFlags::kLock) && !xAcqRel)) + if (ASMJIT_UNLIKELY(!Support::test(iFlags, InstDB::InstFlags::kLock) && !xAcqRel)) { goto InvalidLockPrefix; + } if (xAcqRel) { - if (ASMJIT_UNLIKELY(Support::test(options, InstOptions::kX86_XAcquire) && !Support::test(iFlags, InstDB::InstFlags::kXAcquire))) + if (ASMJIT_UNLIKELY(Support::test(options, InstOptions::kX86_XAcquire) && !Support::test(iFlags, InstDB::InstFlags::kXAcquire))) { goto InvalidXAcquirePrefix; + } - if (ASMJIT_UNLIKELY(Support::test(options, InstOptions::kX86_XRelease) && !Support::test(iFlags, InstDB::InstFlags::kXRelease))) + if (ASMJIT_UNLIKELY(Support::test(options, InstOptions::kX86_XRelease) && !Support::test(iFlags, InstDB::InstFlags::kXRelease))) { goto InvalidXReleasePrefix; + } writer.emit8(Support::test(options, InstOptions::kX86_XAcquire) ? 0xF2 : 0xF3); } @@ -642,11 +658,13 @@ ASMJIT_FAVOR_SPEED Error Assembler::_emit(InstId instId, const Operand_& o0, con // REP and REPNE prefixes. 
if (Support::test(options, InstOptions::kX86_Rep | InstOptions::kX86_Repne)) { - if (ASMJIT_UNLIKELY(!Support::test(iFlags, InstDB::InstFlags::kRep))) + if (ASMJIT_UNLIKELY(!Support::test(iFlags, InstDB::InstFlags::kRep))) { goto InvalidRepPrefix; + } - if (ASMJIT_UNLIKELY(_extraReg.isReg() && (_extraReg.group() != RegGroup::kGp || _extraReg.id() != Gp::kIdCx))) + if (ASMJIT_UNLIKELY(_extraReg.isReg() && (_extraReg.group() != RegGroup::kGp || _extraReg.id() != Gp::kIdCx))) { goto InvalidRepPrefix; + } writer.emit8(Support::test(options, InstOptions::kX86_Repne) ? 0xF2 : 0xF3); } @@ -746,7 +764,7 @@ ASMJIT_FAVOR_SPEED Error Assembler::_emit(InstId instId, const Operand_& o0, con case InstDB::kEncodingX86M: opcode.addPrefixBySize(o0.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingX86M_NoSize: CaseX86M_NoSize: @@ -805,7 +823,7 @@ CaseX86M_GPB_MulDiv: goto InvalidInstruction; } - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingX86M_GPB: if (isign3 == ENC_OPS1(Reg)) { @@ -834,7 +852,7 @@ CaseX86M_GPB_MulDiv: rmRel = &o0; goto EmitX86M; } - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingX86M_Only: if (isign3 == ENC_OPS1(Mem)) { @@ -913,7 +931,7 @@ CaseX86M_GPB_MulDiv: case InstDB::kEncodingX86Rm: opcode.addPrefixBySize(o0.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingX86Rm_NoSize: if (isign3 == ENC_OPS2(Reg, Reg)) { @@ -956,7 +974,7 @@ CaseX86M_GPB_MulDiv: case InstDB::kEncodingX86Mr: opcode.addPrefixBySize(o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingX86Mr_NoSize: if (isign3 == ENC_OPS2(Reg, Reg)) { @@ -1933,7 +1951,7 @@ CaseX86M_GPB_MulDiv: opcode = immSize == 1 ? 
0x6A : 0x68; goto EmitX86Op; } - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingX86Pop: if (isign3 == ENC_OPS1(Reg)) { @@ -2242,7 +2260,7 @@ CaseX86PushPop_Gp: FIXUP_GPB(o0, opReg); goto EmitX86M; } - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingX86Xadd: if (isign3 == ENC_OPS2(Reg, Reg)) { @@ -2446,7 +2464,7 @@ CaseFpuArith_Mem: opcode += 1u; goto EmitFpuOp; } - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingFpuR: if (isign3 == ENC_OPS1(Reg)) { @@ -2707,11 +2725,11 @@ CaseExtMovd: case InstDB::kEncodingExtRm_Wx: opcode.addWIf(o1.x86RmSize() == 8); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingExtRm_Wx_GpqOnly: opcode.addWIf(Reg::isGpq(o0)); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingExtRm: CaseExtRm: @@ -3028,7 +3046,7 @@ CaseExtRm: case InstDB::kEncodingVexMri_Lx: opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexMri: CaseVexMri: @@ -3073,11 +3091,11 @@ CaseVexMri: rbReg = o1.id(); goto EmitVexEvexR; } - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRm_Lx: opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRm: CaseVexRm: @@ -3109,7 +3127,7 @@ CaseVexRm: case InstDB::kEncodingVexRmi_Lx: opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRmi: CaseVexRmi: @@ -3149,7 +3167,7 @@ CaseVexRvm_R: const Operand_& o3 = opExt[EmitterUtils::kOp3]; if (ASMJIT_UNLIKELY(!o3.isNone() && !Reg::isGp(o3, Gp::kIdDx))) goto InvalidInstruction; - ASMJIT_FALLTHROUGH; + [[fallthrough]]; } case InstDB::kEncodingVexRvm_Wx: { @@ -3159,7 +3177,7 @@ CaseVexRvm_R: case InstDB::kEncodingVexRvm_Lx_KEvex: { opcode.forceEvexIf(Reg::isKReg(o0)); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; } case InstDB::kEncodingVexRvm_Lx: { @@ -3195,7 +3213,7 @@ CaseVexRvm_R: case 
InstDB::kEncodingVexRvmr_Lx: { opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; } case InstDB::kEncodingVexRvmr: { @@ -3225,11 +3243,11 @@ CaseVexRvm_R: case InstDB::kEncodingVexRvmi_Lx_KEvex: opcode.forceEvexIf(Reg::isKReg(o0)); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRvmi_Lx: opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRvmi: VexRvmi: @@ -3256,7 +3274,7 @@ VexRvmi: case InstDB::kEncodingVexRmv_Wx: opcode.addWIf(unsigned(Reg::isGpq(o0)) | unsigned(Reg::isGpq(o2))); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRmv: if (isign3 == ENC_OPS3(Reg, Reg, Reg)) { @@ -3281,7 +3299,7 @@ VexRvmi: rmRel = &o1; goto EmitVexEvexM; } - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRmv_VM: if (isign3 == ENC_OPS3(Reg, Mem, Reg)) { @@ -3371,7 +3389,7 @@ VexRvmi: case InstDB::kEncodingVexRmMr_Lx: opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRmMr: if (isign3 == ENC_OPS2(Reg, Reg)) { @@ -3427,7 +3445,7 @@ VexRvmi: case InstDB::kEncodingVexRvmRmi_Lx: opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRvmRmi: if (isign3 == ENC_OPS3(Reg, Reg, Reg)) { @@ -3539,7 +3557,7 @@ VexRvmi: case InstDB::kEncodingVexRvmMvr_Lx: opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRvmMvr: if (isign3 == ENC_OPS3(Reg, Reg, Reg)) { @@ -3567,11 +3585,11 @@ VexRvmi: case InstDB::kEncodingVexRvmVmi_Lx_MEvex: opcode.forceEvexIf(o1.isMem()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRvmVmi_Lx: opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRvmVmi: if (isign3 == ENC_OPS3(Reg, Reg, 
Reg)) { @@ -3609,7 +3627,7 @@ VexRvmi: case InstDB::kEncodingVexVm_Wx: opcode.addWIf(unsigned(Reg::isGpq(o0)) | unsigned(Reg::isGpq(o1))); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexVm: if (isign3 == ENC_OPS2(Reg, Reg)) { @@ -3628,11 +3646,11 @@ VexRvmi: case InstDB::kEncodingVexVmi_Lx_MEvex: if (isign3 == ENC_OPS3(Reg, Mem, Imm)) opcode.forceEvex(); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexVmi_Lx: opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexVmi: immValue = o2.as().value(); @@ -3660,7 +3678,7 @@ CaseVexVmi_AfterImm: case InstDB::kEncodingVexRvrmRvmr_Lx: opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingVexRvrmRvmr: { const Operand_& o3 = opExt[EmitterUtils::kOp3]; @@ -3761,7 +3779,7 @@ CaseVexVmi_AfterImm: case InstDB::kEncodingFma4_Lx: // It's fine to just check the first operand, second is just for sanity. opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case InstDB::kEncodingFma4: { const Operand_& o3 = opExt[EmitterUtils::kOp3]; @@ -5008,7 +5026,7 @@ Error Assembler::align(AlignMode alignMode, uint32_t alignment) { case AlignMode::kCode: { if (hasEncodingOption(EncodingOptions::kOptimizedAlign)) { // Intel 64 and IA-32 Architectures Software Developer's Manual - Volume 2B (NOP). - enum { kMaxNopSize = 9 }; + static constexpr uint32_t kMaxNopSize = 9; static const uint8_t nopData[kMaxNopSize][kMaxNopSize] = { { 0x90 }, diff --git a/src/asmjit/x86/x86assembler.h b/src/asmjit/x86/x86assembler.h index dd980a7..1131d57 100644 --- a/src/asmjit/x86/x86assembler.h +++ b/src/asmjit/x86/x86assembler.h @@ -32,7 +32,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! using namespace asmjit; //! //! // Signature of the generated function. -//! typedef int (*SumFunc)(const int* arr, size_t count); +//! 
using SumFunc = int (*)(const int* arr, size_t count); //! //! int main() { //! JitRuntime rt; // Create a runtime specialized for JIT. @@ -183,7 +183,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! //! using namespace asmjit; //! -//! typedef int (*Func)(void); +//! using Func = int (*)(void); //! //! int main(int argc, char* argv[]) { //! JitRuntime rt; // Create a runtime specialized for JIT. @@ -346,7 +346,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! //! using namespace asmjit; //! -//! typedef void (*SumIntsFunc)(int* dst, const int* a, const int* b); +//! using SumIntsFunc = void (*)(int* dst, const int* a, const int* b); //! //! int main(int argc, char* argv[]) { //! JitRuntime rt; // Create JIT Runtime. @@ -455,9 +455,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! //! using namespace asmjit; //! -//! typedef int (*Func)(void); -//! //! int main(int argc, char* argv[]) { +//! using Func = int (*)(void); +//! //! JitRuntime rt; // Create a runtime specialized for JIT. //! CodeHolder code; // Create a CodeHolder. //! @@ -640,7 +640,7 @@ class ASMJIT_VIRTAPI Assembler public EmitterImplicitT { public: ASMJIT_NONCOPYABLE(Assembler) - typedef BaseAssembler Base; + using Base = BaseAssembler; //! \name Construction & Destruction //! \{ diff --git a/src/asmjit/x86/x86builder.cpp b/src/asmjit/x86/x86builder.cpp index 9f025a1..29de793 100644 --- a/src/asmjit/x86/x86builder.cpp +++ b/src/asmjit/x86/x86builder.cpp @@ -18,8 +18,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() { _archMask = (uint64_t(1) << uint32_t(Arch::kX86)) | (uint64_t(1) << uint32_t(Arch::kX64)) ; - if (code) + if (code) { code->attach(this); + } } Builder::~Builder() noexcept {} diff --git a/src/asmjit/x86/x86builder.h b/src/asmjit/x86/x86builder.h index 194c140..41c64e6 100644 --- a/src/asmjit/x86/x86builder.h +++ b/src/asmjit/x86/x86builder.h @@ -31,8 +31,6 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! //! using namespace asmjit; //! -//! 
typedef void (*SumIntsFunc)(int* dst, const int* a, const int* b); -//! //! // Small helper function to print the current content of `cb`. //! static void dumpCode(BaseBuilder& builder, const char* phase) { //! String sb; @@ -43,6 +41,8 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! } //! //! int main() { +//! using SumIntsFunc = void (*)(int* dst, const int* a, const int* b); +//! //! JitRuntime rt; // Create JIT Runtime. //! CodeHolder code; // Create a CodeHolder. //! @@ -109,7 +109,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! //! SumIntsFunc fn; //! Error err = rt.add(&fn, &code); // Add the generated code to the runtime. -//! if (err) return 1; // Handle a possible error case. +//! if (err) { +//! return 1; // Handle a possible error case. +//! } //! //! // Execute the generated function. //! int inA[4] = { 4, 3, 2, 1 }; @@ -178,8 +180,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! } //! } //! -//! if (node == last) +//! if (node == last) { //! break; +//! } //! node = node->next(); //! } //! } @@ -320,7 +323,7 @@ class ASMJIT_VIRTAPI Builder public EmitterImplicitT { public: ASMJIT_NONCOPYABLE(Builder) - typedef BaseBuilder Base; + using Base = BaseBuilder; //! \name Construction & Destruction //! \{ diff --git a/src/asmjit/x86/x86compiler.cpp b/src/asmjit/x86/x86compiler.cpp index 830600e..ff4aa5e 100644 --- a/src/asmjit/x86/x86compiler.cpp +++ b/src/asmjit/x86/x86compiler.cpp @@ -19,8 +19,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() { _archMask = (uint64_t(1) << uint32_t(Arch::kX86)) | (uint64_t(1) << uint32_t(Arch::kX64)) ; - if (code) + if (code) { code->attach(this); + } } Compiler::~Compiler() noexcept {} diff --git a/src/asmjit/x86/x86compiler.h b/src/asmjit/x86/x86compiler.h index b281e20..6297856 100644 --- a/src/asmjit/x86/x86compiler.h +++ b/src/asmjit/x86/x86compiler.h @@ -31,10 +31,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! //! using namespace asmjit; //! -//! // Signature of the generated function. -//! 
typedef int (*Func)(void); -//! //! int main() { +//! using Func = int (*)(void); // Signature of the generated function. +//! //! JitRuntime rt; // Runtime specialized for JIT code execution. //! CodeHolder code; // Holds code and relocation information. //! @@ -76,10 +75,10 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! //! using namespace asmjit; //! -//! // Signature of the generated function. -//! typedef void (*MemCpy32)(uint32_t* dst, const uint32_t* src, size_t count); -//! //! int main() { +//! // Signature of the generated function. +//! using MemCpy32 = void (*)(uint32_t* dst, const uint32_t* src, size_t count); +//! //! JitRuntime rt; // Runtime specialized for JIT code execution. //! CodeHolder code; // Holds code and relocation information. //! @@ -123,6 +122,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! cc.endFunc(); // End of the function body. //! //! cc.finalize(); // Translate and assemble the whole 'cc' content. +//! //! // ----> x86::Compiler is no longer needed from here and can be destroyed <---- //! //! // Add the generated code to the runtime. @@ -130,8 +130,10 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! Error err = rt.add(&memcpy32, &code); //! //! // Handle a possible error returned by AsmJit. -//! if (err) +//! if (err) { //! return 1; +//! } +//! //! // ----> CodeHolder is no longer needed from here and can be destroyed <---- //! //! // Test the generated code. @@ -139,8 +141,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! uint32_t output[6]; //! memcpy32(output, input, 6); //! -//! for (uint32_t i = 0; i < 6; i++) +//! for (uint32_t i = 0; i < 6; i++) { //! printf("%d\n", output[i]); +//! } //! //! rt.release(memcpy32); //! return 0; @@ -159,10 +162,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! //! using namespace asmjit; //! -//! // Signature of the generated function. -//! typedef void (*Func)(void*); -//! //! int main() { +//! using Func = void (*)(void*); // Signature of the generated function. +//! //! 
JitRuntime rt; // Runtime specialized for JIT code execution. //! CodeHolder code; // Holds code and relocation information. //! @@ -218,10 +220,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! //! using namespace asmjit; //! -//! // Signature of the generated function. -//! typedef uint32_t (*Fibonacci)(uint32_t x); -//! //! int main() { +//! using FibFn = uint32_t (*)(uint32_t x); // Signature of the generated function. +//! //! JitRuntime rt; // Runtime specialized for JIT code execution. //! CodeHolder code; // Holds code and relocation information. //! @@ -261,7 +262,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! cc.finalize(); // Translate and assemble the whole 'cc' content. //! // ----> x86::Compiler is no longer needed from here and can be destroyed <---- //! -//! Fibonacci fib; +//! FibFn fib; //! Error err = rt.add(&fib, &code); // Add the generated code to the runtime. //! if (err) return 1; // Handle a possible error returned by AsmJit. //! // ----> CodeHolder is no longer needed from here and can be destroyed <---- @@ -287,10 +288,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! //! using namespace asmjit; //! -//! // Signature of the generated function. -//! typedef int (*Func)(void); -//! //! int main() { +//! using Func = int (*)(void); // Signature of the generated function. +//! //! JitRuntime rt; // Runtime specialized for JIT code execution. //! CodeHolder code; // Holds code and relocation information. //! @@ -488,7 +488,7 @@ class ASMJIT_VIRTAPI Compiler public EmitterExplicitT { public: ASMJIT_NONCOPYABLE(Compiler) - typedef BaseCompiler Base; + using Base = BaseCompiler; //! \name Construction & Destruction //! 
\{ diff --git a/src/asmjit/x86/x86emithelper.cpp b/src/asmjit/x86/x86emithelper.cpp index 9cd1db6..30ee1ba 100644 --- a/src/asmjit/x86/x86emithelper.cpp +++ b/src/asmjit/x86/x86emithelper.cpp @@ -34,6 +34,7 @@ static constexpr OperandSignature regSizeToGpSignature[8 + 1] = { OperandSignature{RegTraits::kSignature} }; +[[nodiscard]] static inline uint32_t getXmmMovInst(const FuncFrame& frame) { bool avx = frame.isAvxEnabled(); bool aligned = frame.hasAlignedVecSR(); @@ -43,6 +44,7 @@ static inline uint32_t getXmmMovInst(const FuncFrame& frame) { } //! Converts `size` to a 'kmov?' instruction. +[[nodiscard]] static inline uint32_t kmovInstFromSize(uint32_t size) noexcept { switch (size) { case 1: return Inst::kIdKmovb; @@ -53,6 +55,7 @@ static inline uint32_t kmovInstFromSize(uint32_t size) noexcept { } } +[[nodiscard]] static inline uint32_t makeCastOp(TypeId dst, TypeId src) noexcept { return (uint32_t(dst) << 8) | uint32_t(src); } @@ -102,7 +105,7 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove( dst.setSignature(Reg::signatureOfT()); src.setSignature(Reg::signatureOfT()); } - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case TypeId::kInt32: case TypeId::kUInt32: @@ -114,7 +117,7 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove( case TypeId::kMmx32: instId = Inst::kIdMovd; if (memFlags) break; - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case TypeId::kMmx64 : instId = Inst::kIdMovq ; break; case TypeId::kMask8 : instId = Inst::kIdKmovb; break; @@ -126,40 +129,54 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove( TypeId scalarTypeId = TypeUtils::scalarOf(typeId); if (TypeUtils::isVec32(typeId) && memFlags) { overrideMemSize = 4; - if (scalarTypeId == TypeId::kFloat32) + if (scalarTypeId == TypeId::kFloat32) { instId = _avxEnabled ? Inst::kIdVmovss : Inst::kIdMovss; - else + } + else { instId = _avxEnabled ? 
Inst::kIdVmovd : Inst::kIdMovd; + } break; } if (TypeUtils::isVec64(typeId) && memFlags) { overrideMemSize = 8; - if (scalarTypeId == TypeId::kFloat64) + if (scalarTypeId == TypeId::kFloat64) { instId = _avxEnabled ? Inst::kIdVmovsd : Inst::kIdMovsd; - else + } + else { instId = _avxEnabled ? Inst::kIdVmovq : Inst::kIdMovq; + } break; } - if (scalarTypeId == TypeId::kFloat32) + if (scalarTypeId == TypeId::kFloat32) { instId = _avxEnabled ? Inst::kIdVmovaps : Inst::kIdMovaps; - else if (scalarTypeId == TypeId::kFloat64) + } + else if (scalarTypeId == TypeId::kFloat64) { instId = _avxEnabled ? Inst::kIdVmovapd : Inst::kIdMovapd; - else if (!_avx512Enabled) + } + else if (!_avx512Enabled) { instId = _avxEnabled ? Inst::kIdVmovdqa : Inst::kIdMovdqa; - else + } + else { instId = Inst::kIdVmovdqa32; + } break; } } - if (!instId) + if (!instId) { return DebugUtils::errored(kErrorInvalidState); + } if (overrideMemSize) { - if (dst.isMem()) dst.as().setSize(overrideMemSize); - if (src.isMem()) src.as().setSize(overrideMemSize); + if (dst.isMem()) { + dst.as().setSize(overrideMemSize); + } + + if (src.isMem()) { + src.as().setSize(overrideMemSize); + } } _emitter->setInlineComment(comment); @@ -205,14 +222,12 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitArgMove( castOp == makeCastOp(TypeId::kInt64, TypeId::kInt16) || castOp == makeCastOp(TypeId::kInt64, TypeId::kInt32)) { // Sign extend by using 'movsx' or 'movsxd'. - instId = - castOp == makeCastOp(TypeId::kInt64, TypeId::kInt32) - ? Inst::kIdMovsxd - : Inst::kIdMovsx; + instId = (castOp == makeCastOp(TypeId::kInt64, TypeId::kInt32)) ? Inst::kIdMovsxd : Inst::kIdMovsx; dst.setSignature(regSizeToGpSignature[dstSize]); - if (src.isReg()) + if (src.isReg()) { src.setSignature(regSizeToGpSignature[srcSize]); + } break; } } @@ -220,16 +235,18 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitArgMove( // Zero extend. 
if (TypeUtils::isInt(srcTypeId) || src_.isMem()) { uint32_t movSize = Support::min(srcSize, dstSize); - if (movSize <= 4) + if (movSize <= 4) { dstSize = 4; + } // Zero extend by using 'movzx' or 'mov'. instId = movSize < 4 ? Inst::kIdMovzx : Inst::kIdMov; srcSize = Support::min(srcSize, movSize); dst.setSignature(regSizeToGpSignature[dstSize]); - if (src.isReg()) + if (src.isReg()) { src.setSignature(regSizeToGpSignature[srcSize]); + } break; } @@ -240,8 +257,9 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitArgMove( if (TypeUtils::isMmx(srcTypeId)) { // 64-bit move. instId = Inst::kIdMovq; - if (srcSize == 8) + if (srcSize == 8) { break; + } // 32-bit move. instId = Inst::kIdMovd; @@ -259,8 +277,9 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitArgMove( if (TypeUtils::isVec(srcTypeId)) { // 64-bit move. instId = _avxEnabled ? Inst::kIdVmovq : Inst::kIdMovq; - if (srcSize == 8) + if (srcSize == 8) { break; + } // 32-bit move. instId = _avxEnabled ? Inst::kIdVmovd : Inst::kIdMovd; @@ -275,23 +294,27 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitArgMove( if (TypeUtils::isInt(srcTypeId) || src.isMem()) { // 64-bit move. - if (srcSize == 8) + if (srcSize == 8) { break; + } // 32-bit move. instId = Inst::kIdMovd; - if (src.isReg()) + if (src.isReg()) { src.setSignature(Reg::signatureOfT()); + } break; } - if (TypeUtils::isMmx(srcTypeId)) + if (TypeUtils::isMmx(srcTypeId)) { break; + } // This will hurt if AVX is enabled. 
instId = Inst::kIdMovdq2q; - if (TypeUtils::isVec(srcTypeId)) + if (TypeUtils::isVec(srcTypeId)) { break; + } } if (TypeUtils::isMask(dstTypeId)) { @@ -299,8 +322,9 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitArgMove( if (TypeUtils::isInt(srcTypeId) || TypeUtils::isMask(srcTypeId) || src.isMem()) { instId = kmovInstFromSize(srcSize); - if (Reg::isGp(src) && srcSize <= 4) + if (Reg::isGp(src) && srcSize <= 4) { src.setSignature(Reg::signatureOfT()); + } break; } } @@ -324,15 +348,19 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitArgMove( srcSize = Support::min(dstSize * 2, srcSize); dstSize = srcSize / 2; - if (srcSize <= 8) + if (srcSize <= 8) { instId = _avxEnabled ? Inst::kIdVcvtss2sd : Inst::kIdCvtss2sd; - else + } + else { instId = _avxEnabled ? Inst::kIdVcvtps2pd : Inst::kIdCvtps2pd; + } - if (dstSize == 32) + if (dstSize == 32) { dst.setSignature(Reg::signatureOfT()); - if (src.isReg()) + } + if (src.isReg()) { src.setSignature(Reg::signatureOfVecBySize(srcSize)); + } break; } @@ -340,14 +368,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitArgMove( srcSize = Support::min(dstSize, srcSize * 2) / 2; dstSize = srcSize * 2; - if (srcSize <= 4) + if (srcSize <= 4) { instId = _avxEnabled ? Inst::kIdVcvtsd2ss : Inst::kIdCvtsd2ss; - else + } + else { instId = _avxEnabled ? Inst::kIdVcvtpd2ps : Inst::kIdCvtpd2ps; + } dst.setSignature(Reg::signatureOfVecBySize(dstSize)); - if (src.isReg() && srcSize >= 32) + if (src.isReg() && srcSize >= 32) { src.setSignature(Reg::signatureOfT()); + } break; } @@ -356,8 +387,9 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitArgMove( // 32-bit move. if (srcSize <= 4) { instId = _avxEnabled ? Inst::kIdVmovd : Inst::kIdMovd; - if (src.isReg()) + if (src.isReg()) { src.setSignature(Reg::signatureOfT()); + } break; } @@ -371,13 +403,15 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitArgMove( if (Reg::isVec(src) || src.isMem()) { instId = _avxEnabled ? 
Inst::kIdVmovaps : Inst::kIdMovaps; - if (src.isMem() && srcSize < _emitter->environment().stackAlignment()) + if (src.isMem() && srcSize < _emitter->environment().stackAlignment()) { instId = _avxEnabled ? Inst::kIdVmovups : Inst::kIdMovups; + } OperandSignature signature = Reg::signatureOfVecBySize(srcSize); dst.setSignature(signature); - if (src.isReg()) + if (src.isReg()) { src.setSignature(signature); + } break; } } @@ -400,8 +434,9 @@ Error EmitHelper::emitRegSwap( _emitter->setInlineComment(comment); return _emitter->emit(Inst::kIdXchg, a, b); } - else + else { return DebugUtils::errored(kErrorInvalidState); + } } // x86::EmitHelper - Emit Prolog & Epilog @@ -470,8 +505,9 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitProlog(const FuncFrame& frame) { if (saRegId != BaseReg::kIdBad && saRegId != Gp::kIdSp) { saReg.setId(saRegId); if (frame.hasPreservedFP()) { - if (saRegId != Gp::kIdBp) + if (saRegId != Gp::kIdBp) { ASMJIT_PROPAGATE(emitter->mov(saReg, zbp)); + } } else { ASMJIT_PROPAGATE(emitter->mov(saReg, zsp)); @@ -531,8 +567,9 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) { Gp gpReg = emitter->zsp(); // General purpose register (temporary). // Don't emit 'pop zbp' in the pop sequence, this case is handled separately. - if (frame.hasPreservedFP()) + if (frame.hasPreservedFP()) { gpSaved &= ~Support::bitMask(Gp::kIdBp); + } // Emit 'movxxx {[x|y|z]mm, k}, [zsp + X]'. { @@ -555,16 +592,23 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) { } // Emit 'emms' and/or 'vzeroupper'. 
- if (frame.hasMmxCleanup()) ASMJIT_PROPAGATE(emitter->emms()); - if (frame.hasAvxCleanup()) ASMJIT_PROPAGATE(emitter->vzeroupper()); + if (frame.hasMmxCleanup()) { + ASMJIT_PROPAGATE(emitter->emms()); + } + + if (frame.hasAvxCleanup()) { + ASMJIT_PROPAGATE(emitter->vzeroupper()); + } if (frame.hasPreservedFP()) { // Emit 'mov zsp, zbp' or 'lea zsp, [zbp - x]' int32_t count = int32_t(frame.pushPopSaveSize() - registerSize); - if (!count) + if (!count) { ASMJIT_PROPAGATE(emitter->mov(zsp, zbp)); - else + } + else { ASMJIT_PROPAGATE(emitter->lea(zsp, ptr(zbp, -count))); + } } else { if (frame.hasDynamicAlignment() && frame.hasDAOffset()) { @@ -594,14 +638,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) { } // Emit 'pop zbp'. - if (frame.hasPreservedFP()) + if (frame.hasPreservedFP()) { ASMJIT_PROPAGATE(emitter->pop(zbp)); + } // Emit 'ret' or 'ret x'. - if (frame.hasCalleeStackCleanup()) + if (frame.hasCalleeStackCleanup()) { ASMJIT_PROPAGATE(emitter->emit(Inst::kIdRet, int(frame.calleeStackCleanup()))); - else + } + else { ASMJIT_PROPAGATE(emitter->emit(Inst::kIdRet)); + } return kErrorOk; } diff --git a/src/asmjit/x86/x86emithelper_p.h b/src/asmjit/x86/x86emithelper_p.h index 1288c67..c74dd91 100644 --- a/src/asmjit/x86/x86emithelper_p.h +++ b/src/asmjit/x86/x86emithelper_p.h @@ -19,6 +19,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) //! \addtogroup asmjit_x86 //! \{ +[[nodiscard]] static ASMJIT_INLINE_NODEBUG RegType vecTypeIdToRegType(TypeId typeId) noexcept { return uint32_t(typeId) <= uint32_t(TypeId::_kVec128End) ? RegType::kX86_Xmm : uint32_t(typeId) <= uint32_t(TypeId::_kVec256End) ? RegType::kX86_Ymm : RegType::kX86_Zmm; diff --git a/src/asmjit/x86/x86emitter.h b/src/asmjit/x86/x86emitter.h index 47f8ba0..af9dbb3 100644 --- a/src/asmjit/x86/x86emitter.h +++ b/src/asmjit/x86/x86emitter.h @@ -108,34 +108,34 @@ template struct EmitterExplicitT { //! \cond - // These typedefs are used to describe implicit operands passed explicitly. 
- typedef Gp Gp_AL; - typedef Gp Gp_AH; - typedef Gp Gp_CL; - typedef Gp Gp_AX; - typedef Gp Gp_DX; + // These types are used to describe implicit operands passed explicitly. + using Gp_AL = Gp; + using Gp_AH = Gp; + using Gp_CL = Gp; + using Gp_AX = Gp; + using Gp_DX = Gp; - typedef Gp Gp_EAX; - typedef Gp Gp_EBX; - typedef Gp Gp_ECX; - typedef Gp Gp_EDX; + using Gp_EAX = Gp; + using Gp_EBX = Gp; + using Gp_ECX = Gp; + using Gp_EDX = Gp; - typedef Gp Gp_RAX; - typedef Gp Gp_RBX; - typedef Gp Gp_RCX; - typedef Gp Gp_RDX; + using Gp_RAX = Gp; + using Gp_RBX = Gp; + using Gp_RCX = Gp; + using Gp_RDX = Gp; - typedef Gp Gp_ZAX; - typedef Gp Gp_ZBX; - typedef Gp Gp_ZCX; - typedef Gp Gp_ZDX; + using Gp_ZAX = Gp; + using Gp_ZBX = Gp; + using Gp_ZCX = Gp; + using Gp_ZDX = Gp; - typedef Mem DS_ZAX; // ds:[zax] - typedef Mem DS_ZDI; // ds:[zdi] - typedef Mem ES_ZDI; // es:[zdi] - typedef Mem DS_ZSI; // ds:[zsi] + using DS_ZAX = Mem; // ds:[zax] + using DS_ZDI = Mem; // ds:[zdi] + using ES_ZDI = Mem; // es:[zdi] + using DS_ZSI = Mem; // ds:[zsi] - typedef Xmm XMM0; + using XMM0 = Xmm; // These two are unfortunately reported by the sanitizer. We know what we do, however, the sanitizer doesn't. // I have tried to use reinterpret_cast instead, but that would generate bad code when compiled by MSC. diff --git a/src/asmjit/x86/x86formatter.cpp b/src/asmjit/x86/x86formatter.cpp index f121225..b1bc799 100644 --- a/src/asmjit/x86/x86formatter.cpp +++ b/src/asmjit/x86/x86formatter.cpp @@ -44,77 +44,80 @@ struct RegFormatInfo { template struct RegFormatInfo_T { - enum { - kTypeIndex = X == uint32_t(RegType::kX86_GpbLo) ? 1 : - X == uint32_t(RegType::kX86_GpbHi) ? 8 : - X == uint32_t(RegType::kX86_Gpw ) ? 15 : - X == uint32_t(RegType::kX86_Gpd ) ? 19 : - X == uint32_t(RegType::kX86_Gpq ) ? 23 : - X == uint32_t(RegType::kX86_Xmm ) ? 27 : - X == uint32_t(RegType::kX86_Ymm ) ? 31 : - X == uint32_t(RegType::kX86_Zmm ) ? 35 : - X == uint32_t(RegType::kX86_Mm ) ? 
50 : - X == uint32_t(RegType::kX86_KReg ) ? 53 : - X == uint32_t(RegType::kX86_SReg ) ? 43 : - X == uint32_t(RegType::kX86_CReg ) ? 59 : - X == uint32_t(RegType::kX86_DReg ) ? 62 : - X == uint32_t(RegType::kX86_St ) ? 47 : - X == uint32_t(RegType::kX86_Bnd ) ? 55 : - X == uint32_t(RegType::kX86_Tmm ) ? 65 : - X == uint32_t(RegType::kX86_Rip ) ? 39 : 0, + static inline constexpr uint32_t kTypeIndex = + X == uint32_t(RegType::kX86_GpbLo) ? 1 : + X == uint32_t(RegType::kX86_GpbHi) ? 8 : + X == uint32_t(RegType::kX86_Gpw ) ? 15 : + X == uint32_t(RegType::kX86_Gpd ) ? 19 : + X == uint32_t(RegType::kX86_Gpq ) ? 23 : + X == uint32_t(RegType::kX86_Xmm ) ? 27 : + X == uint32_t(RegType::kX86_Ymm ) ? 31 : + X == uint32_t(RegType::kX86_Zmm ) ? 35 : + X == uint32_t(RegType::kX86_Mm ) ? 50 : + X == uint32_t(RegType::kX86_KReg ) ? 53 : + X == uint32_t(RegType::kX86_SReg ) ? 43 : + X == uint32_t(RegType::kX86_CReg ) ? 59 : + X == uint32_t(RegType::kX86_DReg ) ? 62 : + X == uint32_t(RegType::kX86_St ) ? 47 : + X == uint32_t(RegType::kX86_Bnd ) ? 55 : + X == uint32_t(RegType::kX86_Tmm ) ? 65 : + X == uint32_t(RegType::kX86_Rip ) ? 39 : 0; - kFormatIndex = X == uint32_t(RegType::kX86_GpbLo) ? 1 : - X == uint32_t(RegType::kX86_GpbHi) ? 6 : - X == uint32_t(RegType::kX86_Gpw ) ? 11 : - X == uint32_t(RegType::kX86_Gpd ) ? 16 : - X == uint32_t(RegType::kX86_Gpq ) ? 21 : - X == uint32_t(RegType::kX86_Xmm ) ? 25 : - X == uint32_t(RegType::kX86_Ymm ) ? 31 : - X == uint32_t(RegType::kX86_Zmm ) ? 37 : - X == uint32_t(RegType::kX86_Mm ) ? 60 : - X == uint32_t(RegType::kX86_KReg ) ? 65 : - X == uint32_t(RegType::kX86_SReg ) ? 49 : - X == uint32_t(RegType::kX86_CReg ) ? 75 : - X == uint32_t(RegType::kX86_DReg ) ? 80 : - X == uint32_t(RegType::kX86_St ) ? 55 : - X == uint32_t(RegType::kX86_Bnd ) ? 69 : - X == uint32_t(RegType::kX86_Tmm ) ? 89 : - X == uint32_t(RegType::kX86_Rip ) ? 43 : 0, + static inline constexpr uint32_t kFormatIndex = + X == uint32_t(RegType::kX86_GpbLo) ? 
1 : + X == uint32_t(RegType::kX86_GpbHi) ? 6 : + X == uint32_t(RegType::kX86_Gpw ) ? 11 : + X == uint32_t(RegType::kX86_Gpd ) ? 16 : + X == uint32_t(RegType::kX86_Gpq ) ? 21 : + X == uint32_t(RegType::kX86_Xmm ) ? 25 : + X == uint32_t(RegType::kX86_Ymm ) ? 31 : + X == uint32_t(RegType::kX86_Zmm ) ? 37 : + X == uint32_t(RegType::kX86_Mm ) ? 60 : + X == uint32_t(RegType::kX86_KReg ) ? 65 : + X == uint32_t(RegType::kX86_SReg ) ? 49 : + X == uint32_t(RegType::kX86_CReg ) ? 75 : + X == uint32_t(RegType::kX86_DReg ) ? 80 : + X == uint32_t(RegType::kX86_St ) ? 55 : + X == uint32_t(RegType::kX86_Bnd ) ? 69 : + X == uint32_t(RegType::kX86_Tmm ) ? 89 : + X == uint32_t(RegType::kX86_Rip ) ? 43 : 0; - kSpecialIndex = X == uint32_t(RegType::kX86_GpbLo) ? 96 : - X == uint32_t(RegType::kX86_GpbHi) ? 128 : - X == uint32_t(RegType::kX86_Gpw ) ? 161 : - X == uint32_t(RegType::kX86_Gpd ) ? 160 : - X == uint32_t(RegType::kX86_Gpq ) ? 192 : - X == uint32_t(RegType::kX86_SReg ) ? 224 : - X == uint32_t(RegType::kX86_Rip ) ? 85 : 0, + static inline constexpr uint32_t kSpecialIndex = + X == uint32_t(RegType::kX86_GpbLo) ? 96 : + X == uint32_t(RegType::kX86_GpbHi) ? 128 : + X == uint32_t(RegType::kX86_Gpw ) ? 161 : + X == uint32_t(RegType::kX86_Gpd ) ? 160 : + X == uint32_t(RegType::kX86_Gpq ) ? 192 : + X == uint32_t(RegType::kX86_SReg ) ? 224 : + X == uint32_t(RegType::kX86_Rip ) ? 85 : 0; - kSpecialCount = X == uint32_t(RegType::kX86_GpbLo) ? 8 : - X == uint32_t(RegType::kX86_GpbHi) ? 4 : - X == uint32_t(RegType::kX86_Gpw ) ? 8 : - X == uint32_t(RegType::kX86_Gpd ) ? 8 : - X == uint32_t(RegType::kX86_Gpq ) ? 8 : - X == uint32_t(RegType::kX86_SReg ) ? 7 : - X == uint32_t(RegType::kX86_Rip ) ? 1 : 0, + static inline constexpr uint32_t kSpecialCount = + X == uint32_t(RegType::kX86_GpbLo) ? 8 : + X == uint32_t(RegType::kX86_GpbHi) ? 4 : + X == uint32_t(RegType::kX86_Gpw ) ? 8 : + X == uint32_t(RegType::kX86_Gpd ) ? 8 : + X == uint32_t(RegType::kX86_Gpq ) ? 
8 : + X == uint32_t(RegType::kX86_SReg ) ? 7 : + X == uint32_t(RegType::kX86_Rip ) ? 1 : 0; - kRegCount = X == uint32_t(RegType::kX86_GpbLo) ? 32 : - X == uint32_t(RegType::kX86_GpbHi) ? 4 : - X == uint32_t(RegType::kX86_Gpw ) ? 32 : - X == uint32_t(RegType::kX86_Gpd ) ? 32 : - X == uint32_t(RegType::kX86_Gpq ) ? 32 : - X == uint32_t(RegType::kX86_Xmm ) ? 32 : - X == uint32_t(RegType::kX86_Ymm ) ? 32 : - X == uint32_t(RegType::kX86_Zmm ) ? 32 : - X == uint32_t(RegType::kX86_Mm ) ? 8 : - X == uint32_t(RegType::kX86_KReg ) ? 8 : - X == uint32_t(RegType::kX86_SReg ) ? 7 : - X == uint32_t(RegType::kX86_CReg ) ? 16 : - X == uint32_t(RegType::kX86_DReg ) ? 16 : - X == uint32_t(RegType::kX86_St ) ? 8 : - X == uint32_t(RegType::kX86_Bnd ) ? 4 : - X == uint32_t(RegType::kX86_Tmm ) ? 8 : - X == uint32_t(RegType::kX86_Rip ) ? 1 : 0 - }; + static inline constexpr uint32_t kRegCount = + X == uint32_t(RegType::kX86_GpbLo) ? 32 : + X == uint32_t(RegType::kX86_GpbHi) ? 4 : + X == uint32_t(RegType::kX86_Gpw ) ? 32 : + X == uint32_t(RegType::kX86_Gpd ) ? 32 : + X == uint32_t(RegType::kX86_Gpq ) ? 32 : + X == uint32_t(RegType::kX86_Xmm ) ? 32 : + X == uint32_t(RegType::kX86_Ymm ) ? 32 : + X == uint32_t(RegType::kX86_Zmm ) ? 32 : + X == uint32_t(RegType::kX86_Mm ) ? 8 : + X == uint32_t(RegType::kX86_KReg ) ? 8 : + X == uint32_t(RegType::kX86_SReg ) ? 7 : + X == uint32_t(RegType::kX86_CReg ) ? 16 : + X == uint32_t(RegType::kX86_DReg ) ? 16 : + X == uint32_t(RegType::kX86_St ) ? 8 : + X == uint32_t(RegType::kX86_Bnd ) ? 4 : + X == uint32_t(RegType::kX86_Tmm ) ? 8 : + X == uint32_t(RegType::kX86_Rip ) ? 
1 : 0; }; #define ASMJIT_REG_TYPE_ENTRY(TYPE) { \ @@ -393,18 +396,21 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(String& sb, FormatFlag ASMJIT_ASSERT(vReg != nullptr); const char* name = vReg->name(); - if (name && name[0] != '\0') + if (name && name[0] != '\0') { ASMJIT_PROPAGATE(sb.append(name)); - else + } + else { ASMJIT_PROPAGATE(sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(id)))); + } bool formatType = (Support::test(formatFlags, FormatFlags::kRegType)) || (Support::test(formatFlags, FormatFlags::kRegCasts) && vReg->type() != type); if (formatType && uint32_t(type) <= uint32_t(RegType::kMaxValue)) { const RegFormatInfo::TypeEntry& typeEntry = info.typeEntries[size_t(type)]; - if (typeEntry.index) + if (typeEntry.index) { ASMJIT_PROPAGATE(sb.appendFormat("@%s", info.typeStrings + typeEntry.index)); + } } return kErrorOk; @@ -418,15 +424,18 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(String& sb, FormatFlag if (uint32_t(type) <= uint32_t(RegType::kMaxValue)) { const RegFormatInfo::NameEntry& nameEntry = info.nameEntries[size_t(type)]; - if (id < nameEntry.specialCount) + if (id < nameEntry.specialCount) { return sb.append(info.nameStrings + nameEntry.specialIndex + id * 4); + } - if (id < nameEntry.count) + if (id < nameEntry.count) { return sb.appendFormat(info.nameStrings + nameEntry.formatIndex, unsigned(id)); + } const RegFormatInfo::TypeEntry& typeEntry = info.typeEntries[size_t(type)]; - if (typeEntry.index) + if (typeEntry.index) { return sb.appendFormat("%s@%u", info.typeStrings + typeEntry.index, id); + } } return sb.appendFormat("?%u", uint32_t(type), id); @@ -442,8 +451,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand( Arch arch, const Operand_& op) noexcept { - if (op.isReg()) + if (op.isReg()) { return formatRegister(sb, formatFlags, emitter, arch, op.as().type(), op.as().id()); + } if (op.isMem()) { const Mem& m = op.as(); @@ -451,8 +461,9 @@ ASMJIT_FAVOR_SIZE Error 
FormatterInternal::formatOperand( // Segment override prefix. uint32_t seg = m.segmentId(); - if (seg != SReg::kIdNone && seg < SReg::kIdCount) + if (seg != SReg::kIdNone && seg < SReg::kIdCount) { ASMJIT_PROPAGATE(sb.appendFormat("%s:", x86RegFormatInfo.nameStrings + 224 + size_t(seg) * 4)); + } ASMJIT_PROPAGATE(sb.append('[')); switch (m.addrType()) { @@ -483,8 +494,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand( } if (m.hasIndex()) { - if (opSign) + if (opSign) { ASMJIT_PROPAGATE(sb.append(opSign)); + } opSign = '+'; ASMJIT_PROPAGATE(formatRegister(sb, formatFlags, emitter, arch, m.indexType(), m.indexId())); @@ -499,8 +511,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand( off = ~off + 1; } - if (opSign) + if (opSign) { ASMJIT_PROPAGATE(sb.append(opSign)); + } uint32_t base = 10; if (Support::test(formatFlags, FormatFlags::kHexOffsets) && off > 9) { @@ -592,15 +605,17 @@ ASMJIT_FAVOR_SIZE static Error FormatterInternal_formatImmBits(String& sb, uint3 return DebugUtils::errored(kErrorInvalidState); } - if (!str[0]) + if (!str[0]) { continue; + } ASMJIT_PROPAGATE(sb.append(++n == 1 ? kImmCharStart : kImmCharOr)); ASMJIT_PROPAGATE(sb.append(str)); } - if (n) + if (n) { ASMJIT_PROPAGATE(sb.append(kImmCharEnd)); + } return kErrorOk; } @@ -623,8 +638,8 @@ ASMJIT_FAVOR_SIZE static Error FormatterInternal_explainConst( FormatFlags formatFlags, InstId instId, uint32_t vecSize, - const Imm& imm) noexcept { - + const Imm& imm +) noexcept { DebugUtils::unused(formatFlags); static const char vcmpx[] = @@ -874,41 +889,51 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction( // Format instruction options and instruction mnemonic. if (instId < Inst::_kIdCount) { // VEX|EVEX options. 
- if (Support::test(options, InstOptions::kX86_Vex)) + if (Support::test(options, InstOptions::kX86_Vex)) { ASMJIT_PROPAGATE(sb.append("{vex} ")); + } - if (Support::test(options, InstOptions::kX86_Vex3)) + if (Support::test(options, InstOptions::kX86_Vex3)) { ASMJIT_PROPAGATE(sb.append("{vex3} ")); + } - if (Support::test(options, InstOptions::kX86_Evex)) + if (Support::test(options, InstOptions::kX86_Evex)) { ASMJIT_PROPAGATE(sb.append("{evex} ")); + } // MOD/RM and MOD/MR options - if (Support::test(options, InstOptions::kX86_ModRM)) + if (Support::test(options, InstOptions::kX86_ModRM)) { ASMJIT_PROPAGATE(sb.append("{modrm} ")); - else if (Support::test(options, InstOptions::kX86_ModMR)) + } + else if (Support::test(options, InstOptions::kX86_ModMR)) { ASMJIT_PROPAGATE(sb.append("{modmr} ")); + } // SHORT|LONG options. - if (Support::test(options, InstOptions::kShortForm)) + if (Support::test(options, InstOptions::kShortForm)) { ASMJIT_PROPAGATE(sb.append("short ")); + } - if (Support::test(options, InstOptions::kLongForm)) + if (Support::test(options, InstOptions::kLongForm)) { ASMJIT_PROPAGATE(sb.append("long ")); + } // LOCK|XACQUIRE|XRELEASE options. - if (Support::test(options, InstOptions::kX86_XAcquire)) + if (Support::test(options, InstOptions::kX86_XAcquire)) { ASMJIT_PROPAGATE(sb.append("xacquire ")); + } - if (Support::test(options, InstOptions::kX86_XRelease)) + if (Support::test(options, InstOptions::kX86_XRelease)) { ASMJIT_PROPAGATE(sb.append("xrelease ")); + } - if (Support::test(options, InstOptions::kX86_Lock)) + if (Support::test(options, InstOptions::kX86_Lock)) { ASMJIT_PROPAGATE(sb.append("lock ")); + } // REP|REPNE options. if (Support::test(options, InstOptions::kX86_Rep | InstOptions::kX86_Repne)) { - sb.append(Support::test(options, InstOptions::kX86_Rep) ? "rep " : "repnz "); + ASMJIT_PROPAGATE(sb.append(Support::test(options, InstOptions::kX86_Rep) ? 
"rep " : "repnz ")); if (inst.hasExtraReg()) { ASMJIT_PROPAGATE(sb.append("{")); ASMJIT_PROPAGATE(formatOperand(sb, formatFlags, emitter, arch, inst.extraReg().toReg())); @@ -924,11 +949,24 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction( InstOptions::kX86_OpCodeW ; if (Support::test(options, kRXBWMask)) { ASMJIT_PROPAGATE(sb.append("rex.")); - if (Support::test(options, InstOptions::kX86_OpCodeR)) sb.append('r'); - if (Support::test(options, InstOptions::kX86_OpCodeX)) sb.append('x'); - if (Support::test(options, InstOptions::kX86_OpCodeB)) sb.append('b'); - if (Support::test(options, InstOptions::kX86_OpCodeW)) sb.append('w'); - sb.append(' '); + + if (Support::test(options, InstOptions::kX86_OpCodeR)) { + ASMJIT_PROPAGATE(sb.append('r')); + } + + if (Support::test(options, InstOptions::kX86_OpCodeX)) { + ASMJIT_PROPAGATE(sb.append('x')); + } + + if (Support::test(options, InstOptions::kX86_OpCodeB)) { + ASMJIT_PROPAGATE(sb.append('b')); + } + + if (Support::test(options, InstOptions::kX86_OpCodeW)) { + ASMJIT_PROPAGATE(sb.append('w')); + } + + ASMJIT_PROPAGATE(sb.append(' ')); } else { ASMJIT_PROPAGATE(sb.append("rex ")); @@ -948,16 +986,20 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction( for (uint32_t i = 0; i < opCount; i++) { const Operand_& op = operands[i]; - if (op.isNone()) break; + if (op.isNone()) { + break; + } ASMJIT_PROPAGATE(sb.append(i == 0 ? 
" " : ", ")); ASMJIT_PROPAGATE(formatOperand(sb, formatFlags, emitter, arch, op)); if (op.isImm() && uint32_t(formatFlags & FormatFlags::kExplainImms)) { uint32_t vecSize = 16; - for (uint32_t j = 0; j < opCount; j++) - if (operands[j].isReg()) + for (uint32_t j = 0; j < opCount; j++) { + if (operands[j].isReg()) { vecSize = Support::max(vecSize, operands[j].as().size()); + } + } ASMJIT_PROPAGATE(FormatterInternal_explainConst(sb, formatFlags, instId, vecSize, op.as())); } @@ -968,8 +1010,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction( ASMJIT_PROPAGATE(formatRegister(sb, formatFlags, emitter, arch, inst.extraReg().type(), inst.extraReg().id())); ASMJIT_PROPAGATE(sb.append('}')); - if (Support::test(options, InstOptions::kX86_ZMask)) + if (Support::test(options, InstOptions::kX86_ZMask)) { ASMJIT_PROPAGATE(sb.append("{z}")); + } } else if (Support::test(options, InstOptions::kX86_ZMask)) { ASMJIT_PROPAGATE(sb.append(" {z}")); diff --git a/src/asmjit/x86/x86func.cpp b/src/asmjit/x86/x86func.cpp index ac73aff..87e1bc8 100644 --- a/src/asmjit/x86/x86func.cpp +++ b/src/asmjit/x86/x86func.cpp @@ -14,6 +14,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) namespace FuncInternal { +[[nodiscard]] static inline bool shouldTreatAsCDeclIn64BitMode(CallConvId ccId) noexcept { return ccId == CallConvId::kCDecl || ccId == CallConvId::kStdCall || @@ -255,36 +256,43 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si TypeId typeId = func._rets[valueIndex].typeId(); // Terminate at the first void type (end of the pack). 
- if (typeId == TypeId::kVoid) + if (typeId == TypeId::kVoid) { break; + } switch (typeId) { case TypeId::kInt64: case TypeId::kUInt64: { - if (gpReturnIndexes[valueIndex] != BaseReg::kIdBad) + if (gpReturnIndexes[valueIndex] != BaseReg::kIdBad) { func._rets[valueIndex].initReg(RegType::kX86_Gpq, gpReturnIndexes[valueIndex], typeId); - else + } + else { return DebugUtils::errored(kErrorInvalidState); + } break; } case TypeId::kInt8: case TypeId::kInt16: case TypeId::kInt32: { - if (gpReturnIndexes[valueIndex] != BaseReg::kIdBad) + if (gpReturnIndexes[valueIndex] != BaseReg::kIdBad) { func._rets[valueIndex].initReg(RegType::kX86_Gpd, gpReturnIndexes[valueIndex], TypeId::kInt32); - else + } + else { return DebugUtils::errored(kErrorInvalidState); + } break; } case TypeId::kUInt8: case TypeId::kUInt16: case TypeId::kUInt32: { - if (gpReturnIndexes[valueIndex] != BaseReg::kIdBad) + if (gpReturnIndexes[valueIndex] != BaseReg::kIdBad) { func._rets[valueIndex].initReg(RegType::kX86_Gpd, gpReturnIndexes[valueIndex], TypeId::kUInt32); - else + } + else { return DebugUtils::errored(kErrorInvalidState); + } break; } @@ -310,8 +318,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si regType = cc.strategy() == CallConvStrategy::kDefault ? RegType::kX86_Xmm : RegType::kX86_Gpq; regIndex = cc.strategy() == CallConvStrategy::kDefault ? valueIndex : gpReturnIndexes[valueIndex]; - if (regIndex == BaseReg::kIdBad) + if (regIndex == BaseReg::kIdBad) { return DebugUtils::errored(kErrorInvalidState); + } } func._rets[valueIndex].initReg(regType, regIndex, typeId); @@ -339,16 +348,18 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si FuncValue& arg = func._args[argIndex][valueIndex]; // Terminate if there are no more arguments in the pack. 
- if (!arg) + if (!arg) { break; + } TypeId typeId = arg.typeId(); if (TypeUtils::isInt(typeId)) { uint32_t regId = BaseReg::kIdBad; - if (gpzPos < CallConv::kMaxRegArgsPerGroup) + if (gpzPos < CallConv::kMaxRegArgsPerGroup) { regId = cc._passedOrder[RegGroup::kGp].id[gpzPos]; + } if (regId != BaseReg::kIdBad) { RegType regType = typeId <= TypeId::kUInt32 ? RegType::kX86_Gpd : RegType::kX86_Gpq; @@ -367,20 +378,23 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si if (TypeUtils::isFloat(typeId) || TypeUtils::isVec(typeId)) { uint32_t regId = BaseReg::kIdBad; - if (vecPos < CallConv::kMaxRegArgsPerGroup) + if (vecPos < CallConv::kMaxRegArgsPerGroup) { regId = cc._passedOrder[RegGroup::kVec].id[vecPos]; + } if (TypeUtils::isFloat(typeId)) { // If this is a float, but `kFlagPassFloatsByVec` is false, we have to use stack instead. This should // be only used by 32-bit calling conventions. - if (!cc.hasFlag(CallConvFlags::kPassFloatsByVec)) + if (!cc.hasFlag(CallConvFlags::kPassFloatsByVec)) { regId = BaseReg::kIdBad; + } } else { // Pass vector registers via stack if this is a variable arguments function. This should be only used // by 32-bit calling conventions. - if (signature.hasVarArgs() && cc.hasFlag(CallConvFlags::kPassVecByStackIfVA)) + if (signature.hasVarArgs() && cc.hasFlag(CallConvFlags::kPassVecByStackIfVA)) { regId = BaseReg::kIdBad; + } } if (regId != BaseReg::kIdBad) { @@ -428,8 +442,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si FuncValue& arg = func._args[argIndex][valueIndex]; // Terminate if there are no more arguments in the pack. 
- if (!arg) + if (!arg) { break; + } TypeId typeId = arg.typeId(); uint32_t size = TypeUtils::sizeOf(typeId); @@ -437,8 +452,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si if (TypeUtils::isInt(typeId) || TypeUtils::isMmx(typeId)) { uint32_t regId = BaseReg::kIdBad; - if (argIndex < CallConv::kMaxRegArgsPerGroup) + if (argIndex < CallConv::kMaxRegArgsPerGroup) { regId = cc._passedOrder[RegGroup::kGp].id[argIndex]; + } if (regId != BaseReg::kIdBad) { RegType regType = size <= 4 && !TypeUtils::isMmx(typeId) ? RegType::kX86_Gpd : RegType::kX86_Gpq; @@ -455,8 +471,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si if (TypeUtils::isFloat(typeId) || TypeUtils::isVec(typeId)) { uint32_t regId = BaseReg::kIdBad; - if (argIndex < CallConv::kMaxRegArgsPerGroup) + if (argIndex < CallConv::kMaxRegArgsPerGroup) { regId = cc._passedOrder[RegGroup::kVec].id[argIndex]; + } if (regId != BaseReg::kIdBad) { // X64-ABI doesn't allow vector types (XMM|YMM|ZMM) to be passed via registers, however, VectorCall @@ -476,10 +493,12 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si } else { uint32_t gpRegId = cc._passedOrder[RegGroup::kGp].id[argIndex]; - if (gpRegId != BaseReg::kIdBad) + if (gpRegId != BaseReg::kIdBad) { arg.assignRegData(RegType::kX86_Gpq, gpRegId); - else + } + else { arg.assignStackOffset(int32_t(stackOffset)); + } arg.addFlags(FuncValue::kFlagIsIndirect); } diff --git a/src/asmjit/x86/x86globals.h b/src/asmjit/x86/x86globals.h index 71264c5..478223f 100644 --- a/src/asmjit/x86/x86globals.h +++ b/src/asmjit/x86/x86globals.h @@ -111,9 +111,9 @@ static constexpr CondCode _reverseCondTable[] = { //! \endcond //! Reverses a condition code (reverses the corresponding operands of a comparison). 
-static ASMJIT_INLINE_NODEBUG constexpr CondCode reverseCond(CondCode cond) noexcept { return _reverseCondTable[uint8_t(cond)]; } +static ASMJIT_INLINE_CONSTEXPR CondCode reverseCond(CondCode cond) noexcept { return _reverseCondTable[uint8_t(cond)]; } //! Negates a condition code. -static ASMJIT_INLINE_NODEBUG constexpr CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ 1u); } +static ASMJIT_INLINE_CONSTEXPR CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ 1u); } //! Instruction. //! @@ -1821,7 +1821,7 @@ namespace Inst { }; //! Tests whether the `instId` is defined. - static ASMJIT_INLINE_NODEBUG constexpr bool isDefinedId(InstId instId) noexcept { return instId < _kIdCount; } + static ASMJIT_INLINE_CONSTEXPR bool isDefinedId(InstId instId) noexcept { return instId < _kIdCount; } //! \cond #define ASMJIT_INST_FROM_COND(ID) \ @@ -1838,11 +1838,11 @@ namespace Inst { //! \endcond //! Translates a condition code `cond` to a `jcc` instruction id. - static ASMJIT_INLINE_NODEBUG constexpr InstId jccFromCond(CondCode cond) noexcept { return _jccTable[uint8_t(cond)]; } + static ASMJIT_INLINE_CONSTEXPR InstId jccFromCond(CondCode cond) noexcept { return _jccTable[uint8_t(cond)]; } //! Translates a condition code `cond` to a `setcc` instruction id. - static ASMJIT_INLINE_NODEBUG constexpr InstId setccFromCond(CondCode cond) noexcept { return _setccTable[uint8_t(cond)]; } + static ASMJIT_INLINE_CONSTEXPR InstId setccFromCond(CondCode cond) noexcept { return _setccTable[uint8_t(cond)]; } //! Translates a condition code `cond` to a `cmovcc` instruction id. - static ASMJIT_INLINE_NODEBUG constexpr InstId cmovccFromCond(CondCode cond) noexcept { return _cmovccTable[uint8_t(cond)]; } + static ASMJIT_INLINE_CONSTEXPR InstId cmovccFromCond(CondCode cond) noexcept { return _cmovccTable[uint8_t(cond)]; } } // {Inst} //! FPU status word bits. 
@@ -2122,7 +2122,7 @@ enum class VReduceImm : uint8_t { ASMJIT_DEFINE_ENUM_FLAGS(VReduceImm) //! Creates a \ref VReduceImm from a combination of `flags` and `fixedPointLength`. -static ASMJIT_INLINE_NODEBUG constexpr VReduceImm vReduceImm(VReduceImm flags, uint32_t fixedPointLength) noexcept { +static ASMJIT_INLINE_CONSTEXPR VReduceImm vReduceImm(VReduceImm flags, uint32_t fixedPointLength) noexcept { return flags | VReduceImm(fixedPointLength << 4); } @@ -2157,7 +2157,7 @@ enum class TLogImm : uint8_t { ASMJIT_DEFINE_ENUM_FLAGS(TLogImm) //! Creates an immediate that can be used by VPTERNLOG[D|Q] instructions. -static ASMJIT_INLINE_NODEBUG constexpr TLogImm tLogFromBits(uint8_t b000, uint8_t b001, uint8_t b010, uint8_t b011, uint8_t b100, uint8_t b101, uint8_t b110, uint8_t b111) noexcept { +static ASMJIT_INLINE_CONSTEXPR TLogImm tLogFromBits(uint8_t b000, uint8_t b001, uint8_t b010, uint8_t b011, uint8_t b100, uint8_t b101, uint8_t b110, uint8_t b111) noexcept { return TLogImm(uint8_t(b000 << 0) | uint8_t(b001 << 1) | uint8_t(b010 << 2) | @@ -2169,7 +2169,7 @@ static ASMJIT_INLINE_NODEBUG constexpr TLogImm tLogFromBits(uint8_t b000, uint8_ } //! Creates an if/else logic that can be used by VPTERNLOG[D|Q] instructions. -static ASMJIT_INLINE_NODEBUG constexpr TLogImm fLogIfElse(TLogImm condition, TLogImm a, TLogImm b) noexcept { return (condition & a) | (~condition & b); } +static ASMJIT_INLINE_CONSTEXPR TLogImm fLogIfElse(TLogImm condition, TLogImm a, TLogImm b) noexcept { return (condition & a) | (~condition & b); } //! Creates a shuffle immediate value that be used with SSE/AVX/AVX-512 instructions to shuffle 2 elements in a vector. //! @@ -2178,7 +2178,7 @@ static ASMJIT_INLINE_NODEBUG constexpr TLogImm fLogIfElse(TLogImm condition, TLo //! //! Shuffle constants can be used to encode an immediate for these instructions: //! 
- `shufpd|vshufpd` -static ASMJIT_INLINE_NODEBUG constexpr uint32_t shuffleImm(uint32_t a, uint32_t b) noexcept { +static ASMJIT_INLINE_CONSTEXPR uint32_t shuffleImm(uint32_t a, uint32_t b) noexcept { return (a << 1) | b; } @@ -2195,7 +2195,7 @@ static ASMJIT_INLINE_NODEBUG constexpr uint32_t shuffleImm(uint32_t a, uint32_t //! - `pshufhw|vpshufhw` //! - `pshufd|vpshufd` //! - `shufps|vshufps` -static ASMJIT_INLINE_NODEBUG constexpr uint32_t shuffleImm(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept { +static ASMJIT_INLINE_CONSTEXPR uint32_t shuffleImm(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept { return (a << 6) | (b << 4) | (c << 2) | d; } diff --git a/src/asmjit/x86/x86instapi.cpp b/src/asmjit/x86/x86instapi.cpp index 94cd31b..49228d1 100644 --- a/src/asmjit/x86/x86instapi.cpp +++ b/src/asmjit/x86/x86instapi.cpp @@ -145,11 +145,11 @@ static const X86ValidationData _x64ValidationData = { #undef REG_MASK_FROM_REG_TYPE_X64 #undef REG_MASK_FROM_REG_TYPE_X86 -static ASMJIT_FORCE_INLINE bool x86IsZmmOrM512(const Operand_& op) noexcept { +static ASMJIT_INLINE bool x86IsZmmOrM512(const Operand_& op) noexcept { return Reg::isZmm(op) || (op.isMem() && op.x86RmSize() == 64); } -static ASMJIT_FORCE_INLINE bool x86CheckOSig(const InstDB::OpSignature& op, const InstDB::OpSignature& ref, bool& immOutOfRange) noexcept { +static ASMJIT_INLINE bool x86CheckOSig(const InstDB::OpSignature& op, const InstDB::OpSignature& ref, bool& immOutOfRange) noexcept { // Fail if operand types are incompatible. InstDB::OpFlags commonFlags = op.flags() & ref.flags(); @@ -165,14 +165,16 @@ static ASMJIT_FORCE_INLINE bool x86CheckOSig(const InstDB::OpSignature& op, cons // Fail if some memory specific flags do not match. 
if (Support::test(commonFlags, InstDB::OpFlags::kMemMask)) { - if (ref.hasFlag(InstDB::OpFlags::kFlagMemBase) && !op.hasFlag(InstDB::OpFlags::kFlagMemBase)) + if (ref.hasFlag(InstDB::OpFlags::kFlagMemBase) && !op.hasFlag(InstDB::OpFlags::kFlagMemBase)) { return false; + } } // Fail if register indexes do not match. if (Support::test(commonFlags, InstDB::OpFlags::kRegMask)) { - if (ref.regMask() && !Support::test(op.regMask(), ref.regMask())) + if (ref.regMask() && !Support::test(op.regMask(), ref.regMask())) { return false; + } } return true; @@ -186,8 +188,9 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, InstId instId = inst.id(); InstOptions options = inst.options(); - if (ASMJIT_UNLIKELY(!Inst::isDefinedId(instId))) + if (ASMJIT_UNLIKELY(!Inst::isDefinedId(instId))) { return DebugUtils::errored(kErrorInvalidInstruction); + } const InstDB::InstInfo& instInfo = InstDB::infoById(instId); const InstDB::CommonInfo& commonInfo = instInfo.commonInfo(); @@ -203,22 +206,27 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, if (Support::test(options, InstOptions::kX86_Lock | kXAcqXRel)) { if (Support::test(options, InstOptions::kX86_Lock)) { - if (ASMJIT_UNLIKELY(!Support::test(iFlags, InstDB::InstFlags::kLock) && !Support::test(options, kXAcqXRel))) + if (ASMJIT_UNLIKELY(!Support::test(iFlags, InstDB::InstFlags::kLock) && !Support::test(options, kXAcqXRel))) { return DebugUtils::errored(kErrorInvalidLockPrefix); + } - if (ASMJIT_UNLIKELY(opCount < 1 || !operands[0].isMem())) + if (ASMJIT_UNLIKELY(opCount < 1 || !operands[0].isMem())) { return DebugUtils::errored(kErrorInvalidLockPrefix); + } } if (Support::test(options, kXAcqXRel)) { - if (ASMJIT_UNLIKELY(!Support::test(options, InstOptions::kX86_Lock) || (options & kXAcqXRel) == kXAcqXRel)) + if (ASMJIT_UNLIKELY(!Support::test(options, InstOptions::kX86_Lock) || (options & kXAcqXRel) == kXAcqXRel)) { return 
DebugUtils::errored(kErrorInvalidPrefixCombination); + } - if (ASMJIT_UNLIKELY(Support::test(options, InstOptions::kX86_XAcquire) && !Support::test(iFlags, InstDB::InstFlags::kXAcquire))) + if (ASMJIT_UNLIKELY(Support::test(options, InstOptions::kX86_XAcquire) && !Support::test(iFlags, InstDB::InstFlags::kXAcquire))) { return DebugUtils::errored(kErrorInvalidXAcquirePrefix); + } - if (ASMJIT_UNLIKELY(Support::test(options, InstOptions::kX86_XRelease) && !Support::test(iFlags, InstDB::InstFlags::kXRelease))) + if (ASMJIT_UNLIKELY(Support::test(options, InstOptions::kX86_XRelease) && !Support::test(iFlags, InstDB::InstFlags::kXRelease))) { return DebugUtils::errored(kErrorInvalidXReleasePrefix); + } } } @@ -226,11 +234,13 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, // ------------------------------- if (Support::test(options, kRepAny)) { - if (ASMJIT_UNLIKELY((options & kRepAny) == kRepAny)) + if (ASMJIT_UNLIKELY((options & kRepAny) == kRepAny)) { return DebugUtils::errored(kErrorInvalidPrefixCombination); + } - if (ASMJIT_UNLIKELY(!Support::test(iFlags, InstDB::InstFlags::kRep))) + if (ASMJIT_UNLIKELY(!Support::test(iFlags, InstDB::InstFlags::kRep))) { return DebugUtils::errored(kErrorInvalidRepPrefix); + } } // Translate Each Operand to the Corresponding OpSignature @@ -243,8 +253,9 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, for (i = 0; i < opCount; i++) { const Operand_& op = operands[i]; - if (op.opType() == OperandType::kNone) + if (op.opType() == OperandType::kNone) { break; + } InstDB::OpFlags opFlags = InstDB::OpFlags::kNone; RegMask regMask = 0; @@ -254,26 +265,30 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, RegType regType = op.as().type(); opFlags = _x86OpFlagFromRegType[size_t(regType)]; - if (ASMJIT_UNLIKELY(opFlags == InstDB::OpFlags::kNone)) + if (ASMJIT_UNLIKELY(opFlags == InstDB::OpFlags::kNone)) { return 
DebugUtils::errored(kErrorInvalidRegType); + } // If `regId` is equal or greater than Operand::kVirtIdMin it means that the register is virtual and its // index will be assigned later by the register allocator. We must pass unless asked to disallow virtual // registers. uint32_t regId = op.id(); if (regId < Operand::kVirtIdMin) { - if (ASMJIT_UNLIKELY(regId >= 32)) + if (ASMJIT_UNLIKELY(regId >= 32)) { return DebugUtils::errored(kErrorInvalidPhysId); + } - if (ASMJIT_UNLIKELY(Support::bitTest(vd->allowedRegMask[size_t(regType)], regId) == 0)) + if (ASMJIT_UNLIKELY(Support::bitTest(vd->allowedRegMask[size_t(regType)], regId) == 0)) { return DebugUtils::errored(kErrorInvalidPhysId); + } regMask = Support::bitMask(regId); combinedRegMask |= regMask; } else { - if (uint32_t(validationFlags & ValidationFlags::kEnableVirtRegs) == 0) + if (uint32_t(validationFlags & ValidationFlags::kEnableVirtRegs) == 0) { return DebugUtils::errored(kErrorIllegalVirtReg); + } regMask = 0xFFFFFFFFu; } break; @@ -288,18 +303,21 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, RegType baseType = m.baseType(); RegType indexType = m.indexType(); - if (m.segmentId() > 6) + if (m.segmentId() > 6) { return DebugUtils::errored(kErrorInvalidSegment); + } // Validate AVX-512 broadcast {1tox}. if (m.hasBroadcast()) { if (memSize != 0) { // If the size is specified it has to match the broadcast size. - if (ASMJIT_UNLIKELY(commonInfo.hasAvx512B32() && memSize != 4)) + if (ASMJIT_UNLIKELY(commonInfo.hasAvx512B32() && memSize != 4)) { return DebugUtils::errored(kErrorInvalidBroadcast); + } - if (ASMJIT_UNLIKELY(commonInfo.hasAvx512B64() && memSize != 8)) + if (ASMJIT_UNLIKELY(commonInfo.hasAvx512B64() && memSize != 8)) { return DebugUtils::errored(kErrorInvalidBroadcast); + } } else { // If there is no size we implicitly calculate it so we can validate N in {1toN} properly. 
@@ -317,17 +335,17 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, // Home address of a virtual register. In such case we don't want to validate the type of the // base register as it will always be patched to ESP|RSP. } - else { - if (ASMJIT_UNLIKELY(!Support::bitTest(vd->allowedMemBaseRegs, baseType))) - return DebugUtils::errored(kErrorInvalidAddress); + else if (ASMJIT_UNLIKELY(!Support::bitTest(vd->allowedMemBaseRegs, baseType))) { + return DebugUtils::errored(kErrorInvalidAddress); } // Create information that will be validated only if this is an implicit memory operand. Basically // only usable for string instructions and other instructions where memory operand is implicit and // has 'seg:[reg]' form. if (baseId < Operand::kVirtIdMin) { - if (ASMJIT_UNLIKELY(baseId >= 32)) + if (ASMJIT_UNLIKELY(baseId >= 32)) { return DebugUtils::errored(kErrorInvalidPhysId); + } // Physical base id. regMask = Support::bitMask(baseId); @@ -336,13 +354,15 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, else { // Virtual base id - fill the whole mask for implicit mem validation. The register is not assigned // yet, so we cannot predict the phys id. - if (uint32_t(validationFlags & ValidationFlags::kEnableVirtRegs) == 0) + if (uint32_t(validationFlags & ValidationFlags::kEnableVirtRegs) == 0) { return DebugUtils::errored(kErrorIllegalVirtReg); + } regMask = 0xFFFFFFFFu; } - if (indexType == RegType::kNone && !m.offsetLo32()) + if (indexType == RegType::kNone && !m.offsetLo32()) { opFlags |= InstDB::OpFlags::kFlagMemBase; + } } else if (baseType == RegType::kLabelTag) { // [Label] - there is no need to validate the base as it's label. @@ -353,18 +373,21 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, if (!Support::isInt32(offset)) { if (mode == InstDB::Mode::kX86) { // 32-bit mode: Make sure that the address is either `int32_t` or `uint32_t`. 
- if (!Support::isUInt32(offset)) + if (!Support::isUInt32(offset)) { return DebugUtils::errored(kErrorInvalidAddress64Bit); + } } else { // 64-bit mode: Zero extension is allowed if the address has 32-bit index register or the address // has no index register (it's still encodable). if (indexType != RegType::kNone) { - if (!Support::isUInt32(offset)) + if (!Support::isUInt32(offset)) { return DebugUtils::errored(kErrorInvalidAddress64Bit); + } - if (indexType != RegType::kX86_Gpd) + if (indexType != RegType::kX86_Gpd) { return DebugUtils::errored(kErrorInvalidAddress64BitZeroExtension); + } } else { // We don't validate absolute 64-bit addresses without an index register as this also depends @@ -375,8 +398,9 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, } if (indexType != RegType::kNone) { - if (ASMJIT_UNLIKELY(!Support::bitTest(vd->allowedMemIndexRegs, indexType))) + if (ASMJIT_UNLIKELY(!Support::bitTest(vd->allowedMemIndexRegs, indexType))) { return DebugUtils::errored(kErrorInvalidAddress); + } if (indexType == RegType::kX86_Xmm) { opFlags |= InstDB::OpFlags::kVm32x | InstDB::OpFlags::kVm64x; @@ -393,19 +417,20 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, } // [RIP + {XMM|YMM|ZMM}] is not allowed. 
- if (baseType == RegType::kX86_Rip && Support::test(opFlags, InstDB::OpFlags::kVmMask)) + if (baseType == RegType::kX86_Rip && Support::test(opFlags, InstDB::OpFlags::kVmMask)) { return DebugUtils::errored(kErrorInvalidAddress); + } uint32_t indexId = m.indexId(); if (indexId < Operand::kVirtIdMin) { - if (ASMJIT_UNLIKELY(indexId >= 32)) + if (ASMJIT_UNLIKELY(indexId >= 32)) { return DebugUtils::errored(kErrorInvalidPhysId); + } combinedRegMask |= Support::bitMask(indexId); } - else { - if (uint32_t(validationFlags & ValidationFlags::kEnableVirtRegs) == 0) - return DebugUtils::errored(kErrorIllegalVirtReg); + else if (uint32_t(validationFlags & ValidationFlags::kEnableVirtRegs) == 0) { + return DebugUtils::errored(kErrorIllegalVirtReg); } // Only used for implicit memory operands having 'seg:[reg]' form, so clear it. @@ -435,47 +460,62 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, uint64_t immValue = op.as().valueAs(); if (int64_t(immValue) >= 0) { - if (immValue <= 0x7u) + if (immValue <= 0x7u) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmU64 | InstDB::OpFlags::kImmI32 | InstDB::OpFlags::kImmU32 | InstDB::OpFlags::kImmI16 | InstDB::OpFlags::kImmU16 | InstDB::OpFlags::kImmI8 | InstDB::OpFlags::kImmU8 | InstDB::OpFlags::kImmI4 | InstDB::OpFlags::kImmU4 ; - else if (immValue <= 0xFu) + } + else if (immValue <= 0xFu) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmU64 | InstDB::OpFlags::kImmI32 | InstDB::OpFlags::kImmU32 | InstDB::OpFlags::kImmI16 | InstDB::OpFlags::kImmU16 | InstDB::OpFlags::kImmI8 | InstDB::OpFlags::kImmU8 | InstDB::OpFlags::kImmU4 ; - else if (immValue <= 0x7Fu) + } + else if (immValue <= 0x7Fu) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmU64 | InstDB::OpFlags::kImmI32 | InstDB::OpFlags::kImmU32 | InstDB::OpFlags::kImmI16 | InstDB::OpFlags::kImmU16 | InstDB::OpFlags::kImmI8 | InstDB::OpFlags::kImmU8 ; - else if (immValue <= 0xFFu) + } + else if (immValue <= 0xFFu) { 
opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmU64 | InstDB::OpFlags::kImmI32 | InstDB::OpFlags::kImmU32 | InstDB::OpFlags::kImmI16 | InstDB::OpFlags::kImmU16 | InstDB::OpFlags::kImmU8 ; - else if (immValue <= 0x7FFFu) + } + else if (immValue <= 0x7FFFu) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmU64 | InstDB::OpFlags::kImmI32 | InstDB::OpFlags::kImmU32 | InstDB::OpFlags::kImmI16 | InstDB::OpFlags::kImmU16 ; - else if (immValue <= 0xFFFFu) + } + else if (immValue <= 0xFFFFu) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmU64 | InstDB::OpFlags::kImmI32 | InstDB::OpFlags::kImmU32 | InstDB::OpFlags::kImmU16 ; - else if (immValue <= 0x7FFFFFFFu) + } + else if (immValue <= 0x7FFFFFFFu) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmU64 | InstDB::OpFlags::kImmI32 | InstDB::OpFlags::kImmU32; - else if (immValue <= 0xFFFFFFFFu) + } + else if (immValue <= 0xFFFFFFFFu) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmU64 | InstDB::OpFlags::kImmU32; - else if (immValue <= 0x7FFFFFFFFFFFFFFFu) + } + else if (immValue <= 0x7FFFFFFFFFFFFFFFu) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmU64; - else + } + else { opFlags = InstDB::OpFlags::kImmU64; + } } else { immValue = Support::neg(immValue); - if (immValue <= 0x8u) + if (immValue <= 0x8u) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmI32 | InstDB::OpFlags::kImmI16 | InstDB::OpFlags::kImmI8 | InstDB::OpFlags::kImmI4; - else if (immValue <= 0x80u) + } + else if (immValue <= 0x80u) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmI32 | InstDB::OpFlags::kImmI16 | InstDB::OpFlags::kImmI8; - else if (immValue <= 0x8000u) + } + else if (immValue <= 0x8000u) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmI32 | InstDB::OpFlags::kImmI16; - else if (immValue <= 0x80000000u) + } + else if (immValue <= 0x80000000u) { opFlags = InstDB::OpFlags::kImmI64 | InstDB::OpFlags::kImmI32; - else + } + else { opFlags = 
InstDB::OpFlags::kImmI64; + } } break; } @@ -499,22 +539,26 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, // more operands padded with none (which means that no operand is given at that index). However, validate that there // are no gaps (like [reg, none, reg] or [none, reg]). if (i < opCount) { - while (--opCount > i) - if (ASMJIT_UNLIKELY(!operands[opCount].isNone())) + while (--opCount > i) { + if (ASMJIT_UNLIKELY(!operands[opCount].isNone())) { return DebugUtils::errored(kErrorInvalidInstruction); + } + } } // Validate X86 and X64 specific cases. if (mode == InstDB::Mode::kX86) { // Illegal use of 64-bit register in 32-bit mode. - if (ASMJIT_UNLIKELY(Support::test(combinedOpFlags, InstDB::OpFlags::kRegGpq))) + if (ASMJIT_UNLIKELY(Support::test(combinedOpFlags, InstDB::OpFlags::kRegGpq))) { return DebugUtils::errored(kErrorInvalidUseOfGpq); + } } else { // Illegal use of a high 8-bit register with REX prefix. bool hasREX = inst.hasOption(InstOptions::kX86_Rex) || (combinedRegMask & 0xFFFFFF00u) != 0; - if (ASMJIT_UNLIKELY(hasREX && Support::test(combinedOpFlags, InstDB::OpFlags::kRegGpbHi))) + if (ASMJIT_UNLIKELY(hasREX && Support::test(combinedOpFlags, InstDB::OpFlags::kRegGpbHi))) { return DebugUtils::errored(kErrorInvalidUseOfGpbHi); + } } // Validate Instruction Signature by Comparing Against All `iSig` Rows @@ -532,8 +576,9 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, do { // Check if the architecture is compatible. - if (!iSig->supportsMode(mode)) + if (!iSig->supportsMode(mode)) { continue; + } // Compare the operands table with reference operands. 
uint32_t j = 0; @@ -541,9 +586,11 @@ static ASMJIT_FAVOR_SIZE Error validate(InstDB::Mode mode, const BaseInst& inst, bool localImmOutOfRange = false; if (iSigCount == opCount) { - for (j = 0; j < opCount; j++) - if (!x86CheckOSig(oSigTranslated[j], iSig->opSignature(j), localImmOutOfRange)) + for (j = 0; j < opCount; j++) { + if (!x86CheckOSig(oSigTranslated[j], iSig->opSignature(j), localImmOutOfRange)) { break; + } + } } else if (iSigCount - iSig->implicitOpCount() == opCount) { uint32_t r = 0; @@ -554,14 +601,17 @@ Next: oRef = opSignatureTable + iSig->opSignatureIndex(r); // Skip implicit operands. if (oRef->isImplicit()) { - if (++r >= iSigCount) + if (++r >= iSigCount) { break; - else + } + else { goto Next; + } } - if (!x86CheckOSig(*oChk, *oRef, localImmOutOfRange)) + if (!x86CheckOSig(*oChk, *oRef, localImmOutOfRange)) { break; + } } } @@ -576,10 +626,12 @@ Next: } while (++iSig != iEnd); if (iSig == iEnd) { - if (globalImmOutOfRange) + if (globalImmOutOfRange) { return DebugUtils::errored(kErrorInvalidImmediate); - else + } + else { return DebugUtils::errored(kErrorInvalidInstruction); + } } } @@ -592,25 +644,29 @@ Next: if (commonInfo.hasFlag(InstDB::InstFlags::kEvex)) { // Validate AVX-512 {z}. if (Support::test(options, InstOptions::kX86_ZMask)) { - if (ASMJIT_UNLIKELY(Support::test(options, InstOptions::kX86_ZMask) && !commonInfo.hasAvx512Z())) + if (ASMJIT_UNLIKELY(Support::test(options, InstOptions::kX86_ZMask) && !commonInfo.hasAvx512Z())) { return DebugUtils::errored(kErrorInvalidKZeroUse); + } } // Validate AVX-512 {sae} and {er}. if (Support::test(options, InstOptions::kX86_SAE | InstOptions::kX86_ER)) { // Rounding control is impossible if the instruction is not reg-to-reg. - if (ASMJIT_UNLIKELY(memOp)) + if (ASMJIT_UNLIKELY(memOp)) { return DebugUtils::errored(kErrorInvalidEROrSAE); + } // Check if {sae} or {er} is supported by the instruction. 
if (Support::test(options, InstOptions::kX86_ER)) { // NOTE: if both {sae} and {er} are set, we don't care, as {sae} is implied. - if (ASMJIT_UNLIKELY(!commonInfo.hasAvx512ER())) + if (ASMJIT_UNLIKELY(!commonInfo.hasAvx512ER())) { return DebugUtils::errored(kErrorInvalidEROrSAE); + } } else { - if (ASMJIT_UNLIKELY(!commonInfo.hasAvx512SAE())) + if (ASMJIT_UNLIKELY(!commonInfo.hasAvx512SAE())) { return DebugUtils::errored(kErrorInvalidEROrSAE); + } } // {sae} and {er} are defined for either scalar ops or vector ops that require LL to be 10 (512-bit vector @@ -623,15 +679,17 @@ Next: // There is no {er}/{sae}-enabled instruction with less than two operands. ASMJIT_ASSERT(opCount >= 2); - if (ASMJIT_UNLIKELY(!x86IsZmmOrM512(operands[0]) && !x86IsZmmOrM512(operands[1]))) + if (ASMJIT_UNLIKELY(!x86IsZmmOrM512(operands[0]) && !x86IsZmmOrM512(operands[1]))) { return DebugUtils::errored(kErrorInvalidEROrSAE); + } } } } else { // Not an AVX512 instruction - maybe OpExtra is xCX register used by REP/REPNE prefix. - if (Support::test(options, kAvx512Options) || !Support::test(options, kRepAny)) + if (Support::test(options, kAvx512Options) || !Support::test(options, kRepAny)) { return DebugUtils::errored(kErrorInvalidInstruction); + } } } @@ -641,27 +699,32 @@ Next: if (extraReg.isReg()) { if (Support::test(options, kRepAny)) { // Validate REP|REPNE {cx|ecx|rcx}. - if (ASMJIT_UNLIKELY(Support::test(iFlags, InstDB::InstFlags::kRepIgnored))) + if (ASMJIT_UNLIKELY(Support::test(iFlags, InstDB::InstFlags::kRepIgnored))) { return DebugUtils::errored(kErrorInvalidExtraReg); + } if (extraReg.isPhysReg()) { - if (ASMJIT_UNLIKELY(extraReg.id() != Gp::kIdCx)) + if (ASMJIT_UNLIKELY(extraReg.id() != Gp::kIdCx)) { return DebugUtils::errored(kErrorInvalidExtraReg); + } } // The type of the {...} register must match the type of the base register // of memory operand. So if the memory operand uses 32-bit register the // count register must also be 32-bit, etc... 
- if (ASMJIT_UNLIKELY(!memOp || extraReg.type() != memOp->baseType())) + if (ASMJIT_UNLIKELY(!memOp || extraReg.type() != memOp->baseType())) { return DebugUtils::errored(kErrorInvalidExtraReg); + } } else if (commonInfo.hasFlag(InstDB::InstFlags::kEvex)) { // Validate AVX-512 {k}. - if (ASMJIT_UNLIKELY(extraReg.type() != RegType::kX86_KReg)) + if (ASMJIT_UNLIKELY(extraReg.type() != RegType::kX86_KReg)) { return DebugUtils::errored(kErrorInvalidExtraReg); + } - if (ASMJIT_UNLIKELY(extraReg.id() == 0 || !commonInfo.hasAvx512K())) + if (ASMJIT_UNLIKELY(extraReg.id() == 0 || !commonInfo.hasAvx512K())) { return DebugUtils::errored(kErrorInvalidKMaskUse); + } } else { return DebugUtils::errored(kErrorInvalidExtraReg); @@ -698,7 +761,7 @@ static const Support::Array rwRegGr 0x00000000000000FFu // RIP. }}; -static ASMJIT_FORCE_INLINE void rwZeroExtendGp(OpRWInfo& opRwInfo, const Gp& reg, uint32_t nativeGpSize) noexcept { +static ASMJIT_INLINE void rwZeroExtendGp(OpRWInfo& opRwInfo, const Gp& reg, uint32_t nativeGpSize) noexcept { ASMJIT_ASSERT(BaseReg::isGp(reg.as())); if (reg.size() + 4 == nativeGpSize) { opRwInfo.addOpFlags(OpRWFlags::kZExt); @@ -706,7 +769,7 @@ static ASMJIT_FORCE_INLINE void rwZeroExtendGp(OpRWInfo& opRwInfo, const Gp& reg } } -static ASMJIT_FORCE_INLINE void rwZeroExtendAvxVec(OpRWInfo& opRwInfo, const Vec& reg) noexcept { +static ASMJIT_INLINE void rwZeroExtendAvxVec(OpRWInfo& opRwInfo, const Vec& reg) noexcept { DebugUtils::unused(reg); uint64_t msk = ~Support::fillTrailingBits(opRwInfo.writeByteMask()); @@ -716,7 +779,7 @@ static ASMJIT_FORCE_INLINE void rwZeroExtendAvxVec(OpRWInfo& opRwInfo, const Vec } } -static ASMJIT_FORCE_INLINE void rwZeroExtendNonVec(OpRWInfo& opRwInfo, const Reg& reg) noexcept { +static ASMJIT_INLINE void rwZeroExtendNonVec(OpRWInfo& opRwInfo, const Reg& reg) noexcept { uint64_t msk = ~Support::fillTrailingBits(opRwInfo.writeByteMask()) & rwRegGroupByteMask[reg.group()]; if (msk) { opRwInfo.addOpFlags(OpRWFlags::kZExt); @@ 
-724,7 +787,7 @@ static ASMJIT_FORCE_INLINE void rwZeroExtendNonVec(OpRWInfo& opRwInfo, const Reg } } -static ASMJIT_FORCE_INLINE Error rwHandleAVX512(const BaseInst& inst, const InstDB::CommonInfo& commonInfo, InstRWInfo* out) noexcept { +static ASMJIT_INLINE Error rwHandleAVX512(const BaseInst& inst, const InstDB::CommonInfo& commonInfo, InstRWInfo* out) noexcept { if (inst.hasExtraReg() && inst.extraReg().type() == RegType::kX86_KReg && out->opCount() > 0) { // AVX-512 instruction that uses a destination with {k} register (zeroing vs masking). out->_extraReg.addOpFlags(OpRWFlags::kRead); @@ -738,12 +801,14 @@ static ASMJIT_FORCE_INLINE Error rwHandleAVX512(const BaseInst& inst, const Inst return kErrorOk; } -static ASMJIT_FORCE_INLINE bool hasSameRegType(const BaseReg* regs, size_t opCount) noexcept { +static ASMJIT_INLINE bool hasSameRegType(const BaseReg* regs, size_t opCount) noexcept { ASMJIT_ASSERT(opCount > 0); RegType regType = regs[0].type(); - for (size_t i = 1; i < opCount; i++) - if (regs[i].type() != regType) + for (size_t i = 1; i < opCount; i++) { + if (regs[i].type() != regType) { return false; + } + } return true; } @@ -753,8 +818,9 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz // Get the instruction data. InstId instId = inst.id(); - if (ASMJIT_UNLIKELY(!Inst::isDefinedId(instId))) + if (ASMJIT_UNLIKELY(!Inst::isDefinedId(instId))) { return DebugUtils::errored(kErrorInvalidInstruction); + } // Read/Write flags. 
const InstDB::InstInfo& instInfo = InstDB::_instInfoTable[instId]; @@ -814,8 +880,13 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz uint64_t rByteMask = rwOpData.rByteMask; uint64_t wByteMask = rwOpData.wByteMask; - if (op.isRead() && !rByteMask) rByteMask = Support::lsbMask(srcOp.x86RmSize()); - if (op.isWrite() && !wByteMask) wByteMask = Support::lsbMask(srcOp.x86RmSize()); + if (op.isRead() && !rByteMask) { + rByteMask = Support::lsbMask(srcOp.x86RmSize()); + } + + if (op.isWrite() && !wByteMask) { + wByteMask = Support::lsbMask(srcOp.x86RmSize()); + } op._readByteMask = rByteMask; op._writeByteMask = wByteMask; @@ -845,17 +916,20 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz const x86::Mem& memOp = srcOp.as(); // The RW flags of BASE+INDEX are either provided by the data, which means // that the instruction is border-case, or they are deduced from the operand. - if (memOp.hasBaseReg() && !op.hasOpFlag(OpRWFlags::kMemBaseRW)) + if (memOp.hasBaseReg() && !op.hasOpFlag(OpRWFlags::kMemBaseRW)) { op.addOpFlags(OpRWFlags::kMemBaseRead); - if (memOp.hasIndexReg() && !op.hasOpFlag(OpRWFlags::kMemIndexRW)) + } + if (memOp.hasIndexReg() && !op.hasOpFlag(OpRWFlags::kMemIndexRW)) { op.addOpFlags(OpRWFlags::kMemIndexRead); + } } } // Only keep kMovOp if the instruction is actually register to register move of the same kind. if (out->hasInstFlag(InstRWFlags::kMovOp)) { - if (!(opCount >= 2 && opTypeMask == Support::bitMask(OperandType::kReg) && hasSameRegType(reinterpret_cast(operands), opCount))) + if (!(opCount >= 2 && opTypeMask == Support::bitMask(OperandType::kReg) && hasSameRegType(reinterpret_cast(operands), opCount))) { out->_instFlags &= ~InstRWFlags::kMovOp; + } } // Special cases require more logic. 
@@ -1001,10 +1075,12 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz const Mem& o1 = operands[1].as(); if (o0.isGp()) { - if (!o1.isOffset64Bit()) + if (!o1.isOffset64Bit()) { out->_operands[0].reset(W, o0.size()); - else + } + else { out->_operands[0].reset(W | RegPhys, o0.size(), Gp::kIdAx); + } out->_operands[1].reset(R | MibRead, o0.size()); rwZeroExtendGp(out->_operands[0], operands[0].as(), nativeGpSize); @@ -1024,10 +1100,12 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz if (o1.isGp()) { out->_operands[0].reset(W | MibRead, o1.size()); - if (!o0.isOffset64Bit()) + if (!o0.isOffset64Bit()) { out->_operands[1].reset(R, o1.size()); - else + } + else { out->_operands[1].reset(R | RegPhys, o1.size(), Gp::kIdAx); + } return kErrorOk; } @@ -1117,8 +1195,9 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz rwZeroExtendGp(out->_operands[0], operands[0].as(), nativeGpSize); } - if (operands[1].isMem()) + if (operands[1].isMem()) { out->_operands[1].addOpFlags(MibRead); + } return kErrorOk; } @@ -1129,8 +1208,9 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz out->_operands[2].reset(); rwZeroExtendGp(out->_operands[0], operands[0].as(), nativeGpSize); - if (operands[1].isMem()) + if (operands[1].isMem()) { out->_operands[1].addOpFlags(MibRead); + } return kErrorOk; } else { @@ -1140,8 +1220,9 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz rwZeroExtendGp(out->_operands[0], operands[0].as(), nativeGpSize); rwZeroExtendGp(out->_operands[1], operands[1].as(), nativeGpSize); - if (operands[2].isMem()) + if (operands[2].isMem()) { out->_operands[2].addOpFlags(MibRead); + } return kErrorOk; } } @@ -1301,8 +1382,9 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz if (opCount >= 2) { if (opCount >= 3) { - if (opCount > 3) + if (opCount > 3) { return 
DebugUtils::errored(kErrorInvalidInstruction); + } out->_operands[2].reset(); } @@ -1324,11 +1406,13 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz } // Handle 'pmovmskb|vpmovmskb'. - if (BaseReg::isGp(operands[0])) + if (BaseReg::isGp(operands[0])) { rwZeroExtendGp(out->_operands[0], operands[0].as(), nativeGpSize); + } - if (BaseReg::isVec(operands[0])) + if (BaseReg::isVec(operands[0])) { rwZeroExtendAvxVec(out->_operands[0], operands[0].as()); + } return rwHandleAVX512(inst, commonInfo, out); } @@ -1340,8 +1424,9 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz out->_operands[0].reset(W, size0); out->_operands[1].reset(R | MibRead, size1); - if (BaseReg::isVec(operands[0])) + if (BaseReg::isVec(operands[0])) { rwZeroExtendAvxVec(out->_operands[0], operands[0].as()); + } return kErrorOk; } @@ -1383,8 +1468,9 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz if (opCount >= 2) { if (opCount >= 3) { - if (opCount > 3) + if (opCount > 3) { return DebugUtils::errored(kErrorInvalidInstruction); + } out->_operands[2].reset(); } @@ -1394,8 +1480,9 @@ Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, siz out->_operands[0].reset(W, size0); out->_operands[1].reset(R, size1); - if (BaseReg::isVec(operands[0])) + if (BaseReg::isVec(operands[0])) { rwZeroExtendAvxVec(out->_operands[0], operands[0].as()); + } if (operands[0].isReg() && operands[1].isReg()) { if (instRmInfo.rmOpsMask & 0x1) { @@ -1447,8 +1534,9 @@ static RegAnalysis InstInternal_regAnalysis(const Operand_* operands, size_t opC if (op.isReg()) { const BaseReg& reg = op.as(); mask |= Support::bitMask(reg.type()); - if (reg.isVec()) + if (reg.isVec()) { highVecUsed |= uint32_t(reg.id() >= 16 && reg.id() < 32); + } } else if (op.isMem()) { const BaseMem& mem = op.as(); @@ -1472,7 +1560,7 @@ static inline uint32_t InstInternal_usesAvx512(InstOptions instOptions, const Re } Error 
queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept { - typedef CpuFeatures::X86 Ext; + using Ext = CpuFeatures::X86; // Only called when `arch` matches X86 family. DebugUtils::unused(arch); @@ -1482,8 +1570,9 @@ Error queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, s InstId instId = inst.id(); InstOptions options = inst.options(); - if (ASMJIT_UNLIKELY(!Inst::isDefinedId(instId))) + if (ASMJIT_UNLIKELY(!Inst::isDefinedId(instId))) { return DebugUtils::errored(kErrorInvalidInstruction); + } const InstDB::InstInfo& instInfo = InstDB::infoById(instId); const InstDB::AdditionalInfo& additionalInfo = InstDB::_additionalInfoTable[instInfo._additionalInfoIndex]; @@ -1495,8 +1584,9 @@ Error queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, s out->reset(); do { uint32_t feature = fData[0]; - if (!feature) + if (!feature) { break; + } out->add(feature); } while (++fData != fEnd); @@ -1555,21 +1645,19 @@ Error queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, s // Special case: VBROADCASTSS and VBROADCASTSD were introduced in AVX, but only version that uses memory as a // source operand. AVX2 then added support for register source operand. if (instId == Inst::kIdVbroadcastss || instId == Inst::kIdVbroadcastsd) { - if (opCount > 1 && operands[1].isMem()) + if (opCount > 1 && operands[1].isMem()) { isAVX2 = false; + } } else { // AVX instruction set doesn't support integer operations on YMM registers as these were later introcuced by // AVX2. In our case we have to check if YMM register(s) are in use and if that is the case this is an AVX2 // instruction. 
- if (!(regAnalysis.regTypeMask & Support::bitMask(RegType::kX86_Ymm, RegType::kX86_Zmm))) + if (!(regAnalysis.regTypeMask & Support::bitMask(RegType::kX86_Ymm, RegType::kX86_Zmm))) { isAVX2 = false; + } } - - if (isAVX2) - out->remove(Ext::kAVX); - else - out->remove(Ext::kAVX2); + out->remove(isAVX2 ? Ext::kAVX : Ext::kAVX2); } // Handle AVX vs AVX512 overlap. @@ -1582,15 +1670,13 @@ Error queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, s Ext::kAVX_VNNI, Ext::kAVX2, Ext::kF16C, - Ext::kFMA) - && + Ext::kFMA) && out->hasAny(Ext::kAVX512_BF16, Ext::kAVX512_BW, Ext::kAVX512_DQ, Ext::kAVX512_F, Ext::kAVX512_IFMA, Ext::kAVX512_VNNI)) { - uint32_t useEvex = InstInternal_usesAvx512(options, inst.extraReg(), regAnalysis) | regAnalysis.highVecUsed; switch (instId) { // Special case: VPBROADCAST[B|D|Q|W] only supports r32/r64 with EVEX prefix. @@ -1670,8 +1756,9 @@ Error queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, s } // Clear AVX512_VL if ZMM register is used. - if (regAnalysis.hasRegType(RegType::kX86_Zmm)) + if (regAnalysis.hasRegType(RegType::kX86_Zmm)) { out->remove(Ext::kAVX512_VL); + } } return kErrorOk; diff --git a/src/asmjit/x86/x86instdb.h b/src/asmjit/x86/x86instdb.h index dd282b2..d5901fc 100644 --- a/src/asmjit/x86/x86instdb.h +++ b/src/asmjit/x86/x86instdb.h @@ -30,7 +30,7 @@ enum class Mode : uint8_t { ASMJIT_DEFINE_ENUM_FLAGS(Mode) //! Converts architecture to operation mode, see \ref Mode. -static ASMJIT_INLINE_NODEBUG constexpr Mode modeFromArch(Arch arch) noexcept { +static ASMJIT_INLINE_CONSTEXPR Mode modeFromArch(Arch arch) noexcept { return arch == Arch::kX86 ? Mode::kX86 : arch == Arch::kX64 ? Mode::kX64 : Mode::kNone; } @@ -127,26 +127,39 @@ struct OpSignature { //! \{ //! Returns operand signature flags. + [[nodiscard]] inline OpFlags flags() const noexcept { return (OpFlags)_flags; } //! Tests whether the given `flag` is set. 
+ [[nodiscard]] inline bool hasFlag(OpFlags flag) const noexcept { return (_flags & uint64_t(flag)) != 0; } //! Tests whether this signature contains at least one register operand of any type. + [[nodiscard]] inline bool hasReg() const noexcept { return hasFlag(OpFlags::kRegMask); } + //! Tests whether this signature contains at least one scalar memory operand of any type. + [[nodiscard]] inline bool hasMem() const noexcept { return hasFlag(OpFlags::kMemMask); } + //! Tests whether this signature contains at least one vector memory operand of any type. + [[nodiscard]] inline bool hasVm() const noexcept { return hasFlag(OpFlags::kVmMask); } + //! Tests whether this signature contains at least one immediate operand of any type. + [[nodiscard]] inline bool hasImm() const noexcept { return hasFlag(OpFlags::kImmMask); } + //! Tests whether this signature contains at least one relative displacement operand of any type. + [[nodiscard]] inline bool hasRel() const noexcept { return hasFlag(OpFlags::kRelMask); } //! Tests whether the operand is implicit. + [[nodiscard]] inline bool isImplicit() const noexcept { return hasFlag(OpFlags::kFlagImplicit); } //! Returns a physical register mask. + [[nodiscard]] inline RegMask regMask() const noexcept { return _regMask; } //! \} @@ -179,30 +192,41 @@ struct InstSignature { //! \{ //! Returns instruction operation mode. + [[nodiscard]] inline Mode mode() const noexcept { return (Mode)_mode; } + //! Tests whether the instruction supports the given operating mode. + [[nodiscard]] inline bool supportsMode(Mode mode) const noexcept { return (uint8_t(_mode) & uint8_t(mode)) != 0; } //! Returns the number of operands of this signature. + [[nodiscard]] inline uint32_t opCount() const noexcept { return _opCount; } + //! Returns the number of implicit operands this signature has. + [[nodiscard]] inline uint32_t implicitOpCount() const noexcept { return _implicitOpCount; } + //! 
Tests whether this instruction signature has at least one implicit operand. + [[nodiscard]] inline bool hasImplicitOperands() const noexcept { return _implicitOpCount != 0; } //! Returns indexes to \ref _opSignatureTable for each operand of the instruction. //! //! \note The returned array always provides indexes for all operands (see \ref Globals::kMaxOpCount) even if the //! instruction provides less operands. Undefined operands have always index of zero. + [[nodiscard]] inline const uint8_t* opSignatureIndexes() const noexcept { return _opSignatureIndexes; } //! Returns index to \ref _opSignatureTable, corresponding to the requested operand `index` of the instruction. + [[nodiscard]] inline uint8_t opSignatureIndex(size_t index) const noexcept { ASMJIT_ASSERT(index < Globals::kMaxOpCount); return _opSignatureIndexes[index]; } //! Returns \ref OpSignature corresponding to the requested operand `index` of the instruction. + [[nodiscard]] inline const OpSignature& opSignature(size_t index) const noexcept { ASMJIT_ASSERT(index < Globals::kMaxOpCount); return _opSignatureTable[_opSignatureIndexes[index]]; @@ -345,91 +369,158 @@ struct CommonInfo { //! \{ //! Returns instruction flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstFlags flags() const noexcept { return (InstFlags)_flags; } + //! Tests whether the instruction has a `flag`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(InstFlags flag) const noexcept { return Support::test(_flags, flag); } //! Returns instruction AVX-512 flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Avx512Flags avx512Flags() const noexcept { return (Avx512Flags)_avx512Flags; } + //! Tests whether the instruction has an AVX-512 `flag`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512Flag(Avx512Flags flag) const noexcept { return Support::test(_avx512Flags, flag); } //! Tests whether the instruction is FPU instruction. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFpu() const noexcept { return hasFlag(InstFlags::kFpu); } + //! 
Tests whether the instruction is MMX/3DNOW instruction that accesses MMX registers (includes EMMS and FEMMS). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMmx() const noexcept { return hasFlag(InstFlags::kMmx); } + //! Tests whether the instruction is SSE|AVX|AVX512 instruction that accesses XMM|YMM|ZMM registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVec() const noexcept { return hasFlag(InstFlags::kVec); } + //! Tests whether the instruction is SSE+ (SSE4.2, AES, SHA included) instruction that accesses XMM registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isSse() const noexcept { return (flags() & (InstFlags::kVec | InstFlags::kVex | InstFlags::kEvex)) == InstFlags::kVec; } + //! Tests whether the instruction is AVX+ (FMA included) instruction that accesses XMM|YMM|ZMM registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAvx() const noexcept { return isVec() && isVexOrEvex(); } //! Tests whether the instruction can be prefixed with LOCK prefix. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasLockPrefix() const noexcept { return hasFlag(InstFlags::kLock); } + //! Tests whether the instruction can be prefixed with REP (REPE|REPZ) prefix. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRepPrefix() const noexcept { return hasFlag(InstFlags::kRep); } + //! Tests whether the instruction can be prefixed with XACQUIRE prefix. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasXAcquirePrefix() const noexcept { return hasFlag(InstFlags::kXAcquire); } + //! Tests whether the instruction can be prefixed with XRELEASE prefix. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasXReleasePrefix() const noexcept { return hasFlag(InstFlags::kXRelease); } //! Tests whether the rep prefix is supported by the instruction, but ignored (has no effect). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isRepIgnored() const noexcept { return hasFlag(InstFlags::kRepIgnored); } + //! Tests whether the instruction uses MIB. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMibOp() const noexcept { return hasFlag(InstFlags::kMib); } + //! Tests whether the instruction uses VSIB. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVsibOp() const noexcept { return hasFlag(InstFlags::kVsib); } + //! Tests whether the instruction uses TSIB (AMX, instruction requires MOD+SIB). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isTsibOp() const noexcept { return hasFlag(InstFlags::kTsib); } + //! Tests whether the instruction uses VEX (can be set together with EVEX if both are encodable). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVex() const noexcept { return hasFlag(InstFlags::kVex); } + //! Tests whether the instruction uses EVEX (can be set together with VEX if both are encodable). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEvex() const noexcept { return hasFlag(InstFlags::kEvex); } + //! Tests whether the instruction uses EVEX (can be set together with VEX if both are encodable). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVexOrEvex() const noexcept { return hasFlag(InstFlags::kVex | InstFlags::kEvex); } //! Tests whether the instruction should prefer EVEX prefix instead of VEX prefix. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool preferEvex() const noexcept { return hasFlag(InstFlags::kPreferEvex); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEvexCompatible() const noexcept { return hasFlag(InstFlags::kEvexCompat); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEvexKRegOnly() const noexcept { return hasFlag(InstFlags::kEvexKReg); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEvexTwoOpOnly() const noexcept { return hasFlag(InstFlags::kEvexTwoOp); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEvexTransformable() const noexcept { return hasFlag(InstFlags::kEvexTransformable); } //! Tests whether the instruction supports AVX512 masking {k}. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512K() const noexcept { return hasAvx512Flag(Avx512Flags::kK); } + //! 
Tests whether the instruction supports AVX512 zeroing {k}{z}. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512Z() const noexcept { return hasAvx512Flag(Avx512Flags::kZ); } + //! Tests whether the instruction supports AVX512 embedded-rounding {er}. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512ER() const noexcept { return hasAvx512Flag(Avx512Flags::kER); } + //! Tests whether the instruction supports AVX512 suppress-all-exceptions {sae}. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512SAE() const noexcept { return hasAvx512Flag(Avx512Flags::kSAE); } + //! Tests whether the instruction supports AVX512 broadcast (either 32-bit or 64-bit). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512B() const noexcept { return hasAvx512Flag(Avx512Flags::kB16 | Avx512Flags::kB32 | Avx512Flags::kB64); } + //! Tests whether the instruction supports AVX512 broadcast (16-bit). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512B16() const noexcept { return hasAvx512Flag(Avx512Flags::kB16); } + //! Tests whether the instruction supports AVX512 broadcast (32-bit). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512B32() const noexcept { return hasAvx512Flag(Avx512Flags::kB32); } + //! Tests whether the instruction supports AVX512 broadcast (64-bit). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512B64() const noexcept { return hasAvx512Flag(Avx512Flags::kB64); } // Returns the size of the broadcast - either 2, 4, or 8, or 0 if broadcast is not supported. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t broadcastSize() const noexcept { constexpr uint32_t kShift = Support::ConstCTZ::value; return (uint32_t(_avx512Flags) & uint32_t(Avx512Flags::kB16 | Avx512Flags::kB32 | Avx512Flags::kB64)) >> (kShift - 1); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t signatureIndex() const noexcept { return _iSignatureIndex; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t signatureCount() const noexcept { return _iSignatureCount; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG const InstSignature* signatureData() const noexcept { return _instSignatureTable + _iSignatureIndex; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG const InstSignature* signatureEnd() const noexcept { return _instSignatureTable + _iSignatureIndex + _iSignatureCount; } //! Returns a control flow category of the instruction. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstControlFlow controlFlow() const noexcept { return (InstControlFlow)_controlFlow; } //! Returns a hint that can be used when both inputs are the same register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstSameRegHint sameRegHint() const noexcept { return (InstSameRegHint)_sameRegHint; } //! \} @@ -459,86 +550,151 @@ struct InstInfo { //! \{ //! Returns common information, see \ref CommonInfo. + [[nodiscard]] ASMJIT_INLINE_NODEBUG const CommonInfo& commonInfo() const noexcept { return _commonInfoTable[_commonInfoIndex]; } //! Returns instruction flags, see \ref InstFlags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstFlags flags() const noexcept { return commonInfo().flags(); } + //! Tests whether the instruction has flag `flag`, see \ref InstFlags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasFlag(InstFlags flag) const noexcept { return commonInfo().hasFlag(flag); } //! Returns instruction AVX-512 flags, see \ref Avx512Flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Avx512Flags avx512Flags() const noexcept { return commonInfo().avx512Flags(); } + //! 
Tests whether the instruction has an AVX-512 `flag`, see \ref Avx512Flags. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512Flag(Avx512Flags flag) const noexcept { return commonInfo().hasAvx512Flag(flag); } //! Tests whether the instruction is FPU instruction. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isFpu() const noexcept { return commonInfo().isFpu(); } + //! Tests whether the instruction is MMX/3DNOW instruction that accesses MMX registers (includes EMMS and FEMMS). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMmx() const noexcept { return commonInfo().isMmx(); } + //! Tests whether the instruction is SSE|AVX|AVX512 instruction that accesses XMM|YMM|ZMM registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVec() const noexcept { return commonInfo().isVec(); } + //! Tests whether the instruction is SSE+ (SSE4.2, AES, SHA included) instruction that accesses XMM registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isSse() const noexcept { return commonInfo().isSse(); } + //! Tests whether the instruction is AVX+ (FMA included) instruction that accesses XMM|YMM|ZMM registers. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isAvx() const noexcept { return commonInfo().isAvx(); } //! Tests whether the instruction can be prefixed with LOCK prefix. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasLockPrefix() const noexcept { return commonInfo().hasLockPrefix(); } + //! Tests whether the instruction can be prefixed with REP (REPE|REPZ) prefix. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasRepPrefix() const noexcept { return commonInfo().hasRepPrefix(); } + //! Tests whether the instruction can be prefixed with XACQUIRE prefix. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasXAcquirePrefix() const noexcept { return commonInfo().hasXAcquirePrefix(); } + //! Tests whether the instruction can be prefixed with XRELEASE prefix. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasXReleasePrefix() const noexcept { return commonInfo().hasXReleasePrefix(); } //! 
Tests whether the rep prefix is supported by the instruction, but ignored (has no effect). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isRepIgnored() const noexcept { return commonInfo().isRepIgnored(); } + //! Tests whether the instruction uses MIB. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isMibOp() const noexcept { return hasFlag(InstFlags::kMib); } + //! Tests whether the instruction uses VSIB. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVsibOp() const noexcept { return hasFlag(InstFlags::kVsib); } + //! Tests whether the instruction uses VEX (can be set together with EVEX if both are encodable). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVex() const noexcept { return hasFlag(InstFlags::kVex); } + //! Tests whether the instruction uses EVEX (can be set together with VEX if both are encodable). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEvex() const noexcept { return hasFlag(InstFlags::kEvex); } + //! Tests whether the instruction uses EVEX (can be set together with VEX if both are encodable). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isVexOrEvex() const noexcept { return hasFlag(InstFlags::kVex | InstFlags::kEvex); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEvexCompatible() const noexcept { return hasFlag(InstFlags::kEvexCompat); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEvexKRegOnly() const noexcept { return hasFlag(InstFlags::kEvexKReg); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEvexTwoOpOnly() const noexcept { return hasFlag(InstFlags::kEvexTwoOp); } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool isEvexTransformable() const noexcept { return hasFlag(InstFlags::kEvexTransformable); } //! Tests whether the instruction supports AVX512 masking {k}. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512K() const noexcept { return hasAvx512Flag(Avx512Flags::kK); } + //! Tests whether the instruction supports AVX512 zeroing {k}{z}. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512Z() const noexcept { return hasAvx512Flag(Avx512Flags::kZ); } + //! Tests whether the instruction supports AVX512 embedded-rounding {er}. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512ER() const noexcept { return hasAvx512Flag(Avx512Flags::kER); } + //! Tests whether the instruction supports AVX512 suppress-all-exceptions {sae}. + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512SAE() const noexcept { return hasAvx512Flag(Avx512Flags::kSAE); } + //! Tests whether the instruction supports AVX512 broadcast (either 32-bit or 64-bit). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512B() const noexcept { return hasAvx512Flag(Avx512Flags::kB16 | Avx512Flags::kB32 | Avx512Flags::kB64); } + //! Tests whether the instruction supports AVX512 broadcast (16-bit). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512B16() const noexcept { return hasAvx512Flag(Avx512Flags::kB16); } + //! Tests whether the instruction supports AVX512 broadcast (32-bit). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512B32() const noexcept { return hasAvx512Flag(Avx512Flags::kB32); } + //! Tests whether the instruction supports AVX512 broadcast (64-bit). + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool hasAvx512B64() const noexcept { return hasAvx512Flag(Avx512Flags::kB64); } //! Returns a control flow category of the instruction. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstControlFlow controlFlow() const noexcept { return commonInfo().controlFlow(); } + //! Returns a hint that can be used when both inputs are the same register. + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstSameRegHint sameRegHint() const noexcept { return commonInfo().sameRegHint(); } //! Returns the beginning of the instruction signature list relative to \ref _instSignatureTable. + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t signatureIndex() const noexcept { return commonInfo().signatureIndex(); } + //! Returns the number of instruction signature entries. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t signatureCount() const noexcept { return commonInfo().signatureCount(); } //! Returns the beginning of instruction signature data (relative to \ref _instSignatureTable). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const InstSignature* signatureData() const noexcept { return commonInfo().signatureData(); } + //! Returns the end of instruction signature data (relative to \ref _instSignatureTable). + [[nodiscard]] ASMJIT_INLINE_NODEBUG const InstSignature* signatureEnd() const noexcept { return commonInfo().signatureEnd(); } //! \} @@ -546,6 +702,7 @@ struct InstInfo { ASMJIT_VARAPI const InstInfo _instInfoTable[]; +[[nodiscard]] static inline const InstInfo& infoById(InstId instId) noexcept { ASMJIT_ASSERT(Inst::isDefinedId(instId)); return _instInfoTable[instId]; diff --git a/src/asmjit/x86/x86operand.h b/src/asmjit/x86/x86operand.h index 8510a93..75125cf 100644 --- a/src/asmjit/x86/x86operand.h +++ b/src/asmjit/x86/x86operand.h @@ -77,55 +77,96 @@ public: ASMJIT_DEFINE_ABSTRACT_REG(Reg, BaseReg) //! Tests whether the register is a GPB register (8-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isGpb() const noexcept { return size() == 1; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGpb() const noexcept { return size() == 1; } + //! Tests whether the register is a low GPB register (8-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isGpbLo() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGpbLo() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a high GPB register (8-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isGpbHi() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGpbHi() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a GPW register (16-bit). 
- ASMJIT_INLINE_NODEBUG constexpr bool isGpw() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGpw() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a GPD register (32-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isGpd() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGpd() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a GPQ register (64-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isGpq() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGpq() const noexcept { return hasBaseSignature(RegTraits::kSignature); } //! Tests whether the register is a 32-bit general purpose register, alias of \ref isGpd(). - ASMJIT_INLINE_NODEBUG constexpr bool isGp32() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGp32() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a 64-bit general purpose register, alias of \ref isGpq() - ASMJIT_INLINE_NODEBUG constexpr bool isGp64() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isGp64() const noexcept { return hasBaseSignature(RegTraits::kSignature); } //! Tests whether the register is an XMM register (128-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isXmm() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isXmm() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a YMM register (256-bit). 
- ASMJIT_INLINE_NODEBUG constexpr bool isYmm() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isYmm() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a ZMM register (512-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isZmm() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isZmm() const noexcept { return hasBaseSignature(RegTraits::kSignature); } //! Tests whether the register is a 128-bit vector register, alias of \ref isXmm(). - ASMJIT_INLINE_NODEBUG constexpr bool isVec128() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec128() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a 256-bit vector register, alias of \ref isYmm(). - ASMJIT_INLINE_NODEBUG constexpr bool isVec256() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec256() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a 512-bit vector register, alias of \ref isZmm(). - ASMJIT_INLINE_NODEBUG constexpr bool isVec512() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isVec512() const noexcept { return hasBaseSignature(RegTraits::kSignature); } //! Tests whether the register is an MMX register (64-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isMm() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isMm() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a K register (64-bit). 
- ASMJIT_INLINE_NODEBUG constexpr bool isKReg() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isKReg() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a segment register. - ASMJIT_INLINE_NODEBUG constexpr bool isSReg() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isSReg() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a control register. - ASMJIT_INLINE_NODEBUG constexpr bool isCReg() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isCReg() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a debug register. - ASMJIT_INLINE_NODEBUG constexpr bool isDReg() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isDReg() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is an FPU register (80-bit). - ASMJIT_INLINE_NODEBUG constexpr bool isSt() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isSt() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a bound register. - ASMJIT_INLINE_NODEBUG constexpr bool isBnd() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isBnd() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! Tests whether the register is a TMM register. - ASMJIT_INLINE_NODEBUG constexpr bool isTmm() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isTmm() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + //! 
Tests whether the register is RIP. - ASMJIT_INLINE_NODEBUG constexpr bool isRip() const noexcept { return hasBaseSignature(RegTraits::kSignature); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isRip() const noexcept { return hasBaseSignature(RegTraits::kSignature); } template ASMJIT_INLINE_NODEBUG void setRegT(uint32_t rId) noexcept { @@ -138,25 +179,35 @@ public: setId(id); } + [[nodiscard]] static ASMJIT_INLINE_NODEBUG RegGroup groupOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kX86).regTypeToGroup(type); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG TypeId typeIdOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kX86).regTypeToTypeId(type); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG OperandSignature signatureOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kX86).regTypeToSignature(type); } template + [[nodiscard]] static ASMJIT_INLINE_NODEBUG RegGroup groupOfT() noexcept { return RegGroup(RegTraits::kGroup); } template + [[nodiscard]] static ASMJIT_INLINE_NODEBUG TypeId typeIdOfT() noexcept { return TypeId(RegTraits::kTypeId); } template + [[nodiscard]] static ASMJIT_INLINE_NODEBUG OperandSignature signatureOfT() noexcept { return OperandSignature{RegTraits::kSignature}; } + [[nodiscard]] static ASMJIT_INLINE_NODEBUG OperandSignature signatureOfVecByType(TypeId typeId) noexcept { return OperandSignature{typeId <= TypeId::_kVec128End ? uint32_t(RegTraits::kSignature) : typeId <= TypeId::_kVec256End ? uint32_t(RegTraits::kSignature) : uint32_t(RegTraits::kSignature)}; } + [[nodiscard]] static ASMJIT_INLINE_NODEBUG OperandSignature signatureOfVecBySize(uint32_t size) noexcept { return OperandSignature{size <= 16 ? uint32_t(RegTraits::kSignature) : size <= 32 ? uint32_t(RegTraits::kSignature) : @@ -164,47 +215,116 @@ public: } //! Tests whether the `op` operand is either a low or high 8-bit GPB register. 
+ [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpb(const Operand_& op) noexcept { // Check operand type, register group, and size. Not interested in register type. return op.signature().subset(Signature::kOpTypeMask | Signature::kRegGroupMask | Signature::kSizeMask) == (Signature::fromOpType(OperandType::kReg) | Signature::fromRegGroup(RegGroup::kGp) | Signature::fromSize(1)); } + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpbLo(const Operand_& op) noexcept { return op.as().isGpbLo(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpbHi(const Operand_& op) noexcept { return op.as().isGpbHi(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpw(const Operand_& op) noexcept { return op.as().isGpw(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpd(const Operand_& op) noexcept { return op.as().isGpd(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpq(const Operand_& op) noexcept { return op.as().isGpq(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isXmm(const Operand_& op) noexcept { return op.as().isXmm(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isYmm(const Operand_& op) noexcept { return op.as().isYmm(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isZmm(const Operand_& op) noexcept { return op.as().isZmm(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isMm(const Operand_& op) noexcept { return op.as().isMm(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isKReg(const Operand_& op) noexcept { return op.as().isKReg(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isSReg(const Operand_& op) noexcept { return op.as().isSReg(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isCReg(const Operand_& op) noexcept { return op.as().isCReg(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isDReg(const Operand_& op) noexcept { return op.as().isDReg(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isSt(const Operand_& op) noexcept { return 
op.as().isSt(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isBnd(const Operand_& op) noexcept { return op.as().isBnd(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isTmm(const Operand_& op) noexcept { return op.as().isTmm(); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isRip(const Operand_& op) noexcept { return op.as().isRip(); } + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpb(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isGpb(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpbLo(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isGpbLo(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpbHi(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isGpbHi(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpw(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isGpw(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpd(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isGpd(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isGpq(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isGpq(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isXmm(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isXmm(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isYmm(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isYmm(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isZmm(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isZmm(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isMm(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isMm(op)) & 
unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isKReg(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isKReg(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isSReg(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isSReg(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isCReg(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isCReg(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isDReg(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isDReg(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isSt(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isSt(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isBnd(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isBnd(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isTmm(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isTmm(op)) & unsigned(op.id() == rId)); } + + [[nodiscard]] static ASMJIT_INLINE_NODEBUG bool isRip(const Operand_& op, uint32_t rId) noexcept { return bool(unsigned(isRip(op)) & unsigned(op.id() == rId)); } }; @@ -237,16 +357,27 @@ public: }; //! Casts this register to 8-bit (LO) part. + [[nodiscard]] ASMJIT_INLINE_NODEBUG GpbLo r8() const noexcept; + //! Casts this register to 8-bit (LO) part. + [[nodiscard]] ASMJIT_INLINE_NODEBUG GpbLo r8Lo() const noexcept; + //! Casts this register to 8-bit (HI) part. + [[nodiscard]] ASMJIT_INLINE_NODEBUG GpbHi r8Hi() const noexcept; + //! Casts this register to 16-bit. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Gpw r16() const noexcept; + //! Casts this register to 32-bit. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Gpd r32() const noexcept; + //! Casts this register to 64-bit. 
+ [[nodiscard]] ASMJIT_INLINE_NODEBUG Gpq r64() const noexcept; }; @@ -255,20 +386,31 @@ class Vec : public Reg { ASMJIT_DEFINE_ABSTRACT_REG(Vec, Reg) //! Casts this register to XMM (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Xmm xmm() const noexcept; + //! Casts this register to YMM (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Ymm ymm() const noexcept; + //! Casts this register to ZMM (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Zmm zmm() const noexcept; //! Casts this register to XMM (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Vec v128() const noexcept; + //! Casts this register to YMM (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Vec v256() const noexcept; + //! Casts this register to ZMM (clone). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Vec v512() const noexcept; //! Casts this register to a register that has half the size (or XMM if it's already XMM). + [[nodiscard]] ASMJIT_INLINE_NODEBUG Vec half() const noexcept { return Vec(type() == RegType::kX86_Zmm ? signatureOfT() : signatureOfT(), id()); } @@ -380,37 +522,68 @@ namespace regs { #endif //! Creates an 8-bit low GPB register operand. -static ASMJIT_INLINE_NODEBUG constexpr GpbLo gpb(uint32_t rId) noexcept { return GpbLo(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR GpbLo gpb(uint32_t rId) noexcept { return GpbLo(rId); } + //! Creates an 8-bit low GPB register operand. -static ASMJIT_INLINE_NODEBUG constexpr GpbLo gpb_lo(uint32_t rId) noexcept { return GpbLo(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR GpbLo gpb_lo(uint32_t rId) noexcept { return GpbLo(rId); } + //! Creates an 8-bit high GPB register operand. -static ASMJIT_INLINE_NODEBUG constexpr GpbHi gpb_hi(uint32_t rId) noexcept { return GpbHi(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR GpbHi gpb_hi(uint32_t rId) noexcept { return GpbHi(rId); } + //! Creates a 16-bit GPW register operand. 
-static ASMJIT_INLINE_NODEBUG constexpr Gpw gpw(uint32_t rId) noexcept { return Gpw(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Gpw gpw(uint32_t rId) noexcept { return Gpw(rId); } + //! Creates a 32-bit GPD register operand. -static ASMJIT_INLINE_NODEBUG constexpr Gpd gpd(uint32_t rId) noexcept { return Gpd(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Gpd gpd(uint32_t rId) noexcept { return Gpd(rId); } + //! Creates a 64-bit GPQ register operand (64-bit). -static ASMJIT_INLINE_NODEBUG constexpr Gpq gpq(uint32_t rId) noexcept { return Gpq(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Gpq gpq(uint32_t rId) noexcept { return Gpq(rId); } + //! Creates a 128-bit XMM register operand. -static ASMJIT_INLINE_NODEBUG constexpr Xmm xmm(uint32_t rId) noexcept { return Xmm(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Xmm xmm(uint32_t rId) noexcept { return Xmm(rId); } + //! Creates a 256-bit YMM register operand. -static ASMJIT_INLINE_NODEBUG constexpr Ymm ymm(uint32_t rId) noexcept { return Ymm(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Ymm ymm(uint32_t rId) noexcept { return Ymm(rId); } + //! Creates a 512-bit ZMM register operand. -static ASMJIT_INLINE_NODEBUG constexpr Zmm zmm(uint32_t rId) noexcept { return Zmm(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Zmm zmm(uint32_t rId) noexcept { return Zmm(rId); } + //! Creates a 64-bit Mm register operand. -static ASMJIT_INLINE_NODEBUG constexpr Mm mm(uint32_t rId) noexcept { return Mm(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mm mm(uint32_t rId) noexcept { return Mm(rId); } + //! Creates a 64-bit K register operand. -static ASMJIT_INLINE_NODEBUG constexpr KReg k(uint32_t rId) noexcept { return KReg(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR KReg k(uint32_t rId) noexcept { return KReg(rId); } + //! Creates a 32-bit or 64-bit control register operand. 
-static ASMJIT_INLINE_NODEBUG constexpr CReg cr(uint32_t rId) noexcept { return CReg(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR CReg cr(uint32_t rId) noexcept { return CReg(rId); } + //! Creates a 32-bit or 64-bit debug register operand. -static ASMJIT_INLINE_NODEBUG constexpr DReg dr(uint32_t rId) noexcept { return DReg(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR DReg dr(uint32_t rId) noexcept { return DReg(rId); } + //! Creates an 80-bit st register operand. -static ASMJIT_INLINE_NODEBUG constexpr St st(uint32_t rId) noexcept { return St(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR St st(uint32_t rId) noexcept { return St(rId); } + //! Creates a 128-bit bound register operand. -static ASMJIT_INLINE_NODEBUG constexpr Bnd bnd(uint32_t rId) noexcept { return Bnd(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Bnd bnd(uint32_t rId) noexcept { return Bnd(rId); } + //! Creates a TMM register operand. -static ASMJIT_INLINE_NODEBUG constexpr Tmm tmm(uint32_t rId) noexcept { return Tmm(rId); } +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Tmm tmm(uint32_t rId) noexcept { return Tmm(rId); } static constexpr GpbLo al = GpbLo(Gp::kIdAx); static constexpr GpbLo bl = GpbLo(Gp::kIdBx); @@ -682,28 +855,25 @@ public: //! \name Constants //! \{ - //! Additional bits of operand's signature used by `x86::Mem`. - enum AdditionalBits : uint32_t { - // Memory address type (2 bits). - // |........|........|XX......|........| - kSignatureMemAddrTypeShift = 14, - kSignatureMemAddrTypeMask = 0x03u << kSignatureMemAddrTypeShift, + // Memory address type (2 bits). + // |........|........|XX......|........| + static inline constexpr uint32_t kSignatureMemAddrTypeShift = 14; + static inline constexpr uint32_t kSignatureMemAddrTypeMask = 0x03u << kSignatureMemAddrTypeShift; - // Memory shift amount (2 bits). 
- // |........|......XX|........|........| - kSignatureMemShiftValueShift = 16, - kSignatureMemShiftValueMask = 0x03u << kSignatureMemShiftValueShift, + // Memory shift amount (2 bits). + // |........|......XX|........|........| + static inline constexpr uint32_t kSignatureMemShiftValueShift = 16; + static inline constexpr uint32_t kSignatureMemShiftValueMask = 0x03u << kSignatureMemShiftValueShift; - // Memory segment reg (3 bits). - // |........|...XXX..|........|........| - kSignatureMemSegmentShift = 18, - kSignatureMemSegmentMask = 0x07u << kSignatureMemSegmentShift, + // Memory segment reg (3 bits). + // |........|...XXX..|........|........| + static inline constexpr uint32_t kSignatureMemSegmentShift = 18; + static inline constexpr uint32_t kSignatureMemSegmentMask = 0x07u << kSignatureMemSegmentShift; - // Memory broadcast type (3 bits). - // |........|XXX.....|........|........| - kSignatureMemBroadcastShift = 21, - kSignatureMemBroadcastMask = 0x7u << kSignatureMemBroadcastShift - }; + // Memory broadcast type (3 bits). + // |........|XXX.....|........|........| + static inline constexpr uint32_t kSignatureMemBroadcastShift = 21; + static inline constexpr uint32_t kSignatureMemBroadcastMask = 0x7u << kSignatureMemBroadcastShift; //! Address type. enum class AddrType : uint32_t { @@ -745,25 +915,25 @@ public: //! \{ //! Creates a default `Mem` operand that points to [0]. 
- ASMJIT_INLINE_NODEBUG constexpr Mem() noexcept + ASMJIT_INLINE_CONSTEXPR Mem() noexcept : BaseMem() {} - ASMJIT_INLINE_NODEBUG constexpr Mem(const Mem& other) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(const Mem& other) noexcept : BaseMem(other) {} ASMJIT_INLINE_NODEBUG explicit Mem(Globals::NoInit_) noexcept : BaseMem(Globals::NoInit) {} - ASMJIT_INLINE_NODEBUG constexpr Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept : BaseMem(signature, baseId, indexId, offset) {} - ASMJIT_INLINE_NODEBUG constexpr Mem(const Label& base, int32_t off, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(const Label& base, int32_t off, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | Signature::fromMemBaseType(RegType::kLabelTag) | Signature::fromSize(size) | signature, base.id(), 0, off) {} - ASMJIT_INLINE_NODEBUG constexpr Mem(const Label& base, const BaseReg& index, uint32_t shift, int32_t off, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(const Label& base, const BaseReg& index, uint32_t shift, int32_t off, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | Signature::fromMemBaseType(RegType::kLabelTag) | Signature::fromMemIndexType(index.type()) | @@ -771,13 +941,13 @@ public: Signature::fromSize(size) | signature, base.id(), index.id(), off) {} - ASMJIT_INLINE_NODEBUG constexpr Mem(const BaseReg& base, int32_t off, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(const BaseReg& base, int32_t off, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | 
Signature::fromMemBaseType(base.type()) | Signature::fromSize(size) | signature, base.id(), 0, off) {} - ASMJIT_INLINE_NODEBUG constexpr Mem(const BaseReg& base, const BaseReg& index, uint32_t shift, int32_t off, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(const BaseReg& base, const BaseReg& index, uint32_t shift, int32_t off, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | Signature::fromMemBaseType(base.type()) | Signature::fromMemIndexType(index.type()) | @@ -785,12 +955,12 @@ public: Signature::fromSize(size) | signature, base.id(), index.id(), off) {} - ASMJIT_INLINE_NODEBUG constexpr explicit Mem(uint64_t base, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR explicit Mem(uint64_t base, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | Signature::fromSize(size) | signature, uint32_t(base >> 32), 0, int32_t(uint32_t(base & 0xFFFFFFFFu))) {} - ASMJIT_INLINE_NODEBUG constexpr Mem(uint64_t base, const BaseReg& index, uint32_t shift = 0, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept + ASMJIT_INLINE_CONSTEXPR Mem(uint64_t base, const BaseReg& index, uint32_t shift = 0, uint32_t size = 0, Signature signature = OperandSignature{0}) noexcept : BaseMem(Signature::fromOpType(OperandType::kMem) | Signature::fromMemIndexType(index.type()) | Signature::fromValue(shift) | @@ -810,24 +980,28 @@ public: //! \{ //! Clones the memory operand. - ASMJIT_INLINE_NODEBUG constexpr Mem clone() const noexcept { return Mem(*this); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem clone() const noexcept { return Mem(*this); } //! Creates a copy of this memory operand adjusted by `off`. 
- inline Mem cloneAdjusted(int64_t off) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem cloneAdjusted(int64_t off) const noexcept { Mem result(*this); result.addOffset(off); return result; } //! Creates a copy of this memory operand resized to `size`. - inline Mem cloneResized(uint32_t size) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem cloneResized(uint32_t size) const noexcept { Mem result(*this); result.setSize(size); return result; } //! Creates a copy of this memory operand with a broadcast `bcst`. - ASMJIT_INLINE_NODEBUG constexpr Mem cloneBroadcasted(Broadcast bcst) const noexcept { + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem cloneBroadcasted(Broadcast bcst) const noexcept { return Mem((_signature & ~Signature{kSignatureMemBroadcastMask}) | Signature::fromValue(bcst), _baseId, _data[0], int32_t(_data[1])); } @@ -839,16 +1013,18 @@ public: //! Converts memory `baseType` and `baseId` to `x86::Reg` instance. //! //! The memory must have a valid base register otherwise the result will be wrong. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Reg baseReg() const noexcept { return Reg::fromTypeAndId(baseType(), baseId()); } //! Converts memory `indexType` and `indexId` to `x86::Reg` instance. //! //! The memory must have a valid index register otherwise the result will be wrong. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Reg indexReg() const noexcept { return Reg::fromTypeAndId(indexType(), indexId()); } using BaseMem::setIndex; - ASMJIT_INLINE_NODEBUG void setIndex(const BaseReg& index, uint32_t shift) noexcept { + ASMJIT_INLINE_CONSTEXPR void setIndex(const BaseReg& index, uint32_t shift) noexcept { setIndex(index); setShift(shift); } @@ -859,9 +1035,12 @@ public: //! \{ //! Tests whether the memory operand specifies a size (i.e. the size is not zero). 
- ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return _signature.hasField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasSize() const noexcept { return _signature.hasField(); } + //! Tests whether the memory operand size matches size `s`. - ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasSize(uint32_t s) const noexcept { return size() == s; } //! Returns the size of the memory operand in bytes. //! @@ -869,10 +1048,11 @@ public: //! returned value would be zero. However, some instruction require the size to select between multiple variations, //! so in some cases size is required would be non-zero (for example `inc [mem], immediate` requires size to //! distinguish between 8-bit, 16-bit, 32-bit, and 64-bit increments. - ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t size() const noexcept { return _signature.getField(); } //! Sets the memory operand size (in bytes). - ASMJIT_INLINE_NODEBUG void setSize(uint32_t size) noexcept { _signature.setField(size); } + ASMJIT_INLINE_CONSTEXPR void setSize(uint32_t size) noexcept { _signature.setField(size); } //! \} @@ -882,21 +1062,28 @@ public: //! Returns the address type of the memory operand. //! //! By default, address type of newly created memory operands is always \ref AddrType::kDefault. - ASMJIT_INLINE_NODEBUG constexpr AddrType addrType() const noexcept { return (AddrType)_signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR AddrType addrType() const noexcept { return (AddrType)_signature.getField(); } + //! Sets the address type to `addrType`. - ASMJIT_INLINE_NODEBUG void setAddrType(AddrType addrType) noexcept { _signature.setField(uint32_t(addrType)); } + ASMJIT_INLINE_CONSTEXPR void setAddrType(AddrType addrType) noexcept { _signature.setField(uint32_t(addrType)); } + //! 
Resets the address type to \ref AddrType::kDefault. - ASMJIT_INLINE_NODEBUG void resetAddrType() noexcept { _signature.setField(uint32_t(AddrType::kDefault)); } + ASMJIT_INLINE_CONSTEXPR void resetAddrType() noexcept { _signature.setField(uint32_t(AddrType::kDefault)); } //! Tests whether the address type is \ref AddrType::kAbs. - ASMJIT_INLINE_NODEBUG constexpr bool isAbs() const noexcept { return addrType() == AddrType::kAbs; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isAbs() const noexcept { return addrType() == AddrType::kAbs; } + //! Sets the address type to \ref AddrType::kAbs. - ASMJIT_INLINE_NODEBUG void setAbs() noexcept { setAddrType(AddrType::kAbs); } + ASMJIT_INLINE_CONSTEXPR void setAbs() noexcept { setAddrType(AddrType::kAbs); } //! Tests whether the address type is \ref AddrType::kRel. - ASMJIT_INLINE_NODEBUG constexpr bool isRel() const noexcept { return addrType() == AddrType::kRel; } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool isRel() const noexcept { return addrType() == AddrType::kRel; } + //! Sets the address type to \ref AddrType::kRel. - ASMJIT_INLINE_NODEBUG void setRel() noexcept { setAddrType(AddrType::kRel); } + ASMJIT_INLINE_CONSTEXPR void setRel() noexcept { setAddrType(AddrType::kRel); } //! \} @@ -904,18 +1091,25 @@ public: //! \{ //! Tests whether the memory operand has a segment override. - ASMJIT_INLINE_NODEBUG constexpr bool hasSegment() const noexcept { return _signature.hasField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasSegment() const noexcept { return _signature.hasField(); } + //! Returns the associated segment override as `SReg` operand. - ASMJIT_INLINE_NODEBUG constexpr SReg segment() const noexcept { return SReg(segmentId()); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR SReg segment() const noexcept { return SReg(segmentId()); } + //! Returns segment override register id, see `SReg::Id`. 
- ASMJIT_INLINE_NODEBUG constexpr uint32_t segmentId() const noexcept { return _signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t segmentId() const noexcept { return _signature.getField(); } //! Sets the segment override to `seg`. - ASMJIT_INLINE_NODEBUG void setSegment(const SReg& seg) noexcept { setSegment(seg.id()); } + ASMJIT_INLINE_CONSTEXPR void setSegment(const SReg& seg) noexcept { setSegment(seg.id()); } + //! Sets the segment override to `id`. - ASMJIT_INLINE_NODEBUG void setSegment(uint32_t rId) noexcept { _signature.setField(rId); } + ASMJIT_INLINE_CONSTEXPR void setSegment(uint32_t rId) noexcept { _signature.setField(rId); } + //! Resets the segment override. - ASMJIT_INLINE_NODEBUG void resetSegment() noexcept { _signature.setField(0); } + ASMJIT_INLINE_CONSTEXPR void resetSegment() noexcept { _signature.setField(0); } //! \} @@ -923,13 +1117,18 @@ public: //! \{ //! Tests whether the memory operand has shift (aka scale) value. - ASMJIT_INLINE_NODEBUG constexpr bool hasShift() const noexcept { return _signature.hasField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasShift() const noexcept { return _signature.hasField(); } + //! Returns the memory operand's shift (aka scale) value. - ASMJIT_INLINE_NODEBUG constexpr uint32_t shift() const noexcept { return _signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR uint32_t shift() const noexcept { return _signature.getField(); } + //! Sets the memory operand's shift (aka scale) value. - ASMJIT_INLINE_NODEBUG void setShift(uint32_t shift) noexcept { _signature.setField(shift); } + ASMJIT_INLINE_CONSTEXPR void setShift(uint32_t shift) noexcept { _signature.setField(shift); } + //! Resets the memory operand's shift (aka scale) value to zero. - ASMJIT_INLINE_NODEBUG void resetShift() noexcept { _signature.setField(0); } + ASMJIT_INLINE_CONSTEXPR void resetShift() noexcept { _signature.setField(0); } //! \} @@ -937,165 +1136,224 @@ public: //! \{ //! 
Tests whether the memory operand has broadcast {1tox}. - ASMJIT_INLINE_NODEBUG constexpr bool hasBroadcast() const noexcept { return _signature.hasField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR bool hasBroadcast() const noexcept { return _signature.hasField(); } + //! Returns the memory operand's broadcast. - ASMJIT_INLINE_NODEBUG constexpr Broadcast getBroadcast() const noexcept { return (Broadcast)_signature.getField(); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Broadcast getBroadcast() const noexcept { return (Broadcast)_signature.getField(); } + //! Sets the memory operand's broadcast. - ASMJIT_INLINE_NODEBUG void setBroadcast(Broadcast b) noexcept { _signature.setField(uint32_t(b)); } + ASMJIT_INLINE_CONSTEXPR void setBroadcast(Broadcast b) noexcept { _signature.setField(uint32_t(b)); } + //! Resets the memory operand's broadcast to none. - ASMJIT_INLINE_NODEBUG void resetBroadcast() noexcept { _signature.setField(0); } + ASMJIT_INLINE_CONSTEXPR void resetBroadcast() noexcept { _signature.setField(0); } //! Returns a new `Mem` without a broadcast (the possible broadcast is cleared). - ASMJIT_INLINE_NODEBUG constexpr Mem _1to1() const noexcept { return cloneBroadcasted(Broadcast::kNone); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem _1to1() const noexcept { return cloneBroadcasted(Broadcast::kNone); } + //! Returns a new `Mem` with {1to2} broadcast (AVX-512). - ASMJIT_INLINE_NODEBUG constexpr Mem _1to2() const noexcept { return cloneBroadcasted(Broadcast::k1To2); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem _1to2() const noexcept { return cloneBroadcasted(Broadcast::k1To2); } + //! Returns a new `Mem` with {1to4} broadcast (AVX-512). - ASMJIT_INLINE_NODEBUG constexpr Mem _1to4() const noexcept { return cloneBroadcasted(Broadcast::k1To4); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem _1to4() const noexcept { return cloneBroadcasted(Broadcast::k1To4); } + //! Returns a new `Mem` with {1to8} broadcast (AVX-512). 
- ASMJIT_INLINE_NODEBUG constexpr Mem _1to8() const noexcept { return cloneBroadcasted(Broadcast::k1To8); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem _1to8() const noexcept { return cloneBroadcasted(Broadcast::k1To8); } + //! Returns a new `Mem` with {1to16} broadcast (AVX-512). - ASMJIT_INLINE_NODEBUG constexpr Mem _1to16() const noexcept { return cloneBroadcasted(Broadcast::k1To16); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem _1to16() const noexcept { return cloneBroadcasted(Broadcast::k1To16); } + //! Returns a new `Mem` with {1to32} broadcast (AVX-512). - ASMJIT_INLINE_NODEBUG constexpr Mem _1to32() const noexcept { return cloneBroadcasted(Broadcast::k1To32); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem _1to32() const noexcept { return cloneBroadcasted(Broadcast::k1To32); } + //! Returns a new `Mem` with {1to64} broadcast (AVX-512). - ASMJIT_INLINE_NODEBUG constexpr Mem _1to64() const noexcept { return cloneBroadcasted(Broadcast::k1To64); } + [[nodiscard]] + ASMJIT_INLINE_CONSTEXPR Mem _1to64() const noexcept { return cloneBroadcasted(Broadcast::k1To64); } //! \} }; //! Creates `[base.reg + offset]` memory operand. -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, int32_t offset = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, int32_t offset = 0, uint32_t size = 0) noexcept { return Mem(base, offset, size); } + //! Creates `[base.reg + (index << shift) + offset]` memory operand (scalar index). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept { return Mem(base, index, shift, offset, size); } + //! Creates `[base.reg + (index << shift) + offset]` memory operand (vector index). 
-static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, const Vec& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, const Vec& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept { return Mem(base, index, shift, offset, size); } //! Creates `[base + offset]` memory operand. -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Label& base, int32_t offset = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Label& base, int32_t offset = 0, uint32_t size = 0) noexcept { return Mem(base, offset, size); } + //! Creates `[base + (index << shift) + offset]` memory operand. -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Label& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Label& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept { return Mem(base, index, shift, offset, size); } + //! Creates `[base + (index << shift) + offset]` memory operand. -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Label& base, const Vec& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Label& base, const Vec& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept { return Mem(base, index, shift, offset, size); } //! Creates `[rip + offset]` memory operand. -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Rip& rip_, int32_t offset = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Rip& rip_, int32_t offset = 0, uint32_t size = 0) noexcept { return Mem(rip_, offset, size); } //! Creates `[base]` absolute memory operand. 
-static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(uint64_t base, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(uint64_t base, uint32_t size = 0) noexcept { return Mem(base, size); } + //! Creates `[base + (index.reg << shift)]` absolute memory operand. -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(uint64_t base, const Reg& index, uint32_t shift = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(uint64_t base, const Reg& index, uint32_t shift = 0, uint32_t size = 0) noexcept { return Mem(base, index, shift, size); } + //! Creates `[base + (index.reg << shift)]` absolute memory operand. -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(uint64_t base, const Vec& index, uint32_t shift = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr(uint64_t base, const Vec& index, uint32_t shift = 0, uint32_t size = 0) noexcept { return Mem(base, index, shift, size); } //! Creates `[base]` absolute memory operand (absolute). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_abs(uint64_t base, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr_abs(uint64_t base, uint32_t size = 0) noexcept { return Mem(base, size, OperandSignature::fromValue(Mem::AddrType::kAbs)); } + //! Creates `[base + (index.reg << shift)]` absolute memory operand (absolute). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_abs(uint64_t base, const Reg& index, uint32_t shift = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr_abs(uint64_t base, const Reg& index, uint32_t shift = 0, uint32_t size = 0) noexcept { return Mem(base, index, shift, size, OperandSignature::fromValue(Mem::AddrType::kAbs)); } + //! Creates `[base + (index.reg << shift)]` absolute memory operand (absolute). 
-static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_abs(uint64_t base, const Vec& index, uint32_t shift = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr_abs(uint64_t base, const Vec& index, uint32_t shift = 0, uint32_t size = 0) noexcept { return Mem(base, index, shift, size, OperandSignature::fromValue(Mem::AddrType::kAbs)); } //! Creates `[base]` relative memory operand (relative). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_rel(uint64_t base, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr_rel(uint64_t base, uint32_t size = 0) noexcept { return Mem(base, size, OperandSignature::fromValue(Mem::AddrType::kRel)); } + //! Creates `[base + (index.reg << shift)]` relative memory operand (relative). -static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_rel(uint64_t base, const Reg& index, uint32_t shift = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr_rel(uint64_t base, const Reg& index, uint32_t shift = 0, uint32_t size = 0) noexcept { return Mem(base, index, shift, size, OperandSignature::fromValue(Mem::AddrType::kRel)); } + //! Creates `[base + (index.reg << shift)]` relative memory operand (relative). 
-static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_rel(uint64_t base, const Vec& index, uint32_t shift = 0, uint32_t size = 0) noexcept { +[[nodiscard]] +static ASMJIT_INLINE_CONSTEXPR Mem ptr_rel(uint64_t base, const Vec& index, uint32_t shift = 0, uint32_t size = 0) noexcept { return Mem(base, index, shift, size, OperandSignature::fromValue(Mem::AddrType::kRel)); } #define ASMJIT_MEM_PTR(FUNC, SIZE) \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC( \ const Gp& base, int32_t offset = 0) noexcept \ { return Mem(base, offset, SIZE); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC( \ const Gp& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0) noexcept \ { return Mem(base, index, shift, offset, SIZE); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC( \ const Gp& base, const Vec& index, uint32_t shift = 0, int32_t offset = 0) noexcept \ { return Mem(base, index, shift, offset, SIZE); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC( \ const Label& base, int32_t offset = 0) noexcept \ { return Mem(base, offset, SIZE); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC( \ const Label& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0) noexcept \ { return Mem(base, index, shift, offset, SIZE); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC( \ const Rip& rip_, int32_t offset = 0) noexcept \ { return Mem(rip_, offset, SIZE); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC( \ uint64_t base) noexcept \ { return Mem(base, SIZE); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr 
Mem FUNC( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC( \ uint64_t base, const Gp& index, uint32_t shift = 0) noexcept \ { return Mem(base, index, shift, SIZE); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC( \ uint64_t base, const Vec& index, uint32_t shift = 0) noexcept \ { return Mem(base, index, shift, SIZE); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC##_abs( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC##_abs( \ uint64_t base) noexcept \ { return Mem(base, SIZE, \ OperandSignature::fromValue(Mem::AddrType::kAbs)); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC##_abs( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC##_abs( \ uint64_t base, const Gp& index, uint32_t shift = 0) noexcept \ { return Mem(base, index, shift, SIZE, \ OperandSignature::fromValue(Mem::AddrType::kAbs)); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC##_abs( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC##_abs( \ uint64_t base, const Vec& index, uint32_t shift = 0) noexcept \ { return Mem(base, index, shift, SIZE, \ OperandSignature::fromValue(Mem::AddrType::kAbs)); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC##_rel( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC##_rel( \ uint64_t base) noexcept \ { return Mem(base, SIZE, \ OperandSignature::fromValue(Mem::AddrType::kRel)); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC##_rel( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC##_rel( \ uint64_t base, const Gp& index, uint32_t shift = 0) noexcept \ { return Mem(base, index, shift, SIZE, \ OperandSignature::fromValue(Mem::AddrType::kRel)); } \ \ - static ASMJIT_INLINE_NODEBUG constexpr Mem FUNC##_rel( \ + [[nodiscard]] \ + static ASMJIT_INLINE_CONSTEXPR Mem FUNC##_rel( \ uint64_t base, const Vec& index, uint32_t shift = 0) noexcept \ { return Mem(base, index, shift, SIZE, \ 
OperandSignature::fromValue(Mem::AddrType::kRel)); } diff --git a/src/asmjit/x86/x86rapass.cpp b/src/asmjit/x86/x86rapass.cpp index c9bf88b..d233b86 100644 --- a/src/asmjit/x86/x86rapass.cpp +++ b/src/asmjit/x86/x86rapass.cpp @@ -21,7 +21,8 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) // x86::X86RAPass - Utilities // ========================== -static ASMJIT_FORCE_INLINE uint64_t raImmMaskFromSize(uint32_t size) noexcept { +[[nodiscard]] +static ASMJIT_INLINE uint64_t raImmMaskFromSize(uint32_t size) noexcept { ASMJIT_ASSERT(size > 0 && size < 256); static constexpr uint64_t masks[] = { 0x00000000000000FFu, // 1 @@ -45,7 +46,8 @@ static const RegMask raConsecutiveLeadCountToRegMaskFilter[5] = { 0x11111111u // [4] Every fourth register. }; -static ASMJIT_FORCE_INLINE RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept { +[[nodiscard]] +static ASMJIT_INLINE RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept { static constexpr RATiedFlags map[] = { RATiedFlags::kNone, RATiedFlags::kRead | RATiedFlags::kUse, // kRead @@ -60,16 +62,19 @@ static ASMJIT_FORCE_INLINE RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlag return map[uint32_t(rwFlags & (OpRWFlags::kRW | OpRWFlags::kRegMem))]; } -static ASMJIT_FORCE_INLINE RATiedFlags raRegRwFlags(OpRWFlags flags) noexcept { +[[nodiscard]] +static ASMJIT_INLINE RATiedFlags raRegRwFlags(OpRWFlags flags) noexcept { return (RATiedFlags)raUseOutFlagsFromRWFlags(flags); } -static ASMJIT_FORCE_INLINE RATiedFlags raMemBaseRwFlags(OpRWFlags flags) noexcept { +[[nodiscard]] +static ASMJIT_INLINE RATiedFlags raMemBaseRwFlags(OpRWFlags flags) noexcept { constexpr uint32_t kShift = Support::ConstCTZ::value; return (RATiedFlags)raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> kShift) & OpRWFlags::kRW); } -static ASMJIT_FORCE_INLINE RATiedFlags raMemIndexRwFlags(OpRWFlags flags) noexcept { +[[nodiscard]] +static ASMJIT_INLINE RATiedFlags raMemIndexRwFlags(OpRWFlags flags) noexcept { constexpr uint32_t kShift = 
Support::ConstCTZ::value; return (RATiedFlags)raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> kShift) & OpRWFlags::kRW); } @@ -90,23 +95,39 @@ public: _avxEnabled(pass->avxEnabled()) { } + [[nodiscard]] ASMJIT_INLINE_NODEBUG Compiler* cc() const noexcept { return static_cast(_cc); } + [[nodiscard]] ASMJIT_INLINE_NODEBUG uint32_t choose(uint32_t sseInst, uint32_t avxInst) const noexcept { return _avxEnabled ? avxInst : sseInst; } + [[nodiscard]] Error onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& ib) noexcept; + [[nodiscard]] Error onBeforeInvoke(InvokeNode* invokeNode) noexcept; + + [[nodiscard]] Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept; + [[nodiscard]] Error moveVecToPtr(InvokeNode* invokeNode, const FuncValue& arg, const Vec& src, BaseReg* out) noexcept; + + [[nodiscard]] Error moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept; + + [[nodiscard]] Error moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept; + + [[nodiscard]] Error moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept; + [[nodiscard]] Error onBeforeRet(FuncRetNode* funcRet) noexcept; + + [[nodiscard]] Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept; }; @@ -148,13 +169,15 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i if (instInfo.isVex() && !instInfo.isEvexCompatible()) { if (instInfo.isEvexKRegOnly()) { // EVEX encodable only if the first operand is K register (compare instructions). - if (!Reg::isKReg(opArray[0])) + if (!Reg::isKReg(opArray[0])) { instructionAllowedRegs = 0xFFFFu; + } } else if (instInfo.isEvexTwoOpOnly()) { // EVEX encodable only if the instruction has two operands (gather instructions). 
- if (opCount != 2) + if (opCount != 2) { instructionAllowedRegs = 0xFFFFu; + } } else { instructionAllowedRegs = 0xFFFFu; @@ -183,8 +206,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i RATiedFlags flags = raRegRwFlags(opRwInfo.opFlags()); RegMask allowedRegs = instructionAllowedRegs; - if (opRwInfo.isUnique()) + if (opRwInfo.isUnique()) { flags |= RATiedFlags::kUnique; + } // X86-specific constraints related to LO|HI general purpose registers. This is only required when the // register is part of the encoding. If the register is fixed we won't restrict anything as it doesn't @@ -242,12 +266,14 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i if (opRwInfo.consecutiveLeadCount()) { // There must be a single consecutive register lead, otherwise the RW data is invalid. - if (consecutiveLeadId != Globals::kInvalidId) + if (consecutiveLeadId != Globals::kInvalidId) { return DebugUtils::errored(kErrorInvalidState); + } // A consecutive lead register cannot be used as a consecutive +1/+2/+3 register, the registers must be distinct. 
- if (RATiedReg::consecutiveDataFromFlags(flags) != 0) + if (RATiedReg::consecutiveDataFromFlags(flags) != 0) { return DebugUtils::errored(kErrorNotConsecutiveRegs); + } flags |= RATiedFlags::kLeadConsecutive | RATiedReg::consecutiveDataToFlags(opRwInfo.consecutiveLeadCount() - 1); consecutiveLeadId = workReg->workId(); @@ -270,11 +296,13 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i flags |= RATiedFlags::kUseFixed; } else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) { - if (consecutiveLeadId == Globals::kInvalidId) + if (consecutiveLeadId == Globals::kInvalidId) { return DebugUtils::errored(kErrorInvalidState); + } - if (consecutiveLeadId == workReg->workId()) + if (consecutiveLeadId == workReg->workId()) { return DebugUtils::errored(kErrorOverlappedRegs); + } flags |= RATiedFlags::kUseConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset); } @@ -286,22 +314,26 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i flags |= RATiedFlags::kOutFixed; } else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) { - if (consecutiveLeadId == Globals::kInvalidId) + if (consecutiveLeadId == Globals::kInvalidId) { return DebugUtils::errored(kErrorInvalidState); + } - if (consecutiveLeadId == workReg->workId()) + if (consecutiveLeadId == workReg->workId()) { return DebugUtils::errored(kErrorOverlappedRegs); + } flags |= RATiedFlags::kOutConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset); } } ASMJIT_PROPAGATE(ib.add(workReg, flags, useRegs, useId, useRewriteMask, outRegs, outId, outRewriteMask, opRwInfo.rmSize(), consecutiveParent)); - if (singleRegOps == i) + if (singleRegOps == i) { singleRegOps++; + } - if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive)) + if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive)) { consecutiveParent = 
workReg->workId(); + } } } else if (op.isMem()) { @@ -313,7 +345,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i if (mem.isRegHome()) { RAWorkReg* workReg; ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg)); - _pass->getOrCreateStackSlot(workReg); + if (ASMJIT_UNLIKELY(!_pass->getOrCreateStackSlot(workReg))) { + return DebugUtils::errored(kErrorOutOfMemory); + } } else if (mem.hasBaseReg()) { uint32_t vIndex = Operand::virtIdToIndex(mem.baseId()); @@ -367,10 +401,12 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i uint32_t useRewriteMask = 0; uint32_t outRewriteMask = 0; - if (Support::test(flags, RATiedFlags::kUse)) + if (Support::test(flags, RATiedFlags::kUse)) { useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId])); - else + } + else { outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId])); + } ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, inOutRegs, useId, useRewriteMask, inOutRegs, outId, outRewriteMask)); } @@ -402,8 +438,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i } else { RegGroup group = inst->extraReg().group(); - if (group == RegGroup::kX86_K && inst->extraReg().id() != 0) + if (group == RegGroup::kX86_K && inst->extraReg().id() != 0) { singleRegOps = 0; + } } } @@ -419,8 +456,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i const OpRWInfo& opRwInfo = rwInfo.operand(0); uint64_t remainingByteMask = vReg->workReg()->regByteMask() & ~opRwInfo.writeByteMask(); - if (remainingByteMask == 0u || (remainingByteMask & opRwInfo.extendByteMask()) == 0) + if (remainingByteMask == 0u || (remainingByteMask & opRwInfo.extendByteMask()) == 0) { ib.addInstRWFlags(InstRWFlags::kMovOp); + } } } } @@ -453,10 +491,11 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& 
cf, RAInstBuilder& i case Inst::kIdOr: { // Sets the value of the destination register to -1, previous content unused. if (reg.size() >= 4 || reg.size() >= workRegSize) { - if (imm.value() == -1 || imm.valueAs() == raImmMaskFromSize(reg.size())) + if (imm.value() == -1 || imm.valueAs() == raImmMaskFromSize(reg.size())) { sameRegHint = InstSameRegHint::kWO; + } } - ASMJIT_FALLTHROUGH; + [[fallthrough]]; } case Inst::kIdAdd: @@ -470,8 +509,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& i case Inst::kIdXor: { // Updates [E|R]FLAGS without changing the content. if (reg.size() != 4 || reg.size() >= workRegSize) { - if (imm.value() == 0) + if (imm.value() == 0) { sameRegHint = InstSameRegHint::kRO; + } } break; } @@ -523,14 +563,16 @@ Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept { for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) { const FuncValuePack& argPack = fd.argPack(argIndex); for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { - if (!argPack[valueIndex]) + if (!argPack[valueIndex]) { break; + } const FuncValue& arg = argPack[valueIndex]; const Operand& op = invokeNode->arg(argIndex, valueIndex); - if (op.isNone()) + if (op.isNone()) { continue; + } if (op.isReg()) { const Reg& reg = op.as(); @@ -543,14 +585,15 @@ Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept { if (arg.isIndirect()) { if (reg.isGp()) { - if (reg.type() != nativeRegType) + if (reg.type() != nativeRegType) { return DebugUtils::errored(kErrorInvalidAssignment); + } // It's considered allocated if this is an indirect argument and the user used GP. 
continue; } BaseReg indirectReg; - moveVecToPtr(invokeNode, arg, reg.as(), &indirectReg); + ASMJIT_PROPAGATE(moveVecToPtr(invokeNode, arg, reg.as(), &indirectReg)); invokeNode->_args[argIndex][valueIndex] = indirectReg; } else { @@ -563,15 +606,16 @@ Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept { else { if (arg.isIndirect()) { if (reg.isGp()) { - if (reg.type() != nativeRegType) + if (reg.type() != nativeRegType) { return DebugUtils::errored(kErrorInvalidAssignment); + } ASMJIT_PROPAGATE(moveRegToStackArg(invokeNode, arg, reg)); continue; } BaseReg indirectReg; - moveVecToPtr(invokeNode, arg, reg.as(), &indirectReg); + ASMJIT_PROPAGATE(moveVecToPtr(invokeNode, arg, reg.as(), &indirectReg)); ASMJIT_PROPAGATE(moveRegToStackArg(invokeNode, arg, indirectReg)); } else { @@ -593,14 +637,16 @@ Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept { } cc()->_setCursor(invokeNode); - if (fd.hasFlag(CallConvFlags::kCalleePopsStack) && fd.argStackSize() != 0) + if (fd.hasFlag(CallConvFlags::kCalleePopsStack) && fd.argStackSize() != 0) { ASMJIT_PROPAGATE(cc()->sub(cc()->zsp(), fd.argStackSize())); + } if (fd.hasRet()) { for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { const FuncValue& ret = fd.ret(valueIndex); - if (!ret) + if (!ret) { break; + } const Operand& op = invokeNode->ret(valueIndex); if (op.isReg()) { @@ -610,15 +656,17 @@ Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept { if (ret.isReg()) { if (ret.regType() == RegType::kX86_St) { - if (workReg->group() != RegGroup::kVec) + if (workReg->group() != RegGroup::kVec) { return DebugUtils::errored(kErrorInvalidAssignment); + } Reg dst(workReg->signature(), workReg->virtId()); Mem mem; TypeId typeId = TypeUtils::scalarOf(workReg->typeId()); - if (ret.hasTypeId()) + if (ret.hasTypeId()) { typeId = ret.typeId(); + } switch (typeId) { case TypeId::kFloat32: @@ -668,14 +716,16 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, 
RAInstBuilder& ib) noexcept for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) { const FuncValuePack& argPack = fd.argPack(argIndex); for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { - if (!argPack[valueIndex]) + if (!argPack[valueIndex]) { continue; + } const FuncValue& arg = argPack[valueIndex]; const Operand& op = invokeNode->arg(argIndex, valueIndex); - if (op.isNone()) + if (op.isNone()) { continue; + } if (op.isReg()) { const Reg& reg = op.as(); @@ -684,8 +734,9 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept if (arg.isIndirect()) { RegGroup regGroup = workReg->group(); - if (regGroup != RegGroup::kGp) + if (regGroup != RegGroup::kGp) { return DebugUtils::errored(kErrorInvalidState); + } ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId())); } else if (arg.isReg()) { @@ -702,13 +753,15 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept for (uint32_t retIndex = 0; retIndex < Globals::kMaxValuePack; retIndex++) { const FuncValue& ret = fd.ret(retIndex); - if (!ret) + if (!ret) { break; + } // Not handled here... const Operand& op = invokeNode->ret(retIndex); - if (ret.regType() == RegType::kX86_St) + if (ret.regType() == RegType::kX86_St) { continue; + } if (op.isReg()) { const Reg& reg = op.as(); @@ -730,8 +783,9 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept } // Setup clobbered registers. 
- for (RegGroup group : RegGroupVirtValues{}) + for (RegGroup group : RegGroupVirtValues{}) { ib._clobbered[group] = Support::lsbMask(_pass->_physRegCount[group]) & ~fd.preservedRegs(group); + } return kErrorOk; } @@ -749,11 +803,13 @@ Error RACFGBuilder::moveVecToPtr(InvokeNode* invokeNode, const FuncValue& arg, c ASMJIT_ASSERT(arg.isReg()); uint32_t argSize = TypeUtils::sizeOf(arg.typeId()); - if (argSize == 0) + if (argSize == 0) { return DebugUtils::errored(kErrorInvalidState); + } - if (argSize < 16) + if (argSize < 16) { argSize = 16; + } uint32_t argStackOffset = Support::alignUp(invokeNode->detail()._argStackSize, argSize); _funcNode->frame().updateCallStackAlignment(argSize); @@ -763,8 +819,9 @@ Error RACFGBuilder::moveVecToPtr(InvokeNode* invokeNode, const FuncValue& arg, c Mem vecPtr = ptr(_pass->_sp.as(), int32_t(argStackOffset)); uint32_t vMovInstId = choose(Inst::kIdMovaps, Inst::kIdVmovaps); - if (argSize > 16) + if (argSize > 16) { vMovInstId = Inst::kIdVmovaps; + } ASMJIT_PROPAGATE(cc()->_newReg(out, ArchTraits::byArch(cc()->arch()).regTypeToTypeId(cc()->_gpSignature.regType()), nullptr)); @@ -925,10 +982,12 @@ Error RACFGBuilder::moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& a r1.setRegT(reg.id()); instId = Inst::kIdMovsxd; - if (dstTypeId == TypeId::kInt64 && srcTypeId == TypeId::kInt32) + if (dstTypeId == TypeId::kInt64 && srcTypeId == TypeId::kInt32) { goto ExtendMovGpXQ; - else + } + else { goto ZeroExtendGpDQ; + } } // Move QWORD (GP). @@ -960,7 +1019,7 @@ Error RACFGBuilder::moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& a instId = isDstSigned && isSrcSigned ? 
Inst::kIdMovsx : Inst::kIdMovzx; goto ExtendMovGpD; } - ASMJIT_FALLTHROUGH; + [[fallthrough]]; case TypeId::kInt8: case TypeId::kUInt8: @@ -1008,14 +1067,18 @@ Error RACFGBuilder::moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& a stackPtr.setSize(TypeUtils::sizeOf(dstTypeId)); uint32_t vMovInstId = choose(Inst::kIdMovaps, Inst::kIdVmovaps); - if (TypeUtils::isVec128(dstTypeId)) + if (TypeUtils::isVec128(dstTypeId)) { r0.setRegT(reg.id()); - else if (TypeUtils::isVec256(dstTypeId)) + } + else if (TypeUtils::isVec256(dstTypeId)) { r0.setRegT(reg.id()); - else if (TypeUtils::isVec512(dstTypeId)) + } + else if (TypeUtils::isVec512(dstTypeId)) { r0.setRegT(reg.id()); - else + } + else { break; + } return cc()->emit(vMovInstId, stackPtr, r0); } @@ -1103,8 +1166,9 @@ Error RACFGBuilder::onBeforeRet(FuncRetNode* funcRet) noexcept { const Operand& op = opArray[i]; const FuncValue& ret = funcDetail.ret(i); - if (!op.isReg()) + if (!op.isReg()) { continue; + } if (ret.regType() == RegType::kX86_St) { const Reg& reg = op.as(); @@ -1114,15 +1178,17 @@ Error RACFGBuilder::onBeforeRet(FuncRetNode* funcRet) noexcept { RAWorkReg* workReg; ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg)); - if (workReg->group() != RegGroup::kVec) + if (workReg->group() != RegGroup::kVec) { return DebugUtils::errored(kErrorInvalidAssignment); + } Reg src(workReg->signature(), workReg->virtId()); Mem mem; TypeId typeId = TypeUtils::scalarOf(workReg->typeId()); - if (ret.hasTypeId()) + if (ret.hasTypeId()) { typeId = ret.typeId(); + } switch (typeId) { case TypeId::kFloat32: @@ -1156,15 +1222,19 @@ Error RACFGBuilder::onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept { for (uint32_t i = 0; i < opCount; i++) { const Operand& op = opArray[i]; - if (op.isNone()) continue; + if (op.isNone()) { + continue; + } const FuncValue& ret = funcDetail.ret(i); - if (ASMJIT_UNLIKELY(!ret.isReg())) + if (ASMJIT_UNLIKELY(!ret.isReg())) { return DebugUtils::errored(kErrorInvalidAssignment); 
+ } // Not handled here... - if (ret.regType() == RegType::kX86_St) + if (ret.regType() == RegType::kX86_St) { continue; + } if (op.isReg()) { // Register return value. @@ -1203,8 +1273,9 @@ void X86RAPass::onInit() noexcept { uint32_t baseRegCount = Environment::is32Bit(arch) ? 8u : 16u; uint32_t simdRegCount = baseRegCount; - if (Environment::is64Bit(arch) && _func->frame().isAvx512Enabled()) + if (Environment::is64Bit(arch) && _func->frame().isAvx512Enabled()) { simdRegCount = 32u; + } bool avxEnabled = _func->frame().isAvxEnabled(); bool avx512Enabled = _func->frame().isAvx512Enabled(); @@ -1233,8 +1304,10 @@ void X86RAPass::onInit() noexcept { // make unavailable all registers that are special and cannot be used in general. bool hasFP = _func->frame().hasPreservedFP(); - makeUnavailable(RegGroup::kGp, Gp::kIdSp); // ESP|RSP used as a stack-pointer (SP). - if (hasFP) makeUnavailable(RegGroup::kGp, Gp::kIdBp); // EBP|RBP used as a frame-pointer (FP). + makeUnavailable(RegGroup::kGp, Gp::kIdSp); // ESP|RSP used as a stack-pointer (SP). + if (hasFP) { + makeUnavailable(RegGroup::kGp, Gp::kIdBp); // EBP|RBP used as a frame-pointer (FP). 
+ } _sp = cc()->zsp(); _fp = cc()->zbp(); @@ -1415,8 +1488,9 @@ ASMJIT_FAVOR_SPEED Error X86RAPass::_rewrite(BaseNode* first, BaseNode* stop) no BaseMem& mem = op.as(); if (mem.isRegHome()) { uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId()); - if (ASMJIT_UNLIKELY(virtIndex >= virtCount)) + if (ASMJIT_UNLIKELY(virtIndex >= virtCount)) { return DebugUtils::errored(kErrorInvalidVirtId); + } VirtReg* virtReg = cc()->virtRegByIndex(virtIndex); RAWorkReg* workReg = virtReg->workReg(); @@ -1529,18 +1603,22 @@ Error X86RAPass::emitPreCall(InvokeNode* invokeNode) noexcept { const FuncValuePack& argPack = fd.argPack(argIndex); for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { const FuncValue& arg = argPack[valueIndex]; - if (!arg) + if (!arg) { break; + } - if (arg.isReg() && Reg::groupOf(arg.regType()) == RegGroup::kVec) + if (arg.isReg() && Reg::groupOf(arg.regType()) == RegGroup::kVec) { n++; + } } } - if (!n) + if (!n) { ASMJIT_PROPAGATE(cc()->xor_(eax, eax)); - else + } + else { ASMJIT_PROPAGATE(cc()->mov(eax, n)); + } break; } @@ -1550,8 +1628,9 @@ Error X86RAPass::emitPreCall(InvokeNode* invokeNode) noexcept { const FuncValuePack& argPack = fd.argPack(argIndex); for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { const FuncValue& arg = argPack[valueIndex]; - if (!arg) + if (!arg) { break; + } if (arg.isReg() && Reg::groupOf(arg.regType()) == RegGroup::kVec) { Gp dst = gpq(fd.callConv().passedOrder(RegGroup::kGp)[argIndex]); diff --git a/src/asmjit/x86/x86rapass_p.h b/src/asmjit/x86/x86rapass_p.h index 67227a1..ef55094 100644 --- a/src/asmjit/x86/x86rapass_p.h +++ b/src/asmjit/x86/x86rapass_p.h @@ -28,10 +28,15 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86) class X86RAPass : public BaseRAPass { public: ASMJIT_NONCOPYABLE(X86RAPass) - typedef BaseRAPass Base; + using Base = BaseRAPass; + + //! \name Members + //! \{ EmitHelper _emitHelper; + //! \} + //! \name Construction & Destruction //! 
\{ @@ -44,12 +49,17 @@ public: //! \{ //! Returns the compiler casted to `x86::Compiler`. + [[nodiscard]] ASMJIT_INLINE_NODEBUG Compiler* cc() const noexcept { return static_cast(_cb); } //! Returns emit helper. + [[nodiscard]] ASMJIT_INLINE_NODEBUG EmitHelper* emitHelper() noexcept { return &_emitHelper; } + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool avxEnabled() const noexcept { return _emitHelper._avxEnabled; } + + [[nodiscard]] ASMJIT_INLINE_NODEBUG bool avx512Enabled() const noexcept { return _emitHelper._avx512Enabled; } //! \} @@ -57,6 +67,7 @@ public: //! \name Utilities //! \{ + [[nodiscard]] ASMJIT_INLINE_NODEBUG InstId choose(InstId sseInstId, InstId avxInstId) noexcept { return avxEnabled() ? avxInstId : sseInstId; } diff --git a/test/asmjit_test_compiler_a64.cpp b/test/asmjit_test_compiler_a64.cpp index 4d8cd5e..73f764f 100644 --- a/test/asmjit_test_compiler_a64.cpp +++ b/test/asmjit_test_compiler_a64.cpp @@ -87,25 +87,25 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef unsigned int U; + using U = unsigned int; - typedef U (*Func0)(); - typedef U (*Func1)(U); - typedef U (*Func2)(U, U); - typedef U (*Func3)(U, U, U); - typedef U (*Func4)(U, U, U, U); - typedef U (*Func5)(U, U, U, U, U); - typedef U (*Func6)(U, U, U, U, U, U); - typedef U (*Func7)(U, U, U, U, U, U, U); - typedef U (*Func8)(U, U, U, U, U, U, U, U); - typedef U (*Func9)(U, U, U, U, U, U, U, U, U); - typedef U (*Func10)(U, U, U, U, U, U, U, U, U, U); - typedef U (*Func11)(U, U, U, U, U, U, U, U, U, U, U); - typedef U (*Func12)(U, U, U, U, U, U, U, U, U, U, U, U); - typedef U (*Func13)(U, U, U, U, U, U, U, U, U, U, U, U, U); - typedef U (*Func14)(U, U, U, U, U, U, U, U, U, U, U, U, U, U); - typedef U (*Func15)(U, U, U, U, U, U, U, U, U, U, U, U, U, U, U); - typedef U (*Func16)(U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U); + using Func0 = U (*)(); + using Func1 = U (*)(U); + using Func2 = U (*)(U, U); + using Func3 = U (*)(U, U, U); + using Func4 = U 
(*)(U, U, U, U); + using Func5 = U (*)(U, U, U, U, U); + using Func6 = U (*)(U, U, U, U, U, U); + using Func7 = U (*)(U, U, U, U, U, U, U); + using Func8 = U (*)(U, U, U, U, U, U, U, U); + using Func9 = U (*)(U, U, U, U, U, U, U, U, U); + using Func10 = U (*)(U, U, U, U, U, U, U, U, U, U); + using Func11 = U (*)(U, U, U, U, U, U, U, U, U, U, U); + using Func12 = U (*)(U, U, U, U, U, U, U, U, U, U, U, U); + using Func13 = U (*)(U, U, U, U, U, U, U, U, U, U, U, U, U); + using Func14 = U (*)(U, U, U, U, U, U, U, U, U, U, U, U, U, U); + using Func15 = U (*)(U, U, U, U, U, U, U, U, U, U, U, U, U, U, U); + using Func16 = U (*)(U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U); unsigned int resultRet = 0; unsigned int expectRet = 0; @@ -224,7 +224,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(void*, const void*, const void*); + using Func = void (*)(void*, const void*, const void*); uint32_t dst[4]; uint32_t aSrc[4] = { 0 , 1 , 2 , 255 }; @@ -284,7 +284,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); result.assignFormat("ret={%d}", func()); @@ -332,7 +332,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); result.assignFormat("ret={%d}", func()); @@ -377,7 +377,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(void* p, size_t n); + using Func = void (*)(void* p, size_t n); Func func = ptr_as_func(_func); uint8_t array[16]; @@ -433,7 +433,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef uint32_t (*Func)(uint32_t, uint32_t); + using Func = uint32_t (*)(uint32_t, uint32_t); Func func = ptr_as_func(_func); uint32_t x = 49; @@ -485,7 +485,7 @@ public: } virtual bool run(void* _func, String& result, 
String& expect) { - typedef double (*Func)(double, double); + using Func = double (*)(double, double); Func func = ptr_as_func(_func); double x = 49; @@ -537,7 +537,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef double (*Func)(double, double); + using Func = double (*)(double, double); Func func = ptr_as_func(_func); double x = 49; @@ -646,7 +646,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef float (*Func)(float, float, uint32_t); + using Func = float (*)(float, float, uint32_t); Func func = ptr_as_func(_func); float dst[4]; diff --git a/test/asmjit_test_compiler_x86.cpp b/test/asmjit_test_compiler_x86.cpp index 9ff6d89..332258d 100644 --- a/test/asmjit_test_compiler_x86.cpp +++ b/test/asmjit_test_compiler_x86.cpp @@ -106,25 +106,25 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef unsigned int U; + using U = unsigned int; - typedef U (*Func0)(); - typedef U (*Func1)(U); - typedef U (*Func2)(U, U); - typedef U (*Func3)(U, U, U); - typedef U (*Func4)(U, U, U, U); - typedef U (*Func5)(U, U, U, U, U); - typedef U (*Func6)(U, U, U, U, U, U); - typedef U (*Func7)(U, U, U, U, U, U, U); - typedef U (*Func8)(U, U, U, U, U, U, U, U); - typedef U (*Func9)(U, U, U, U, U, U, U, U, U); - typedef U (*Func10)(U, U, U, U, U, U, U, U, U, U); - typedef U (*Func11)(U, U, U, U, U, U, U, U, U, U, U); - typedef U (*Func12)(U, U, U, U, U, U, U, U, U, U, U, U); - typedef U (*Func13)(U, U, U, U, U, U, U, U, U, U, U, U, U); - typedef U (*Func14)(U, U, U, U, U, U, U, U, U, U, U, U, U, U); - typedef U (*Func15)(U, U, U, U, U, U, U, U, U, U, U, U, U, U, U); - typedef U (*Func16)(U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U); + using Func0 = U (*)(); + using Func1 = U (*)(U); + using Func2 = U (*)(U, U); + using Func3 = U (*)(U, U, U); + using Func4 = U (*)(U, U, U, U); + using Func5 = U (*)(U, U, U, U, U); + using Func6 = U (*)(U, U, U, U, U, U); + using Func7 = U (*)(U, 
U, U, U, U, U, U); + using Func8 = U (*)(U, U, U, U, U, U, U, U); + using Func9 = U (*)(U, U, U, U, U, U, U, U, U); + using Func10 = U (*)(U, U, U, U, U, U, U, U, U, U); + using Func11 = U (*)(U, U, U, U, U, U, U, U, U, U, U); + using Func12 = U (*)(U, U, U, U, U, U, U, U, U, U, U, U); + using Func13 = U (*)(U, U, U, U, U, U, U, U, U, U, U, U, U); + using Func14 = U (*)(U, U, U, U, U, U, U, U, U, U, U, U, U, U); + using Func15 = U (*)(U, U, U, U, U, U, U, U, U, U, U, U, U, U, U); + using Func16 = U (*)(U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U); unsigned int resultRet = 0; unsigned int expectRet = 0; @@ -230,7 +230,7 @@ public: virtual bool run(void* _func, String& result, String& expect) { DebugUtils::unused(result, expect); - typedef void(*Func)(void); + using Func = void (*)(void); Func func = ptr_as_func(_func); func(); @@ -259,7 +259,7 @@ public: virtual bool run(void* _func, String& result, String& expect) { DebugUtils::unused(result, expect); - typedef void (*Func)(void); + using Func = void (*)(void); Func func = ptr_as_func(_func); func(); @@ -287,7 +287,7 @@ public: virtual bool run(void* _func, String& result, String& expect) { DebugUtils::unused(result, expect); - typedef void (*Func)(void); + using Func = void (*)(void); Func func = ptr_as_func(_func); func(); @@ -344,7 +344,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void(*Func)(int*, int); + using Func = void (*)(int*, int); Func func = ptr_as_func(_func); int arr[5] = { -1, -1, -1, -1, -1 }; @@ -393,7 +393,7 @@ public: virtual bool run(void* _func, String& result, String& expect) { DebugUtils::unused(result, expect); - typedef void (*Func)(void); + using Func = void (*)(void); Func func = ptr_as_func(_func); func(); @@ -427,7 +427,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); @@ -491,7 +491,7 @@ public: } virtual bool run(void* 
_func, String& result, String& expect) { - typedef void (*Func)(void); + using Func = void (*)(void); Func func = ptr_as_func(_func); func(); @@ -537,7 +537,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(void); + using Func = void (*)(void); Func func = ptr_as_func(_func); func(); @@ -642,7 +642,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef float (*Func)(float, float, uint32_t); + using Func = float (*)(float, float, uint32_t); Func func = ptr_as_func(_func); float results[4]; @@ -727,7 +727,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int); + using Func = int (*)(int); Func func = ptr_as_func(_func); int results[2]; @@ -778,7 +778,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int out = func(); @@ -853,7 +853,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int); + using Func = int (*)(int); Func func = ptr_as_func(_func); int results[2] = { func(0), func(1) }; @@ -903,7 +903,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int resultRet = func(); @@ -923,7 +923,7 @@ class X86Test_AllocMany1 : public X86TestCase { public: X86Test_AllocMany1() : X86TestCase("AllocMany1") {} - enum { kCount = 8 }; + static inline constexpr uint32_t kCount = 8; static void add(TestApp& app) { app.add(new X86Test_AllocMany1()); @@ -968,7 +968,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(int*, int*); + using Func = void (*)(int*, int*); Func func = ptr_as_func(_func); int resultX = 0; @@ -1022,7 +1022,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef 
void (*Func)(uint32_t*); + using Func = void (*)(uint32_t*); Func func = ptr_as_func(_func); uint32_t i; @@ -1072,7 +1072,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int8_t); + using Func = int (*)(int8_t); Func func = ptr_as_func(_func); int resultRet = func(int8_t(-13)); @@ -1107,7 +1107,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int, int); + using Func = int (*)(int, int, int); Func func = ptr_as_func(_func); int resultRet = func(42, 155, 199); @@ -1154,7 +1154,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(void*, void*, void*, void*, void*, void*, void*, void*); + using Func = void (*)(void*, void*, void*, void*, void*, void*, void*, void*); Func func = ptr_as_func(_func); uint8_t resultBuf[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; @@ -1212,7 +1212,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(float, float, float, float, float, float, float, float*); + using Func = void (*)(float, float, float, float, float, float, float, float*); Func func = ptr_as_func(_func); float resultRet = 0; @@ -1263,7 +1263,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(double, double, double, double, double, double, double, double*); + using Func = void (*)(double, double, double, double, double, double, double, double*); Func func = ptr_as_func(_func); double resultRet = 0; @@ -1310,7 +1310,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef __m128i (*Func)(__m128i, __m128i); + using Func = __m128i (*)(__m128i, __m128i); Func func = ptr_as_func(_func); uint8_t aData[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; @@ -1355,7 +1355,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef float (*Func)(float); + using Func = 
float (*)(float); Func func = ptr_as_func(_func); float resultRet = func(42.0f); @@ -1394,7 +1394,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef float (*Func)(float, float); + using Func = float (*)(float, float); Func func = ptr_as_func(_func); float resultRet = func(1.0f, 2.0f); @@ -1429,7 +1429,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef double (*Func)(double); + using Func = double (*)(double); Func func = ptr_as_func(_func); double resultRet = func(42.0); @@ -1468,7 +1468,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef double (*Func)(double, double); + using Func = double (*)(double, double); Func func = ptr_as_func(_func); double resultRet = func(1.0, 2.0); @@ -1488,7 +1488,7 @@ class X86Test_AllocStack : public X86TestCase { public: X86Test_AllocStack() : X86TestCase("AllocStack") {} - enum { kSize = 256 }; + static inline constexpr uint32_t kSize = 256u; static void add(TestApp& app) { app.add(new X86Test_AllocStack()); @@ -1535,7 +1535,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int resultRet = func(); @@ -1580,7 +1580,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(int*, int*, int, int); + using Func = void (*)(int*, int*, int, int); Func func = ptr_as_func(_func); int v0 = 4; @@ -1637,7 +1637,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(int*, const int*); + using Func = void (*)(int*, const int*); Func func = ptr_as_func(_func); int src[2] = { 4, 9 }; @@ -1681,7 +1681,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int); + using Func = int (*)(int, int); Func func = ptr_as_func(_func); int v0 = 2999; @@ -1725,7 +1725,7 @@ public: } virtual 
bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(int, int, char*); + using Func = void (*)(int, int, char*); Func func = ptr_as_func(_func); char resultBuf[4] {}; @@ -1776,7 +1776,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(int*, int, int, int); + using Func = void (*)(int*, int, int, int); Func func = ptr_as_func(_func); int v0 = 0x000000FF; @@ -1800,7 +1800,7 @@ class X86Test_GpbLo1 : public X86TestCase { public: X86Test_GpbLo1() : X86TestCase("GpbLo1") {} - enum : uint32_t { kCount = 32 }; + static inline constexpr uint32_t kCount = 32u; static void add(TestApp& app) { app.add(new X86Test_GpbLo1()); @@ -1843,7 +1843,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef uint32_t (*Func)(uint32_t*); + using Func = uint32_t (*)(uint32_t*); Func func = ptr_as_func(_func); uint32_t i; @@ -1901,7 +1901,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef uint32_t (*Func)(uint32_t); + using Func = uint32_t (*)(uint32_t); Func func = ptr_as_func(_func); uint32_t resultRet = func(0x12345678u); @@ -1940,7 +1940,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(void*, void*, size_t); + using Func = void (*)(void*, void*, size_t); Func func = ptr_as_func(_func); char dst[20] = { 0 }; @@ -1991,7 +1991,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int); + using Func = int (*)(int, int); Func func = ptr_as_func(_func); int a = func(0, 1); @@ -2050,7 +2050,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int); + using Func = int (*)(int, int); Func func = ptr_as_func(_func); int a = func(0, 1); @@ -2109,7 +2109,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int); + using Func = int (*)(int, int); 
Func func = ptr_as_func(_func); int a = func(0, 1); @@ -2172,7 +2172,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int); + using Func = int (*)(int, int); Func func = ptr_as_func(_func); int a = func(0, 1); @@ -2192,7 +2192,7 @@ class X86Test_Memcpy : public X86TestCase { public: X86Test_Memcpy() : X86TestCase("Memcpy") {} - enum { kCount = 32 }; + static inline constexpr uint32_t kCount = 32u; static void add(TestApp& app) { app.add(new X86Test_Memcpy()); @@ -2231,7 +2231,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(uint32_t*, const uint32_t*, size_t); + using Func = void (*)(uint32_t*, const uint32_t*, size_t); Func func = ptr_as_func(_func); uint32_t i; @@ -2312,7 +2312,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int, int); + using Func = int (*)(int, int, int); Func func = ptr_as_func(_func); int ret1 = func(0, 4, 5); @@ -2335,7 +2335,7 @@ class X86Test_AlphaBlend : public X86TestCase { public: X86Test_AlphaBlend() : X86TestCase("AlphaBlend") {} - enum { kCount = 17 }; + static inline constexpr uint32_t kCount = 17u; static void add(TestApp& app) { app.add(new X86Test_AlphaBlend()); @@ -2361,7 +2361,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(void*, const void*, size_t); + using Func = void (*)(void*, const void*, size_t); Func func = ptr_as_func(_func); static const uint32_t dstConstData[] = { 0x00000000, 0x10101010, 0x20100804, 0x30200003, 0x40204040, 0x5000004D, 0x60302E2C, 0x706F6E6D, 0x807F4F2F, 0x90349001, 0xA0010203, 0xB03204AB, 0xC023AFBD, 0xD0D0D0C0, 0xE0AABBCC, 0xFFFFFFFF, 0xF8F4F2F1 }; @@ -2448,7 +2448,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef uint32_t (*Func)(const void*, const void*, uint32_t prevK); + using Func = uint32_t (*)(const void*, const void*, uint32_t prevK); 
Func func = ptr_as_func(_func); static const uint32_t srcA[16] = { 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1 }; @@ -2492,7 +2492,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(void*); + using Func = void (*)(void*); Func func = ptr_as_func(_func); uint32_t out[16]; @@ -2543,7 +2543,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef uint32_t (*Func)(uint8_t, uint8_t, uint32_t); + using Func = uint32_t (*)(uint8_t, uint8_t, uint32_t); Func func = ptr_as_func(_func); uint32_t arg = uint32_t(uintptr_t(_func) & 0xFFFFFFFF); @@ -2597,7 +2597,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int, int); + using Func = int (*)(int, int, int); Func func = ptr_as_func(_func); int resultRet = func(3, 2, 1); @@ -2619,7 +2619,7 @@ class X86Test_FuncCallBase2 : public X86TestCase { public: X86Test_FuncCallBase2() : X86TestCase("FuncCallBase2") {} - enum { kSize = 256 }; + static inline constexpr uint32_t kSize = 256u; static void add(TestApp& app) { app.add(new X86Test_FuncCallBase2()); @@ -2680,7 +2680,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int resultRet = func(); @@ -2728,7 +2728,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int, int); + using Func = int (*)(int, int, int); Func func = ptr_as_func(_func); int resultRet = func(1, 42, 3); @@ -2778,7 +2778,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int); + using Func = int (*)(int); Func func = ptr_as_func(_func); int resultRet = func(9); @@ -2859,7 +2859,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(void*, const void*, const void*); + using Func = void (*)(void*, const void*, 
const void*); Func func = ptr_as_func(_func); uint8_t aData[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; @@ -2965,7 +2965,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(const void*, const void*, const void*, const void*, void*); + using Func = void (*)(const void*, const void*, const void*, const void*, void*); Func func = ptr_as_func(_func); @@ -3049,7 +3049,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int resultRet = func(); @@ -3106,7 +3106,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int resultRet = func(); @@ -3159,7 +3159,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int resultRet = func(); @@ -3225,7 +3225,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int resultRet = func(); @@ -3289,7 +3289,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int&, int&, int&, int&); + using Func = int (*)(int&, int&, int&, int&); Func func = ptr_as_func(_func); int inputs[4] = { 1, 2, 3, 4 }; @@ -3341,7 +3341,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef float (*Func)(float, float); + using Func = float (*)(float, float); Func func = ptr_as_func(_func); float resultRet = func(15.5f, 2.0f); @@ -3390,7 +3390,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef double (*Func)(double, double); + using Func = double (*)(double, double); Func func = ptr_as_func(_func); double resultRet = 
func(15.5, 2.0); @@ -3461,7 +3461,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int, int); + using Func = int (*)(int, int, int); Func func = ptr_as_func(_func); int arg1 = 4; @@ -3544,7 +3544,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int*); + using Func = int (*)(int*); Func func = ptr_as_func(_func); int buffer[4] = { 127, 87, 23, 17 }; @@ -3597,7 +3597,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int); + using Func = int (*)(int); Func func = ptr_as_func(_func); int resultRet = func(5); @@ -3653,7 +3653,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int, int, int); + using Func = int (*)(int, int, int, int); Func func = ptr_as_func(_func); int resultRet = func(1, 2, 3, 4); @@ -3721,7 +3721,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef double (*Func)(double, double, double, double); + using Func = double (*)(double, double, double, double); Func func = ptr_as_func(_func); double resultRet = func(1.0, 2.0, 3.0, 4.0); @@ -3782,7 +3782,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef uint64_t (*Func)(uint64_t); + using Func = uint64_t (*)(uint64_t); Func func = ptr_as_func(_func); uint64_t resultRet = func(uint64_t(0xFFFFFFFF)); @@ -3844,7 +3844,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int); + using Func = int (*)(int, int); Func func = ptr_as_func(_func); int resultRet = func(44, 199); @@ -3890,7 +3890,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef double (*Func)(const double*); + using Func = double (*)(const double*); Func func = ptr_as_func(_func); double arg = 2; @@ -3943,7 +3943,7 @@ public: } virtual bool run(void* _func, String& result, 
String& expect) { - typedef double (*Func)(const double*); + using Func = double (*)(const double*); Func func = ptr_as_func(_func); double arg = 2; @@ -3992,7 +3992,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef double (*Func)(void); + using Func = double (*)(void); Func func = ptr_as_func(_func); double resultRet = func(); @@ -4050,7 +4050,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int resultRet = func(); @@ -4110,7 +4110,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef uint32_t (*Func)(uint32_t x); + using Func = uint32_t (*)(uint32_t x); Func func = ptr_as_func(_func); uint32_t resultRet = func(111); @@ -4210,7 +4210,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef void (*Func)(void*, const void*, const void*); + using Func = void (*)(void*, const void*, const void*); Func func = ptr_as_func(_func); size_t i; @@ -4249,7 +4249,7 @@ public: class X86Test_VecToScalar : public X86TestCase { public: - static constexpr uint32_t kVecCount = 64; + static inline constexpr uint32_t kVecCount = 64; X86Test_VecToScalar() : X86TestCase("VecToScalar") {} @@ -4285,7 +4285,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef uint32_t (*Func)(uint32_t); + using Func = uint32_t (*)(uint32_t); Func func = ptr_as_func(_func); uint32_t resultRet = func(1); @@ -4327,7 +4327,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int resultRet = func(); @@ -4369,7 +4369,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(void); + using Func = int (*)(void); Func func = ptr_as_func(_func); int resultRet = func(); @@ -4450,7 +4450,7 @@ struct 
X86Test_MiscMultiRet : public X86TestCase { } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int, int); + using Func = int (*)(int, int, int); Func func = ptr_as_func(_func); @@ -4521,7 +4521,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (*Func)(int, int); + using Func = int (*)(int, int); Func func = ptr_as_func(_func); @@ -4569,8 +4569,7 @@ public: } virtual bool run(void* _func, String& result, String& expect) { - typedef int (ASMJIT_FASTCALL *Func)(int, void*); - + using Func = int (ASMJIT_FASTCALL*)(int, void*); Func func = ptr_as_func(_func); int resultRet = 0; diff --git a/test/asmjit_test_emitters.cpp b/test/asmjit_test_emitters.cpp index 47fcaff..673a8d1 100644 --- a/test/asmjit_test_emitters.cpp +++ b/test/asmjit_test_emitters.cpp @@ -31,7 +31,7 @@ static void printInfo() noexcept { using namespace asmjit; // Signature of the generated function. -typedef void (*SumIntsFunc)(int* dst, const int* a, const int* b); +using SumIntsFunc = void (*)(int* dst, const int* a, const int* b); // X86 Backend // ----------- diff --git a/test/asmjit_test_execute.cpp b/test/asmjit_test_execute.cpp index ce002dc..7972dde 100644 --- a/test/asmjit_test_execute.cpp +++ b/test/asmjit_test_execute.cpp @@ -31,7 +31,7 @@ static void printInfo() noexcept { using namespace asmjit; // Signature of the generated function. -typedef void (*EmptyFunc)(void); +using EmptyFunc = void (*)(void); // Generate Empty Function // ----------------------- diff --git a/test/asmjit_test_x86_sections.cpp b/test/asmjit_test_x86_sections.cpp index df76322..631e43c 100644 --- a/test/asmjit_test_x86_sections.cpp +++ b/test/asmjit_test_x86_sections.cpp @@ -145,7 +145,7 @@ int main() { }); // Execute the function and test whether it works. 
- typedef size_t (*Func)(size_t idx); + using Func = size_t (*)(size_t idx); Func fn = (Func)span.rx(); printf("\n"); diff --git a/test/asmjitutils.h b/test/asmjitutils.h index 2888015..b4a970d 100644 --- a/test/asmjitutils.h +++ b/test/asmjitutils.h @@ -10,7 +10,7 @@ namespace { -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline const char* asmjitArchAsString(asmjit::Arch arch) noexcept { switch (arch) { case asmjit::Arch::kX86 : return "X86"; @@ -38,7 +38,7 @@ static inline const char* asmjitArchAsString(asmjit::Arch arch) noexcept { } } -ASMJIT_MAYBE_UNUSED +[[maybe_unused]] static inline void printIndented(const char* str, size_t indent) noexcept { const char* start = str; while (*str) { diff --git a/test/performancetimer.h b/test/performancetimer.h index c7a8ebe..55e377c 100644 --- a/test/performancetimer.h +++ b/test/performancetimer.h @@ -11,18 +11,13 @@ class PerformanceTimer { public: - typedef std::chrono::high_resolution_clock::time_point TimePoint; + using TimePoint = std::chrono::high_resolution_clock::time_point; TimePoint _startTime {}; TimePoint _endTime {}; - inline void start() { - _startTime = std::chrono::high_resolution_clock::now(); - } - - inline void stop() { - _endTime = std::chrono::high_resolution_clock::now(); - } + inline void start() { _startTime = std::chrono::high_resolution_clock::now(); } + inline void stop() { _endTime = std::chrono::high_resolution_clock::now(); } inline double duration() const { std::chrono::duration elapsed = _endTime - _startTime;