[abi] Switched to C++17

This commit is contained in:
kobalicek
2025-05-24 15:53:19 +02:00
parent cecc73f297
commit 356dddbc55
125 changed files with 7518 additions and 3345 deletions

View File

@@ -1,8 +1,8 @@
{
"diagnostics": {
"asan": { "definitions": ["ASMJIT_SANITIZE=address"] },
"ubsan": { "definitions": ["ASMJIT_SANITIZE=undefined"] },
"msan": { "definitions": ["ASMJIT_SANITIZE=memory"] }
"msan": { "definitions": ["ASMJIT_SANITIZE=memory"] },
"ubsan": { "definitions": ["ASMJIT_SANITIZE=undefined"] }
},
"valgrind_arguments": [

View File

@@ -54,7 +54,6 @@ jobs:
- { title: "no-x86" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_X86=1" }
- { title: "no-aarch64" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_AARCH64=1" }
- { title: "lang-c++17" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++17" }
- { title: "lang-c++20" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++20" }
- { title: "lang-c++23" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++23" }

View File

@@ -195,7 +195,7 @@ function(asmjit_add_target target target_type)
DEFINE_SYMBOL ""
CXX_VISIBILITY_PRESET hidden)
target_compile_options(${target} PRIVATE ${X_CFLAGS} ${ASMJIT_SANITIZE_CFLAGS} $<$<CONFIG:Debug>:${X_CFLAGS_DBG}> $<$<NOT:$<CONFIG:Debug>>:${X_CFLAGS_REL}>)
target_compile_features(${target} PUBLIC cxx_std_11)
target_compile_features(${target} PUBLIC cxx_std_17)
target_link_options(${target} PRIVATE ${ASMJIT_PRIVATE_LFLAGS})
target_link_libraries(${target} PRIVATE ${X_LIBRARIES})

View File

@@ -48,13 +48,16 @@
//! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64.
#include "./arm.h"
#include "./arm/a64assembler.h"
#include "./arm/a64builder.h"
#include "./arm/a64compiler.h"
#include "./arm/a64emitter.h"
#include "./arm/a64globals.h"
#include "./arm/a64instdb.h"
#include "./arm/a64operand.h"
#include "asmjit-scope-begin.h"
#include "arm/a64assembler.h"
#include "arm/a64builder.h"
#include "arm/a64compiler.h"
#include "arm/a64emitter.h"
#include "arm/a64globals.h"
#include "arm/a64instdb.h"
#include "arm/a64operand.h"
#include "asmjit-scope-end.h"
#endif // ASMJIT_A64_H_INCLUDED

View File

@@ -76,9 +76,12 @@
//! - \ref arm::DataType - Data type that is part of an instruction in AArch32 mode.
//! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64.
#include "./core.h"
#include "./arm/armglobals.h"
#include "./arm/armoperand.h"
#include "./arm/armutils.h"
#include "core.h"
#include "asmjit-scope-begin.h"
#include "arm/armglobals.h"
#include "arm/armoperand.h"
#include "arm/armutils.h"
#include "asmjit-scope-end.h"
#endif // ASMJIT_ARM_H_INCLUDED

View File

@@ -24,8 +24,8 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::Assembler - Utils
// ======================
static ASMJIT_FORCE_INLINE constexpr uint32_t diff(RegType a, RegType b) noexcept { return uint32_t(a) - uint32_t(b); }
static ASMJIT_FORCE_INLINE constexpr uint32_t diff(VecElementType elementType, VecElementType baseType) noexcept { return uint32_t(elementType) - uint32_t(baseType); }
static ASMJIT_INLINE_CONSTEXPR uint32_t diff(RegType a, RegType b) noexcept { return uint32_t(a) - uint32_t(b); }
static ASMJIT_INLINE_CONSTEXPR uint32_t diff(VecElementType elementType, VecElementType baseType) noexcept { return uint32_t(elementType) - uint32_t(baseType); }
// a64::Assembler - Cond
// =====================
@@ -74,39 +74,50 @@ static inline RegType extendOptionToRegType(uint32_t option) noexcept {
//! Struct that contains Size (2 bits), Q flag, and S (scalar) flag. These values
//! are used to encode Q, Size, and Scalar fields in an opcode.
struct SizeOp {
enum : uint8_t {
k128BitShift = 0,
kScalarShift = 1,
kSizeShift = 2,
//! \name Constants
//! \{
kQ = uint8_t(1u << k128BitShift),
kS = uint8_t(1u << kScalarShift),
static inline constexpr uint8_t k128BitShift = 0;
static inline constexpr uint8_t kScalarShift = 1;
static inline constexpr uint8_t kSizeShift = 2;
k00 = uint8_t(0 << kSizeShift),
k01 = uint8_t(1 << kSizeShift),
k10 = uint8_t(2 << kSizeShift),
k11 = uint8_t(3 << kSizeShift),
static inline constexpr uint8_t kQ = uint8_t(1u << k128BitShift);
static inline constexpr uint8_t kS = uint8_t(1u << kScalarShift);
k00Q = k00 | kQ,
k01Q = k01 | kQ,
k10Q = k10 | kQ,
k11Q = k11 | kQ,
static inline constexpr uint8_t k00 = uint8_t(0 << kSizeShift);
static inline constexpr uint8_t k01 = uint8_t(1 << kSizeShift);
static inline constexpr uint8_t k10 = uint8_t(2 << kSizeShift);
static inline constexpr uint8_t k11 = uint8_t(3 << kSizeShift);
k00S = k00 | kS,
k01S = k01 | kS,
k10S = k10 | kS,
k11S = k11 | kS,
static inline constexpr uint8_t k00Q = k00 | kQ;
static inline constexpr uint8_t k01Q = k01 | kQ;
static inline constexpr uint8_t k10Q = k10 | kQ;
static inline constexpr uint8_t k11Q = k11 | kQ;
kInvalid = 0xFFu,
static inline constexpr uint8_t k00S = k00 | kS;
static inline constexpr uint8_t k01S = k01 | kS;
static inline constexpr uint8_t k10S = k10 | kS;
static inline constexpr uint8_t k11S = k11 | kS;
// Masks used by SizeOpMap.
kSzQ = (0x3u << kSizeShift) | kQ,
kSzS = (0x3u << kSizeShift) | kS,
kSzQS = (0x3u << kSizeShift) | kQ | kS
};
static inline constexpr uint8_t kInvalid = 0xFFu;
// Masks used by SizeOpMap.
static inline constexpr uint8_t kSzQ = (0x3u << kSizeShift) | kQ;
static inline constexpr uint8_t kSzS = (0x3u << kSizeShift) | kS;
static inline constexpr uint8_t kSzQS = (0x3u << kSizeShift) | kQ | kS;
//! \}
//! \name Members
//! \{
uint8_t value;
//! \}
//! \name Accessors
//! \{
inline bool isValid() const noexcept { return value != kInvalid; }
inline void makeInvalid() noexcept { value = kInvalid; }
@@ -119,6 +130,8 @@ struct SizeOp {
ASMJIT_ASSERT(size() > 0);
value = uint8_t(value - (1u << kSizeShift));
}
//! \}
};
struct SizeOpTable {
@@ -282,8 +295,9 @@ static inline SizeOp armElementTypeToSizeOp(uint32_t vecOpType, RegType regType,
SizeOp op = table.array[index];
SizeOp modifiedOp { uint8_t(op.value & map.sizeOpMask) };
if (!Support::bitTest(map.acceptMask, op.value))
if (!Support::bitTest(map.acceptMask, op.value)) {
modifiedOp.makeInvalid();
}
return modifiedOp;
}
@@ -363,8 +377,9 @@ static uint32_t encodeMovSequence64(uint32_t out[4], uint64_t imm, uint32_t rd,
for (uint32_t hwIndex = 0; hwIndex < 4; hwIndex++, imm >>= 16) {
uint32_t hwImm = uint32_t(imm & 0xFFFFu);
if (hwImm == 0)
if (hwImm == 0) {
continue;
}
out[count++] = op | (hwIndex << 21) | (hwImm << 5) | rd;
op = kMovK;
@@ -382,8 +397,9 @@ static uint32_t encodeMovSequence64(uint32_t out[4], uint64_t imm, uint32_t rd,
for (uint32_t hwIndex = 0; hwIndex < 4; hwIndex++, imm >>= 16) {
uint32_t hwImm = uint32_t(imm & 0xFFFFu);
if (hwImm == 0xFFFFu)
if (hwImm == 0xFFFFu) {
continue;
}
out[count++] = op | (hwIndex << 21) | ((hwImm ^ negMask) << 5) | rd;
op = kMovK;
@@ -503,18 +519,22 @@ static inline bool armCheckMemBaseIndexRel(const Mem& mem) noexcept {
RegType baseType = mem.baseType();
RegType indexType = mem.indexType();
if (!Support::bitTest(kBaseMask, baseType))
if (!Support::bitTest(kBaseMask, baseType)) {
return false;
}
if (baseType > RegType::kLabelTag) {
// Index allows either GpW or GpX.
if (!Support::bitTest(kIndexMask, indexType))
if (!Support::bitTest(kIndexMask, indexType)) {
return false;
}
if (indexType == RegType::kNone)
if (indexType == RegType::kNone) {
return true;
else
}
else {
return !mem.hasOffset();
}
}
else {
// No index register allowed if this is a PC relative address (literal).
@@ -542,8 +562,9 @@ static inline bool pickFpOpcode(const Vec& reg, uint32_t sOp, uint32_t sHf, uint
if (!reg.hasElementType()) {
// Scalar operation [HSD].
uint32_t sz = diff(reg.type(), RegType::kARM_VecH);
if (sz > 2u || !Support::bitTest(szBits[sHf].sizeMask, sz))
if (sz > 2u || !Support::bitTest(szBits[sHf].sizeMask, sz)) {
return false;
}
opcode->reset(szBits[sHf].mask[sz] ^ sOp);
*szOut = sz;
@@ -554,8 +575,9 @@ static inline bool pickFpOpcode(const Vec& reg, uint32_t sOp, uint32_t sHf, uint
uint32_t q = diff(reg.type(), RegType::kARM_VecD);
uint32_t sz = diff(reg.elementType(), VecElementType::kH);
if (q > 1u || sz > 2u || !Support::bitTest(szBits[vHf].sizeMask, sz))
if (q > 1u || sz > 2u || !Support::bitTest(szBits[vHf].sizeMask, sz)) {
return false;
}
opcode->reset(szBits[vHf].mask[sz] ^ (vOp | (q << kQBitIndex)));
*szOut = sz;
@@ -773,8 +795,9 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
CondCode instCC = BaseInst::extractARMCondCode(instId);
instId = instId & uint32_t(InstIdParts::kRealId);
if (instId >= Inst::_kIdCount)
if (instId >= Inst::_kIdCount) {
instId = 0;
}
const InstDB::InstInfo* instInfo = &InstDB::_instInfoTable[instId];
uint32_t encodingIndex = instInfo->_encodingDataIndex;
@@ -794,21 +817,25 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
uint64_t offsetValue; // Offset value (if known).
if (ASMJIT_UNLIKELY(Support::test(options, kRequiresSpecialHandling))) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
// Unknown instruction.
if (ASMJIT_UNLIKELY(instId == 0))
if (ASMJIT_UNLIKELY(instId == 0)) {
goto InvalidInstruction;
}
// Condition code can only be used with 'B' instruction.
if (ASMJIT_UNLIKELY(instCC != CondCode::kAL && instId != Inst::kIdB))
if (ASMJIT_UNLIKELY(instCC != CondCode::kAL && instId != Inst::kIdB)) {
goto InvalidInstruction;
}
// Grow request, happens rarely.
err = writer.ensureSpace(this, 4);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
goto Failed;
}
#ifndef ASMJIT_NO_VALIDATION
// Strict validation.
@@ -817,8 +844,9 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
err = _funcs.validate(BaseInst(instId, options, _extraReg), opArray, Globals::kMaxOpCount, ValidationFlags::kNone);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
goto Failed;
}
}
#endif
}
@@ -4932,11 +4960,13 @@ EmitOp_Multiple:
{
ASMJIT_ASSERT(multipleOpCount > 0);
err = writer.ensureSpace(this, multipleOpCount * 4u);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
goto Failed;
}
for (uint32_t i = 0; i < multipleOpCount; i++)
for (uint32_t i = 0; i < multipleOpCount; i++) {
writer.emit32uLE(multipleOpData[i]);
}
goto EmitDone;
}
@@ -4946,28 +4976,33 @@ EmitOp_Multiple:
// --------------------------------------------------------------------------
EmitOp_MemBase_Rn5:
if (!checkMemBase(rmRel->as<Mem>()))
if (!checkMemBase(rmRel->as<Mem>())) {
goto InvalidAddress;
}
opcode.addReg(rmRel->as<Mem>().baseId(), 5);
goto EmitOp;
EmitOp_MemBaseNoImm_Rn5:
if (!checkMemBase(rmRel->as<Mem>()) || rmRel->as<Mem>().hasIndex())
if (!checkMemBase(rmRel->as<Mem>()) || rmRel->as<Mem>().hasIndex()) {
goto InvalidAddress;
}
if (rmRel->as<Mem>().hasOffset())
if (rmRel->as<Mem>().hasOffset()) {
goto InvalidDisplacement;
}
opcode.addReg(rmRel->as<Mem>().baseId(), 5);
goto EmitOp;
EmitOp_MemBaseIndex_Rn5_Rm16:
if (!rmRel->as<Mem>().hasBaseReg())
if (!rmRel->as<Mem>().hasBaseReg()) {
goto InvalidAddress;
}
if (rmRel->as<Mem>().indexId() > 30 && rmRel->as<Mem>().indexId() != Gp::kIdZr)
if (rmRel->as<Mem>().indexId() > 30 && rmRel->as<Mem>().indexId() != Gp::kIdZr) {
goto InvalidPhysId;
}
opcode.addReg(rmRel->as<Mem>().indexId(), 16);
opcode.addReg(rmRel->as<Mem>().baseId(), 5);
@@ -4992,8 +5027,9 @@ EmitOp_Rel:
}
LabelEntry* label = _code->labelEntry(labelId);
if (ASMJIT_UNLIKELY(!label))
if (ASMJIT_UNLIKELY(!label)) {
goto InvalidLabel;
}
if (offsetFormat.type() == OffsetType::kAArch64_ADRP) {
// TODO: [ARM] Always create relocation entry.
@@ -5009,8 +5045,9 @@ EmitOp_Rel:
size_t codeOffset = writer.offsetFrom(_bufferData);
LabelLink* link = _code->newLabelLink(label, _section->id(), codeOffset, intptr_t(labelOffset), offsetFormat);
if (ASMJIT_UNLIKELY(!link))
if (ASMJIT_UNLIKELY(!link)) {
goto OutOfMemory;
}
goto EmitOp;
}
@@ -5027,8 +5064,9 @@ EmitOp_Rel:
// Create a new RelocEntry as we cannot calculate the offset right now.
RelocEntry* re;
err = _code->newRelocEntry(&re, RelocType::kAbsToRel);
if (err)
if (err) {
goto Failed;
}
re->_sourceSectionId = _section->id();
re->_sourceOffset = codeOffset;
@@ -5039,8 +5077,9 @@ EmitOp_Rel:
else {
uint64_t pc = baseAddress + codeOffset;
if (offsetFormat.type() == OffsetType::kAArch64_ADRP)
if (offsetFormat.type() == OffsetType::kAArch64_ADRP) {
pc &= ~uint64_t(4096 - 1);
}
offsetValue = targetOffset - pc;
goto EmitOp_DispImm;
@@ -5051,12 +5090,14 @@ EmitOp_Rel:
EmitOp_DispImm:
{
if ((offsetValue & Support::lsbMask<uint32_t>(offsetFormat.immDiscardLsb())) != 0)
if ((offsetValue & Support::lsbMask<uint32_t>(offsetFormat.immDiscardLsb())) != 0) {
goto InvalidDisplacement;
}
int64_t dispImm64 = int64_t(offsetValue) >> offsetFormat.immDiscardLsb();
if (!Support::isEncodableOffset64(dispImm64, offsetFormat.immBitCount()))
if (!Support::isEncodableOffset64(dispImm64, offsetFormat.immBitCount())) {
goto InvalidDisplacement;
}
uint32_t dispImm32 = uint32_t(dispImm64 & Support::lsbMask<uint32_t>(offsetFormat.immBitCount()));
switch (offsetFormat.type()) {
@@ -5094,8 +5135,9 @@ EmitOp:
EmitDone:
if (Support::test(options, InstOptions::kReserved)) {
#ifndef ASMJIT_NO_LOGGING
if (_logger)
if (_logger) {
EmitterUtils::logInstructionEmitted(this, BaseInst::composeARMInstId(instId, instCC), options, o0, o1, o2, opExt, 0, 0, writer.cursor());
}
#endif
}
@@ -5141,21 +5183,26 @@ Failed:
Error Assembler::align(AlignMode alignMode, uint32_t alignment) {
constexpr uint32_t kNopA64 = 0xD503201Fu; // [11010101|00000011|00100000|00011111].
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (ASMJIT_UNLIKELY(uint32_t(alignMode) > uint32_t(AlignMode::kMaxValue)))
if (ASMJIT_UNLIKELY(uint32_t(alignMode) > uint32_t(AlignMode::kMaxValue))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (alignment <= 1)
if (alignment <= 1) {
return kErrorOk;
}
if (ASMJIT_UNLIKELY(alignment > Globals::kMaxAlignment || !Support::isPowerOf2(alignment)))
if (ASMJIT_UNLIKELY(alignment > Globals::kMaxAlignment || !Support::isPowerOf2(alignment))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
uint32_t i = uint32_t(Support::alignUpDiff<size_t>(offset(), alignment));
if (i == 0)
if (i == 0) {
return kErrorOk;
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, i));
@@ -5164,8 +5211,9 @@ Error Assembler::align(AlignMode alignMode, uint32_t alignment) {
case AlignMode::kCode: {
uint32_t pattern = kNopA64;
if (ASMJIT_UNLIKELY(offset() & 0x3u))
if (ASMJIT_UNLIKELY(offset() & 0x3u)) {
return DebugUtils::errored(kErrorInvalidState);
}
while (i >= 4) {
writer.emit32uLE(pattern);

View File

@@ -21,7 +21,7 @@ class ASMJIT_VIRTAPI Assembler
public EmitterExplicitT<Assembler> {
public:
typedef BaseAssembler Base;
using Base = BaseAssembler;
//! \name Construction & Destruction
//! \{

View File

@@ -17,8 +17,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() {
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
if (code)
if (code) {
code->attach(this);
}
}
Builder::~Builder() noexcept {}

View File

@@ -23,7 +23,7 @@ class ASMJIT_VIRTAPI Builder
public EmitterExplicitT<Builder> {
public:
ASMJIT_NONCOPYABLE(Builder)
typedef BaseBuilder Base;
using Base = BaseBuilder;
//! \name Construction & Destruction
//! \{

View File

@@ -18,8 +18,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() {
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
if (code)
if (code) {
code->attach(this);
}
}
Compiler::~Compiler() noexcept {}

View File

@@ -24,7 +24,7 @@ class ASMJIT_VIRTAPI Compiler
public EmitterExplicitT<Compiler> {
public:
ASMJIT_NONCOPYABLE(Compiler)
typedef BaseCompiler Base;
using Base = BaseCompiler;
//! \name Construction & Destruction
//! \{

View File

@@ -54,14 +54,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
return emitter->ldr(dst.as<Gp>().x(), src);
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
return emitter->ldr(dst.as<Vec>().s(), src);
}
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
return emitter->ldr(dst.as<Vec>().d(), src);
}
if (TypeUtils::isVec128(typeId))
if (TypeUtils::isVec128(typeId)) {
return emitter->ldr(dst.as<Vec>().q(), src);
}
break;
}
@@ -90,14 +93,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
return emitter->str(src.as<Gp>().x(), dst);
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
return emitter->str(src.as<Vec>().s(), dst);
}
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
return emitter->str(src.as<Vec>().d(), dst);
}
if (TypeUtils::isVec128(typeId))
if (TypeUtils::isVec128(typeId)) {
return emitter->str(src.as<Vec>().q(), dst);
}
break;
}
@@ -120,14 +126,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
return emitter->mov(dst.as<Gp>().x(), src.as<Gp>().x());
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
return emitter->fmov(dst.as<Vec>().s(), src.as<Vec>().s());
}
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
return emitter->mov(dst.as<Vec>().b8(), src.as<Vec>().b8());
}
if (TypeUtils::isVec128(typeId))
if (TypeUtils::isVec128(typeId)) {
return emitter->mov(dst.as<Vec>().b16(), src.as<Vec>().b16());
}
break;
}
@@ -340,10 +349,12 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitProlog(const FuncFrame& frame) {
mem.makePreIndex();
}
if (pair.ids[1] == BaseReg::kIdBad)
if (pair.ids[1] == BaseReg::kIdBad) {
ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
else
}
else {
ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
}
mem.resetOffsetMode();
@@ -422,10 +433,12 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) {
mem.makePostIndex();
}
if (pair.ids[1] == BaseReg::kIdBad)
if (pair.ids[1] == BaseReg::kIdBad) {
ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
else
}
else {
ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
}
mem.resetOffsetMode();
}

View File

@@ -25,18 +25,24 @@ static inline bool shouldTreatAsCDecl(CallConvId ccId) noexcept {
}
static RegType regTypeFromFpOrVecTypeId(TypeId typeId) noexcept {
if (typeId == TypeId::kFloat32)
if (typeId == TypeId::kFloat32) {
return RegType::kARM_VecS;
else if (typeId == TypeId::kFloat64)
}
else if (typeId == TypeId::kFloat64) {
return RegType::kARM_VecD;
else if (TypeUtils::isVec32(typeId))
}
else if (TypeUtils::isVec32(typeId)) {
return RegType::kARM_VecS;
else if (TypeUtils::isVec64(typeId))
}
else if (TypeUtils::isVec64(typeId)) {
return RegType::kARM_VecD;
else if (TypeUtils::isVec128(typeId))
}
else if (TypeUtils::isVec128(typeId)) {
return RegType::kARM_VecV;
else
}
else {
return RegType::kNone;
}
}
ASMJIT_FAVOR_SIZE Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept {
@@ -116,8 +122,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
default: {
RegType regType = regTypeFromFpOrVecTypeId(typeId);
if (regType == RegType::kNone)
if (regType == RegType::kNone) {
return DebugUtils::errored(kErrorInvalidRegType);
}
func._rets[valueIndex].initReg(regType, valueIndex, typeId);
break;
@@ -139,8 +146,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
if (TypeUtils::isInt(typeId)) {
uint32_t regId = BaseReg::kIdBad;
if (gpzPos < CallConv::kMaxRegArgsPerGroup)
if (gpzPos < CallConv::kMaxRegArgsPerGroup) {
regId = cc._passedOrder[RegGroup::kGp].id[gpzPos];
}
if (regId != BaseReg::kIdBad) {
RegType regType = typeId <= TypeId::kUInt32 ? RegType::kARM_GpW : RegType::kARM_GpX;
@@ -150,8 +158,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
}
else {
uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), minStackArgSize);
if (size >= 8)
if (size >= 8) {
stackOffset = Support::alignUp(stackOffset, 8);
}
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
@@ -161,13 +170,15 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
if (TypeUtils::isFloat(typeId) || TypeUtils::isVec(typeId)) {
uint32_t regId = BaseReg::kIdBad;
if (vecPos < CallConv::kMaxRegArgsPerGroup)
if (vecPos < CallConv::kMaxRegArgsPerGroup) {
regId = cc._passedOrder[RegGroup::kVec].id[vecPos];
}
if (regId != BaseReg::kIdBad) {
RegType regType = regTypeFromFpOrVecTypeId(typeId);
if (regType == RegType::kNone)
if (regType == RegType::kNone) {
return DebugUtils::errored(kErrorInvalidRegType);
}
arg.initTypeId(typeId);
arg.assignRegData(regType, regId);
@@ -176,8 +187,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
}
else {
uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), minStackArgSize);
if (size >= 8)
if (size >= 8) {
stackOffset = Support::alignUp(stackOffset, 8);
}
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}

View File

@@ -802,7 +802,7 @@ namespace Predicate {
//! Address translate options (AT).
namespace AT {
static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
}
@@ -860,7 +860,7 @@ namespace DB {
//! Data cache maintenance options.
namespace DC {
static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
}
@@ -899,7 +899,7 @@ namespace DC {
//! Instruction cache maintenance options.
namespace IC {
static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
}
@@ -953,7 +953,7 @@ namespace PSB {
}
namespace TLBI {
static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
}
@@ -1052,7 +1052,7 @@ namespace TSB {
//! Processor state access through MSR.
namespace PState {
//! Encodes a pstate from `op0` and `op1`.
static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op0, uint32_t op1) noexcept {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op0, uint32_t op1) noexcept {
return (op0 << 3) | (op1 << 0);
}
@@ -1081,17 +1081,17 @@ namespace SysReg {
};
//! Encodes a system register from `op0`, `op1`, `cRn`, `cRm`, and `op2` fields.
static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op0, uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op0, uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
return (op0 << 14) | (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
}
//! Encodes a system register from `fields`.
static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(const Fields& fields) noexcept {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(const Fields& fields) noexcept {
return encode(fields.op0, fields.op1, fields.cRn, fields.cRm, fields.op2);
}
//! Decodes a system register to \ref Fields.
static ASMJIT_INLINE_NODEBUG constexpr Fields decode(uint32_t id) noexcept {
static ASMJIT_INLINE_CONSTEXPR Fields decode(uint32_t id) noexcept {
return Fields {
uint8_t((id >> 14) & 0x3u),
uint8_t((id >> 11) & 0x7u),

View File

@@ -23,8 +23,9 @@ namespace InstInternal {
#ifndef ASMJIT_NO_TEXT
Error instIdToString(InstId instId, InstStringifyOptions options, String& output) noexcept {
uint32_t realId = instId & uint32_t(InstIdParts::kRealId);
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId)))
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) {
return DebugUtils::errored(kErrorInvalidInstruction);
}
return InstNameUtils::decode(InstDB::_instNameIndexTable[realId], options, InstDB::_instNameStringTable, output);
}
@@ -100,8 +101,9 @@ Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount
// Get the instruction data.
uint32_t realId = inst.id() & uint32_t(InstIdParts::kRealId);
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId)))
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) {
return DebugUtils::errored(kErrorInvalidInstruction);
}
out->_instFlags = InstRWFlags::kNone;
out->_opCount = uint8_t(opCount);
@@ -139,10 +141,12 @@ Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount
op._consecutiveLeadCount = 0;
if (srcOp.isReg()) {
if (i == 0)
if (i == 0) {
op._consecutiveLeadCount = uint8_t(opCount - 1);
else
}
else {
op.addOpFlags(OpRWFlags::kConsecutive);
}
}
else {
const Mem& memOp = srcOp.as<Mem>();

View File

@@ -47,9 +47,13 @@ struct InstInfo {
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rwInfoIndex() const noexcept { return _rwInfoIndex; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(uint32_t flag) const { return (_flags & flag) != 0; }
//! \}
@@ -57,6 +61,7 @@ struct InstInfo {
ASMJIT_VARAPI const InstInfo _instInfoTable[];
[[nodiscard]]
static inline const InstInfo& infoById(InstId instId) noexcept {
instId &= uint32_t(InstIdParts::kRealId);
ASMJIT_ASSERT(Inst::isDefinedId(instId));

View File

@@ -264,7 +264,7 @@ namespace EncodingData {
#define M_OPCODE(field, bits) \
uint32_t _##field : bits; \
ASMJIT_INLINE_NODEBUG constexpr uint32_t field() const noexcept { return uint32_t(_##field) << (32 - bits); }
ASMJIT_INLINE_CONSTEXPR uint32_t field() const noexcept { return uint32_t(_##field) << (32 - bits); }
struct BaseOp {
uint32_t opcode;
@@ -477,20 +477,20 @@ struct BaseAtomicCasp {
uint32_t xOffset : 5;
};
typedef BaseOp BaseBranchReg;
typedef BaseOp BaseBranchRel;
typedef BaseOp BaseBranchCmp;
typedef BaseOp BaseBranchTst;
typedef BaseOp BaseExtract;
typedef BaseOp BaseBfc;
typedef BaseOp BaseBfi;
typedef BaseOp BaseBfx;
typedef BaseOp BaseCCmp;
typedef BaseOp BaseCInc;
typedef BaseOp BaseCSet;
typedef BaseOp BaseCSel;
typedef BaseOp BaseMovKNZ;
typedef BaseOp BaseMull;
using BaseBranchReg = BaseOp;
using BaseBranchRel = BaseOp;
using BaseBranchCmp = BaseOp;
using BaseBranchTst = BaseOp;
using BaseExtract = BaseOp;
using BaseBfc = BaseOp;
using BaseBfi = BaseOp;
using BaseBfx = BaseOp;
using BaseCCmp = BaseOp;
using BaseCInc = BaseOp;
using BaseCSet = BaseOp;
using BaseCSel = BaseOp;
using BaseMovKNZ = BaseOp;
using BaseMull = BaseOp;
struct FSimdGeneric {
uint32_t _scalarOp : 28;
@@ -504,9 +504,9 @@ struct FSimdGeneric {
constexpr uint32_t vectorHf() const noexcept { return uint32_t(_vectorHf); }
};
typedef FSimdGeneric FSimdVV;
typedef FSimdGeneric FSimdVVV;
typedef FSimdGeneric FSimdVVVV;
using FSimdVV = FSimdGeneric;
using FSimdVVV = FSimdGeneric;
using FSimdVVVV = FSimdGeneric;
struct FSimdSV {
uint32_t opcode;

View File

@@ -49,17 +49,27 @@ public:
};
//! Test whether this register is ZR register.
ASMJIT_INLINE_NODEBUG constexpr bool isZR() const noexcept { return id() == kIdZr; }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isZR() const noexcept { return id() == kIdZr; }
//! Test whether this register is SP register.
ASMJIT_INLINE_NODEBUG constexpr bool isSP() const noexcept { return id() == kIdSp; }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isSP() const noexcept { return id() == kIdSp; }
//! Cast this register to a 32-bit W register (returns a new operand).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG GpW w() const noexcept;
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG GpW r32() const noexcept;
//! Cast this register to a 64-bit X register (returns a new operand).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG GpX x() const noexcept;
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG GpX r64() const noexcept;
};
@@ -118,117 +128,181 @@ public:
//! \endcond
//! Returns whether the register has element type or element index (or both).
ASMJIT_INLINE_NODEBUG constexpr bool hasElementTypeOrIndex() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask | kSignatureRegElementFlagMask>(); }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool hasElementTypeOrIndex() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask | kSignatureRegElementFlagMask>(); }
//! Returns whether the vector register has associated a vector element type.
ASMJIT_INLINE_NODEBUG constexpr bool hasElementType() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask>(); }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool hasElementType() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask>(); }
//! Returns vector element type of the register.
ASMJIT_INLINE_NODEBUG constexpr VecElementType elementType() const noexcept { return VecElementType(_signature.getField<kSignatureRegElementTypeMask>()); }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR VecElementType elementType() const noexcept { return VecElementType(_signature.getField<kSignatureRegElementTypeMask>()); }
//! Sets vector element type of the register to `elementType`.
ASMJIT_INLINE_NODEBUG void setElementType(VecElementType elementType) noexcept { _signature.setField<kSignatureRegElementTypeMask>(uint32_t(elementType)); }
//! Resets vector element type to none.
ASMJIT_INLINE_NODEBUG void resetElementType() noexcept { _signature.setField<kSignatureRegElementTypeMask>(0); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecB8() const noexcept {
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecB8() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementB);
}
ASMJIT_INLINE_NODEBUG constexpr bool isVecH4() const noexcept {
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecH4() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementH);
}
ASMJIT_INLINE_NODEBUG constexpr bool isVecS2() const noexcept {
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecS2() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementS);
}
ASMJIT_INLINE_NODEBUG constexpr bool isVecD1() const noexcept {
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecD1() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature);
}
ASMJIT_INLINE_NODEBUG constexpr bool isVecB16() const noexcept {
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecB16() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB);
}
ASMJIT_INLINE_NODEBUG constexpr bool isVecH8() const noexcept {
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecH8() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH);
}
ASMJIT_INLINE_NODEBUG constexpr bool isVecS4() const noexcept {
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecS4() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementS);
}
ASMJIT_INLINE_NODEBUG constexpr bool isVecD2() const noexcept {
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecD2() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementD);
}
ASMJIT_INLINE_NODEBUG constexpr bool isVecB4x4() const noexcept {
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecB4x4() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB4);
}
ASMJIT_INLINE_NODEBUG constexpr bool isVecH2x4() const noexcept {
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecH2x4() const noexcept {
return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH2);
}
//! Creates a cloned register with element access.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Vec at(uint32_t elementIndex) const noexcept {
return Vec((signature() & ~kSignatureRegElementIndexMask) | (elementIndex << kSignatureRegElementIndexShift) | kSignatureRegElementFlagMask, id());
}
//! Cast this register to an 8-bit B register (AArch64 only).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecB b() const noexcept;
//! Cast this register to a 16-bit H register (AArch64 only).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecH h() const noexcept;
//! Cast this register to a 32-bit S register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecS s() const noexcept;
//! Cast this register to a 64-bit D register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecD d() const noexcept;
//! Cast this register to a 128-bit Q register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV q() const noexcept;
//! Cast this register to a 128-bit V register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV v() const noexcept;
//! Casts this register to b (clone).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Vec v8() const noexcept;
//! Casts this register to h (clone).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Vec v16() const noexcept;
//! Casts this register to s (clone).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Vec v32() const noexcept;
//! Casts this register to d (clone).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Vec v64() const noexcept;
//! Casts this register to q (clone).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Vec v128() const noexcept;
//! Cast this register to a 128-bit V.B[elementIndex] register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV b(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.H[elementIndex] register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV h(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.S[elementIndex] register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV s(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.D[elementIndex] register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV d(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.H2[elementIndex] register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV h2(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.B4[elementIndex] register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV b4(uint32_t elementIndex) const noexcept;
//! Cast this register to V.8B.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecD b8() const noexcept;
//! Cast this register to V.16B.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV b16() const noexcept;
//! Cast this register to V.2H.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecS h2() const noexcept;
//! Cast this register to V.4H.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecD h4() const noexcept;
//! Cast this register to V.8H.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV h8() const noexcept;
//! Cast this register to V.2S.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecD s2() const noexcept;
//! Cast this register to V.4S.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV s4() const noexcept;
//! Cast this register to V.2D.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VecV d2() const noexcept;
static ASMJIT_INLINE_NODEBUG constexpr OperandSignature _makeElementAccessSignature(VecElementType elementType, uint32_t elementIndex) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR OperandSignature _makeElementAccessSignature(VecElementType elementType, uint32_t elementIndex) noexcept {
return OperandSignature{
uint32_t(RegTraits<RegType::kARM_VecV>::kSignature) |
uint32_t(kSignatureRegElementFlagMask) |
@@ -301,16 +375,24 @@ namespace regs {
#endif
//! Creates a 32-bit W register operand.
static ASMJIT_INLINE_NODEBUG constexpr GpW w(uint32_t id) noexcept { return GpW(id); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR GpW w(uint32_t id) noexcept { return GpW(id); }
//! Creates a 64-bit X register operand.
static ASMJIT_INLINE_NODEBUG constexpr GpX x(uint32_t id) noexcept { return GpX(id); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR GpX x(uint32_t id) noexcept { return GpX(id); }
//! Creates a 32-bit S register operand.
static ASMJIT_INLINE_NODEBUG constexpr VecS s(uint32_t id) noexcept { return VecS(id); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR VecS s(uint32_t id) noexcept { return VecS(id); }
//! Creates a 64-bit D register operand.
static ASMJIT_INLINE_NODEBUG constexpr VecD d(uint32_t id) noexcept { return VecD(id); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR VecD d(uint32_t id) noexcept { return VecD(id); }
//! Creates a 1282-bit V register operand.
static ASMJIT_INLINE_NODEBUG constexpr VecV v(uint32_t id) noexcept { return VecV(id); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR VecV v(uint32_t id) noexcept { return VecV(id); }
static constexpr GpW w0 = GpW(0);
static constexpr GpW w1 = GpW(1);
@@ -589,22 +671,36 @@ using namespace regs;
//! \{
//! Constructs a `UXTB #value` extend and shift (unsigned byte extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxtb(uint32_t value) noexcept { return Shift(ShiftOp::kUXTB, value); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift uxtb(uint32_t value) noexcept { return Shift(ShiftOp::kUXTB, value); }
//! Constructs a `UXTH #value` extend and shift (unsigned hword extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxth(uint32_t value) noexcept { return Shift(ShiftOp::kUXTH, value); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift uxth(uint32_t value) noexcept { return Shift(ShiftOp::kUXTH, value); }
//! Constructs a `UXTW #value` extend and shift (unsigned word extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxtw(uint32_t value) noexcept { return Shift(ShiftOp::kUXTW, value); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift uxtw(uint32_t value) noexcept { return Shift(ShiftOp::kUXTW, value); }
//! Constructs a `UXTX #value` extend and shift (unsigned dword extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxtx(uint32_t value) noexcept { return Shift(ShiftOp::kUXTX, value); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift uxtx(uint32_t value) noexcept { return Shift(ShiftOp::kUXTX, value); }
//! Constructs a `SXTB #value` extend and shift (signed byte extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxtb(uint32_t value) noexcept { return Shift(ShiftOp::kSXTB, value); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift sxtb(uint32_t value) noexcept { return Shift(ShiftOp::kSXTB, value); }
//! Constructs a `SXTH #value` extend and shift (signed hword extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxth(uint32_t value) noexcept { return Shift(ShiftOp::kSXTH, value); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift sxth(uint32_t value) noexcept { return Shift(ShiftOp::kSXTH, value); }
//! Constructs a `SXTW #value` extend and shift (signed word extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxtw(uint32_t value) noexcept { return Shift(ShiftOp::kSXTW, value); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift sxtw(uint32_t value) noexcept { return Shift(ShiftOp::kSXTW, value); }
//! Constructs a `SXTX #value` extend and shift (signed dword extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxtx(uint32_t value) noexcept { return Shift(ShiftOp::kSXTX, value); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift sxtx(uint32_t value) noexcept { return Shift(ShiftOp::kSXTX, value); }
//! \}
@@ -612,49 +708,57 @@ static ASMJIT_INLINE_NODEBUG constexpr Shift sxtx(uint32_t value) noexcept { ret
//! \{
//! Creates `[base, offset]` memory operand (offset mode) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, int32_t offset = 0) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset);
}
//! Creates `[base, offset]!` memory operand (pre-index mode) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_pre(const Gp& base, int32_t offset = 0) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_pre(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPreIndex));
}
//! Creates `[base], offset` memory operand (post-index mode) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_post(const Gp& base, int32_t offset = 0) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_post(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPostIndex));
}
//! Creates `[base, index]` memory operand (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, const Gp& index) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, const Gp& index) noexcept {
return Mem(base, index);
}
//! Creates `[base, index]!` memory operand (pre-index mode) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_pre(const Gp& base, const Gp& index) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_pre(const Gp& base, const Gp& index) noexcept {
return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPreIndex));
}
//! Creates `[base], index` memory operand (post-index mode) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_post(const Gp& base, const Gp& index) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_post(const Gp& base, const Gp& index) noexcept {
return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPostIndex));
}
//! Creates `[base, index, SHIFT_OP #shift]` memory operand (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, const Gp& index, const Shift& shift) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, const Gp& index, const Shift& shift) noexcept {
return Mem(base, index, shift);
}
//! Creates `[base, offset]` memory operand (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Label& base, int32_t offset = 0) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Label& base, int32_t offset = 0) noexcept {
return Mem(base, offset);
}
// TODO: [ARM] PC + offset address.
#if 0
//! Creates `[PC + offset]` (relative) memory operand.
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const PC& pc, int32_t offset = 0) noexcept {
static ASMJIT_INLINE_CONSTEXPR Mem ptr(const PC& pc, int32_t offset = 0) noexcept {
return Mem(pc, offset);
}
#endif

View File

@@ -22,7 +22,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// ========================
// TODO: [ARM] These should be shared with all backends.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline uint64_t raImmMaskFromSize(uint32_t size) noexcept {
ASMJIT_ASSERT(size > 0 && size < 256);
static const uint64_t masks[] = {
@@ -47,6 +47,7 @@ static const RegMask raConsecutiveLeadCountToRegMaskFilter[5] = {
0x1FFFFFFFu // [4] 4 consecutive registers.
};
[[nodiscard]]
static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept {
static constexpr RATiedFlags map[] = {
RATiedFlags::kNone,
@@ -58,15 +59,18 @@ static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept {
return map[uint32_t(rwFlags & OpRWFlags::kRW)];
}
[[nodiscard]]
static inline RATiedFlags raRegRwFlags(OpRWFlags flags) noexcept {
return raUseOutFlagsFromRWFlags(flags);
}
[[nodiscard]]
static inline RATiedFlags raMemBaseRwFlags(OpRWFlags flags) noexcept {
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemBaseRW)>::value;
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
}
[[nodiscard]]
static inline RATiedFlags raMemIndexRwFlags(OpRWFlags flags) noexcept {
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemIndexRW)>::value;
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
@@ -82,18 +86,31 @@ public:
: RACFGBuilderT<RACFGBuilder>(pass),
_arch(pass->cc()->arch()) {}
[[nodiscard]]
inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cc); }
[[nodiscard]]
Error onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept;
[[nodiscard]]
Error onBeforeInvoke(InvokeNode* invokeNode) noexcept;
[[nodiscard]]
Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept;
[[nodiscard]]
Error moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept;
[[nodiscard]]
Error moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept;
[[nodiscard]]
Error moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept;
[[nodiscard]]
Error onBeforeRet(FuncRetNode* funcRet) noexcept;
[[nodiscard]]
Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept;
};
@@ -105,20 +122,26 @@ static InstControlFlow getControlFlowType(InstId instId) noexcept {
switch (BaseInst::extractRealId(instId)) {
case Inst::kIdB:
case Inst::kIdBr:
if (BaseInst::extractARMCondCode(instId) == CondCode::kAL)
if (BaseInst::extractARMCondCode(instId) == CondCode::kAL) {
return InstControlFlow::kJump;
else
}
else {
return InstControlFlow::kBranch;
}
case Inst::kIdBl:
case Inst::kIdBlr:
return InstControlFlow::kCall;
case Inst::kIdCbz:
case Inst::kIdCbnz:
case Inst::kIdTbz:
case Inst::kIdTbnz:
return InstControlFlow::kBranch;
case Inst::kIdRet:
return InstControlFlow::kReturn;
default:
return InstControlFlow::kRegular;
}
@@ -180,12 +203,14 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
if (opRwInfo.consecutiveLeadCount()) {
// There must be a single consecutive register lead, otherwise the RW data is invalid.
if (consecutiveOffset != 0xFFFFFFFFu)
if (consecutiveOffset != 0xFFFFFFFFu) {
return DebugUtils::errored(kErrorInvalidState);
}
// A consecutive lead register cannot be used as a consecutive +1/+2/+3 register, the registers must be distinct.
if (RATiedReg::consecutiveDataFromFlags(flags) != 0)
if (RATiedReg::consecutiveDataFromFlags(flags) != 0) {
return DebugUtils::errored(kErrorNotConsecutiveRegs);
}
flags |= RATiedFlags::kLeadConsecutive | RATiedReg::consecutiveDataToFlags(opRwInfo.consecutiveLeadCount() - 1);
consecutiveOffset = 0;
@@ -208,8 +233,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
flags |= RATiedFlags::kUseFixed;
}
else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
if (consecutiveOffset == 0xFFFFFFFFu)
if (consecutiveOffset == 0xFFFFFFFFu) {
return DebugUtils::errored(kErrorInvalidState);
}
flags |= RATiedFlags::kUseConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
}
}
@@ -220,8 +246,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
flags |= RATiedFlags::kOutFixed;
}
else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
if (consecutiveOffset == 0xFFFFFFFFu)
if (consecutiveOffset == 0xFFFFFFFFu) {
return DebugUtils::errored(kErrorInvalidState);
}
flags |= RATiedFlags::kOutConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
}
}
@@ -231,19 +258,23 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
// Only the first 0..15 registers can be used if the register uses
// element accessor that accesses half-words (h[0..7] elements).
if (instInfo.hasFlag(InstDB::kInstFlagVH0_15) && reg.as<Vec>().elementType() == VecElementType::kH) {
if (Support::test(flags, RATiedFlags::kUse))
if (Support::test(flags, RATiedFlags::kUse)) {
useId &= 0x0000FFFFu;
else
}
else {
outId &= 0x0000FFFFu;
}
}
}
ASMJIT_PROPAGATE(ib.add(workReg, flags, useRegs, useId, useRewriteMask, outRegs, outId, outRewriteMask, opRwInfo.rmSize(), consecutiveParent));
if (singleRegOps == i)
if (singleRegOps == i) {
singleRegOps++;
}
if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive))
if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive)) {
consecutiveParent = workReg->workId();
}
}
}
else if (op.isMem()) {
@@ -254,7 +285,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
if (mem.isRegHome()) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg));
_pass->getOrCreateStackSlot(workReg);
if (ASMJIT_UNLIKELY(!_pass->getOrCreateStackSlot(workReg))) {
return DebugUtils::errored(kErrorOutOfMemory);
}
}
else if (mem.hasBaseReg()) {
uint32_t vIndex = Operand::virtIdToIndex(mem.baseId());
@@ -273,10 +306,12 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (Support::test(flags, RATiedFlags::kUse))
if (Support::test(flags, RATiedFlags::kUse)) {
useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
else
}
else {
outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
}
ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
}
@@ -299,10 +334,12 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (Support::test(flags, RATiedFlags::kUse))
if (Support::test(flags, RATiedFlags::kUse)) {
useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
else
}
else {
outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
}
ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
}
@@ -374,8 +411,9 @@ Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {
if (fd.hasRet()) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& ret = fd.ret(valueIndex);
if (!ret)
if (!ret) {
break;
}
const Operand& op = invokeNode->ret(valueIndex);
if (op.isReg()) {
@@ -411,14 +449,16 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
if (!argPack[valueIndex])
if (!argPack[valueIndex]) {
continue;
}
const FuncValue& arg = argPack[valueIndex];
const Operand& op = invokeNode->arg(argIndex, valueIndex);
if (op.isNone())
if (op.isNone()) {
continue;
}
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
@@ -427,8 +467,9 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept
if (arg.isIndirect()) {
RegGroup regGroup = workReg->group();
if (regGroup != RegGroup::kGp)
if (regGroup != RegGroup::kGp) {
return DebugUtils::errored(kErrorInvalidState);
}
ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
}
else if (arg.isReg()) {
@@ -445,8 +486,9 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept
for (uint32_t retIndex = 0; retIndex < Globals::kMaxValuePack; retIndex++) {
const FuncValue& ret = fd.ret(retIndex);
if (!ret)
if (!ret) {
break;
}
const Operand& op = invokeNode->ret(retIndex);
if (op.isReg()) {
@@ -525,11 +567,13 @@ Error RACFGBuilder::moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& a
DebugUtils::unused(invokeNode);
Mem stackPtr = ptr(_pass->_sp.as<Gp>(), arg.stackOffset());
if (reg.isGp())
if (reg.isGp()) {
return cc()->str(reg.as<Gp>(), stackPtr);
}
if (reg.isVec())
if (reg.isVec()) {
return cc()->str(reg.as<Vec>(), stackPtr);
}
return DebugUtils::errored(kErrorInvalidState);
}
@@ -549,11 +593,14 @@ Error RACFGBuilder::onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept {
for (uint32_t i = 0; i < opCount; i++) {
const Operand& op = opArray[i];
if (op.isNone()) continue;
if (op.isNone()) {
continue;
}
const FuncValue& ret = funcDetail.ret(i);
if (ASMJIT_UNLIKELY(!ret.isReg()))
if (ASMJIT_UNLIKELY(!ret.isReg())) {
return DebugUtils::errored(kErrorInvalidAssignment);
}
if (op.isReg()) {
// Register return value.
@@ -614,8 +661,9 @@ void ARMRAPass::onInit() noexcept {
// Apple ABI requires that the frame-pointer register is not changed by leaf functions and properly updated
// by non-leaf functions. So, let's make this register unavailable as it's just not safe to update it.
if (hasFP || cc()->environment().isDarwin())
if (hasFP || cc()->environment().isDarwin()) {
makeUnavailable(RegGroup::kGp, Gp::kIdFp);
}
makeUnavailable(RegGroup::kGp, Gp::kIdSp);
makeUnavailable(RegGroup::kGp, Gp::kIdOs); // OS-specific use, usually TLS.
@@ -663,13 +711,17 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no
Support::BitWordIterator<uint32_t> useIt(tiedReg->useRewriteMask());
uint32_t useId = tiedReg->useId();
while (useIt.hasNext())
while (useIt.hasNext()) {
inst->rewriteIdAtIndex(useIt.next(), useId);
}
Support::BitWordIterator<uint32_t> outIt(tiedReg->outRewriteMask());
uint32_t outId = tiedReg->outId();
while (outIt.hasNext())
while (outIt.hasNext()) {
inst->rewriteIdAtIndex(outIt.next(), outId);
}
}
// This data is allocated by Zone passed to `runOnFunction()`, which
@@ -703,8 +755,9 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no
BaseMem& mem = op.as<BaseMem>();
if (mem.isRegHome()) {
uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId());
if (ASMJIT_UNLIKELY(virtIndex >= virtCount))
if (ASMJIT_UNLIKELY(virtIndex >= virtCount)) {
return DebugUtils::errored(kErrorInvalidVirtId);
}
VirtReg* virtReg = cc()->virtRegByIndex(virtIndex);
RAWorkReg* workReg = virtReg->workReg();
@@ -730,8 +783,9 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no
inst->setOp(1, Imm(offset));
}
else {
if (mem.hasIndex())
if (mem.hasIndex()) {
return DebugUtils::errored(kErrorInvalidAddressIndex);
}
GpX dst(inst->op(0).as<Gp>().id());
GpX base(mem.baseId());
@@ -775,8 +829,9 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no
// ================================
Error ARMRAPass::updateStackFrame() noexcept {
if (_func->frame().hasFuncCalls())
if (_func->frame().hasFuncCalls()) {
_func->frame().addDirtyRegs(RegGroup::kGp, Support::bitMask(Gp::kIdLr));
}
return BaseRAPass::updateStackFrame();
}

View File

@@ -29,10 +29,15 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
class ARMRAPass : public BaseRAPass {
public:
ASMJIT_NONCOPYABLE(ARMRAPass)
typedef BaseRAPass Base;
using Base = BaseRAPass;
//! \name Members
//! \{
EmitHelper _emitHelper;
//! \}
//! \name Construction & Destruction
//! \{
@@ -45,9 +50,11 @@ public:
//! \{
//! Returns the compiler casted to `arm::Compiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Compiler* cc() const noexcept { return static_cast<Compiler*>(_cb); }
//! Returns emit helper.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitHelper* emitHelper() noexcept { return &_emitHelper; }
//! \}

View File

@@ -334,10 +334,12 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
ASMJIT_ASSERT(vReg != nullptr);
const char* name = vReg->name();
if (name && name[0] != '\0')
if (name && name[0] != '\0') {
ASMJIT_PROPAGATE(sb.append(name));
else
}
else {
ASMJIT_PROPAGATE(sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(rId))));
}
virtRegFormatted = true;
}
@@ -356,19 +358,22 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
case RegType::kARM_VecD:
case RegType::kARM_VecV:
letter = bhsdq[uint32_t(regType) - uint32_t(RegType::kARM_VecB)];
if (elementType)
if (elementType) {
letter = 'v';
}
break;
case RegType::kARM_GpW:
if (Environment::is64Bit(arch)) {
letter = 'w';
if (rId == a64::Gp::kIdZr)
if (rId == a64::Gp::kIdZr) {
return sb.append("wzr", 3);
}
if (rId == a64::Gp::kIdSp)
if (rId == a64::Gp::kIdSp) {
return sb.append("wsp", 3);
}
}
else {
letter = 'r';
@@ -377,17 +382,20 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
case RegType::kARM_GpX:
if (Environment::is64Bit(arch)) {
if (rId == a64::Gp::kIdZr)
if (rId == a64::Gp::kIdZr) {
return sb.append("xzr", 3);
if (rId == a64::Gp::kIdSp)
}
if (rId == a64::Gp::kIdSp) {
return sb.append("sp", 2);
}
letter = 'x';
break;
}
// X registers are undefined in 32-bit mode.
ASMJIT_FALLTHROUGH;
[[fallthrough]];
default:
ASMJIT_PROPAGATE(sb.appendFormat("<Reg-%u>?%u", uint32_t(regType), rId));
@@ -445,8 +453,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegisterList(
count++;
} while (rMask & mask);
if (!first)
if (!first) {
ASMJIT_PROPAGATE(sb.append(", "));
}
ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, regType, start, 0, 0xFFFFFFFFu));
if (count >= 2u) {
@@ -477,8 +486,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
uint32_t elementType = op._signature.getField<BaseVec::kSignatureRegElementTypeMask>();
uint32_t elementIndex = op.as<BaseVec>().elementIndex();
if (!op.as<BaseVec>().hasElementIndex())
if (!op.as<BaseVec>().hasElementIndex()) {
elementIndex = 0xFFFFFFFFu;
}
return formatRegister(sb, flags, emitter, arch, reg.type(), reg.id(), elementType, elementIndex);
}
@@ -524,8 +534,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
int64_t off = int64_t(m.offset());
uint32_t base = 10;
if (Support::test(flags, FormatFlags::kHexOffsets) && uint64_t(off) > 9)
if (Support::test(flags, FormatFlags::kHexOffsets) && uint64_t(off) > 9) {
base = 16;
}
if (base == 10) {
ASMJIT_PROPAGATE(sb.appendInt(off, base));
@@ -538,16 +549,19 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
if (m.hasShift()) {
ASMJIT_PROPAGATE(sb.append(' '));
if (!m.isPreOrPost())
if (!m.isPreOrPost()) {
ASMJIT_PROPAGATE(formatShiftOp(sb, m.shiftOp()));
}
ASMJIT_PROPAGATE(sb.appendFormat(" %u", m.shift()));
}
if (!m.isPostIndex())
if (!m.isPostIndex()) {
ASMJIT_PROPAGATE(sb.append(']'));
}
if (m.isPreIndex())
if (m.isPreIndex()) {
ASMJIT_PROPAGATE(sb.append('!'));
}
return kErrorOk;
}

View File

@@ -47,40 +47,40 @@ public:
ASMJIT_DEFINE_ABSTRACT_REG(Reg, BaseReg)
//! Gets whether the register is either `R` or `W` register (32-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isGpR() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isGpR() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; }
//! Gets whether the register is either `R` or `W` register (32-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isGpW() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isGpW() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; }
//! Gets whether the register is an `X` register (64-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isGpX() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpX>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isGpX() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpX>::kSignature; }
//! Gets whether the register is a VEC-B register (8-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecB() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVecB() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; }
//! Gets whether the register is a VEC-H register (16-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecH() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVecH() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; }
//! Gets whether the register is a VEC-S register (32-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecS() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVecS() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; }
//! Gets whether the register is a VEC-D register (64-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecD() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVecD() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; }
//! Gets whether the register is a VEC-Q register (128-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecQ() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVecQ() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
//! Gets whether the register is either VEC-D (64-bit) or VEC-Q (128-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecDOrQ() const noexcept { return uint32_t(type()) - uint32_t(RegType::kARM_VecD) <= 1u; }
ASMJIT_INLINE_CONSTEXPR bool isVecDOrQ() const noexcept { return uint32_t(type()) - uint32_t(RegType::kARM_VecD) <= 1u; }
//! Gets whether the register is a VEC-V register (128-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecV() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVecV() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
//! Gets whether the register is an 8-bit vector register or view, alias of \ref isVecB().
ASMJIT_INLINE_NODEBUG constexpr bool isVec8() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVec8() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; }
//! Gets whether the register is a 16-bit vector register or view, alias of \ref isVecH().
ASMJIT_INLINE_NODEBUG constexpr bool isVec16() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVec16() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; }
//! Gets whether the register is a 32-bit vector register or view, alias of \ref isVecS().
ASMJIT_INLINE_NODEBUG constexpr bool isVec32() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVec32() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; }
//! Gets whether the register is a 64-bit vector register or view, alias of \ref isVecD().
ASMJIT_INLINE_NODEBUG constexpr bool isVec64() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVec64() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; }
//! Gets whether the register is a 128-bit vector register or view, alias of \ref isVecQ().
ASMJIT_INLINE_NODEBUG constexpr bool isVec128() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
ASMJIT_INLINE_CONSTEXPR bool isVec128() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
template<RegType kRegType>
ASMJIT_INLINE_NODEBUG void setRegT(uint32_t id) noexcept {
ASMJIT_INLINE_CONSTEXPR void setRegT(uint32_t id) noexcept {
setSignature(RegTraits<kRegType>::kSignature);
setId(id);
}
@@ -95,13 +95,13 @@ public:
static ASMJIT_INLINE_NODEBUG OperandSignature signatureOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToSignature(type); }
template<RegType kRegType>
static ASMJIT_INLINE_NODEBUG RegGroup groupOfT() noexcept { return RegTraits<kRegType>::kGroup; }
static ASMJIT_INLINE_CONSTEXPR RegGroup groupOfT() noexcept { return RegTraits<kRegType>::kGroup; }
template<RegType kRegType>
static ASMJIT_INLINE_NODEBUG TypeId typeIdOfT() noexcept { return RegTraits<kRegType>::kTypeId; }
static ASMJIT_INLINE_CONSTEXPR TypeId typeIdOfT() noexcept { return RegTraits<kRegType>::kTypeId; }
template<RegType kRegType>
static ASMJIT_INLINE_NODEBUG OperandSignature signatureOfT() noexcept { return OperandSignature{RegTraits<kRegType>::kSignature}; }
static ASMJIT_INLINE_CONSTEXPR OperandSignature signatureOfT() noexcept { return OperandSignature{RegTraits<kRegType>::kSignature}; }
static ASMJIT_INLINE_NODEBUG bool isGpW(const Operand_& op) noexcept { return op.as<Reg>().isGpW(); }
static ASMJIT_INLINE_NODEBUG bool isGpX(const Operand_& op) noexcept { return op.as<Reg>().isGpX(); }
@@ -146,16 +146,16 @@ public:
};
//! Returns whether the register has element index (it's an element index access).
ASMJIT_INLINE_NODEBUG constexpr bool hasElementIndex() const noexcept { return _signature.hasField<kSignatureRegElementFlagMask>(); }
ASMJIT_INLINE_CONSTEXPR bool hasElementIndex() const noexcept { return _signature.hasField<kSignatureRegElementFlagMask>(); }
//! Returns element index of the register.
ASMJIT_INLINE_NODEBUG constexpr uint32_t elementIndex() const noexcept { return _signature.getField<kSignatureRegElementIndexMask>(); }
ASMJIT_INLINE_CONSTEXPR uint32_t elementIndex() const noexcept { return _signature.getField<kSignatureRegElementIndexMask>(); }
//! Sets element index of the register to `elementIndex`.
ASMJIT_INLINE_NODEBUG void setElementIndex(uint32_t elementIndex) noexcept {
ASMJIT_INLINE_CONSTEXPR void setElementIndex(uint32_t elementIndex) noexcept {
_signature |= kSignatureRegElementFlagMask;
_signature.setField<kSignatureRegElementIndexMask>(elementIndex);
}
//! Resets element index of the register.
ASMJIT_INLINE_NODEBUG void resetElementIndex() noexcept {
ASMJIT_INLINE_CONSTEXPR void resetElementIndex() noexcept {
_signature &= ~(kSignatureRegElementFlagMask | kSignatureRegElementIndexMask);
}
};
@@ -187,35 +187,35 @@ public:
//! \{
//! Construct a default `Mem` operand, that points to [0].
ASMJIT_INLINE_NODEBUG constexpr Mem() noexcept
ASMJIT_INLINE_CONSTEXPR Mem() noexcept
: BaseMem() {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const Mem& other) noexcept
ASMJIT_INLINE_CONSTEXPR Mem(const Mem& other) noexcept
: BaseMem(other) {}
ASMJIT_INLINE_NODEBUG explicit Mem(Globals::NoInit_) noexcept
: BaseMem(Globals::NoInit) {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept
ASMJIT_INLINE_CONSTEXPR Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept
: BaseMem(signature, baseId, indexId, offset) {}
ASMJIT_INLINE_NODEBUG constexpr explicit Mem(const Label& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
ASMJIT_INLINE_CONSTEXPR explicit Mem(const Label& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(RegType::kLabelTag) |
signature, base.id(), 0, off) {}
ASMJIT_INLINE_NODEBUG constexpr explicit Mem(const BaseReg& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
ASMJIT_INLINE_CONSTEXPR explicit Mem(const BaseReg& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) |
signature, base.id(), 0, off) {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const BaseReg& base, const BaseReg& index, Signature signature = Signature{0}) noexcept
ASMJIT_INLINE_CONSTEXPR Mem(const BaseReg& base, const BaseReg& index, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) |
Signature::fromMemIndexType(index.type()) |
signature, base.id(), index.id(), 0) {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const BaseReg& base, const BaseReg& index, const Shift& shift, Signature signature = Signature{0}) noexcept
ASMJIT_INLINE_CONSTEXPR Mem(const BaseReg& base, const BaseReg& index, const Shift& shift, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) |
Signature::fromMemIndexType(index.type()) |
@@ -223,7 +223,7 @@ public:
Signature::fromValue<kSignatureMemShiftValueMask>(shift.value()) |
signature, base.id(), index.id(), 0) {}
ASMJIT_INLINE_NODEBUG constexpr explicit Mem(uint64_t base, Signature signature = Signature{0}) noexcept
ASMJIT_INLINE_CONSTEXPR explicit Mem(uint64_t base, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
signature, uint32_t(base >> 32), 0, int32_t(uint32_t(base & 0xFFFFFFFFu))) {}
@@ -232,7 +232,10 @@ public:
//! \name Overloaded Operators
//! \{
ASMJIT_INLINE_NODEBUG Mem& operator=(const Mem& other) noexcept = default;
ASMJIT_INLINE_CONSTEXPR Mem& operator=(const Mem& other) noexcept {
copyFrom(other);
return *this;
}
//! \}
@@ -240,24 +243,24 @@ public:
//! \{
//! Clones the memory operand.
ASMJIT_INLINE_NODEBUG constexpr Mem clone() const noexcept { return Mem(*this); }
ASMJIT_INLINE_CONSTEXPR Mem clone() const noexcept { return Mem(*this); }
//! Gets new memory operand adjusted by `off`.
ASMJIT_INLINE_NODEBUG Mem cloneAdjusted(int64_t off) const noexcept {
ASMJIT_INLINE_CONSTEXPR Mem cloneAdjusted(int64_t off) const noexcept {
Mem result(*this);
result.addOffset(off);
return result;
}
//! Clones the memory operand and makes it pre-index.
ASMJIT_INLINE_NODEBUG Mem pre() const noexcept {
ASMJIT_INLINE_CONSTEXPR Mem pre() const noexcept {
Mem result(*this);
result.setOffsetMode(OffsetMode::kPreIndex);
return result;
}
//! Clones the memory operand, applies a given offset `off` and makes it pre-index.
ASMJIT_INLINE_NODEBUG Mem pre(int64_t off) const noexcept {
ASMJIT_INLINE_CONSTEXPR Mem pre(int64_t off) const noexcept {
Mem result(*this);
result.setOffsetMode(OffsetMode::kPreIndex);
result.addOffset(off);
@@ -265,14 +268,14 @@ public:
}
//! Clones the memory operand and makes it post-index.
ASMJIT_INLINE_NODEBUG Mem post() const noexcept {
ASMJIT_INLINE_CONSTEXPR Mem post() const noexcept {
Mem result(*this);
result.setOffsetMode(OffsetMode::kPostIndex);
return result;
}
//! Clones the memory operand, applies a given offset `off` and makes it post-index.
ASMJIT_INLINE_NODEBUG Mem post(int64_t off) const noexcept {
ASMJIT_INLINE_CONSTEXPR Mem post(int64_t off) const noexcept {
Mem result(*this);
result.setOffsetMode(OffsetMode::kPostIndex);
result.addOffset(off);
@@ -296,12 +299,12 @@ public:
using BaseMem::setIndex;
ASMJIT_INLINE_NODEBUG void setIndex(const BaseReg& index, uint32_t shift) noexcept {
ASMJIT_INLINE_CONSTEXPR void setIndex(const BaseReg& index, uint32_t shift) noexcept {
setIndex(index);
setShift(shift);
}
ASMJIT_INLINE_NODEBUG void setIndex(const BaseReg& index, Shift shift) noexcept {
ASMJIT_INLINE_CONSTEXPR void setIndex(const BaseReg& index, Shift shift) noexcept {
setIndex(index);
setShift(shift);
}
@@ -312,48 +315,48 @@ public:
//! \{
//! Gets offset mode.
ASMJIT_INLINE_NODEBUG constexpr OffsetMode offsetMode() const noexcept { return OffsetMode(_signature.getField<kSignatureMemOffsetModeMask>()); }
ASMJIT_INLINE_CONSTEXPR OffsetMode offsetMode() const noexcept { return OffsetMode(_signature.getField<kSignatureMemOffsetModeMask>()); }
//! Sets offset mode to `mode`.
ASMJIT_INLINE_NODEBUG void setOffsetMode(OffsetMode mode) noexcept { _signature.setField<kSignatureMemOffsetModeMask>(uint32_t(mode)); }
ASMJIT_INLINE_CONSTEXPR void setOffsetMode(OffsetMode mode) noexcept { _signature.setField<kSignatureMemOffsetModeMask>(uint32_t(mode)); }
//! Resets offset mode to default (fixed offset, without write-back).
ASMJIT_INLINE_NODEBUG void resetOffsetMode() noexcept { _signature.setField<kSignatureMemOffsetModeMask>(uint32_t(OffsetMode::kFixed)); }
ASMJIT_INLINE_CONSTEXPR void resetOffsetMode() noexcept { _signature.setField<kSignatureMemOffsetModeMask>(uint32_t(OffsetMode::kFixed)); }
//! Tests whether the current memory offset mode is fixed (see \ref OffsetMode::kFixed).
ASMJIT_INLINE_NODEBUG constexpr bool isFixedOffset() const noexcept { return offsetMode() == OffsetMode::kFixed; }
ASMJIT_INLINE_CONSTEXPR bool isFixedOffset() const noexcept { return offsetMode() == OffsetMode::kFixed; }
//! Tests whether the current memory offset mode is either pre-index or post-index (write-back is used).
ASMJIT_INLINE_NODEBUG constexpr bool isPreOrPost() const noexcept { return offsetMode() != OffsetMode::kFixed; }
ASMJIT_INLINE_CONSTEXPR bool isPreOrPost() const noexcept { return offsetMode() != OffsetMode::kFixed; }
//! Tests whether the current memory offset mode is pre-index (write-back is used).
ASMJIT_INLINE_NODEBUG constexpr bool isPreIndex() const noexcept { return offsetMode() == OffsetMode::kPreIndex; }
ASMJIT_INLINE_CONSTEXPR bool isPreIndex() const noexcept { return offsetMode() == OffsetMode::kPreIndex; }
//! Tests whether the current memory offset mode is post-index (write-back is used).
ASMJIT_INLINE_NODEBUG constexpr bool isPostIndex() const noexcept { return offsetMode() == OffsetMode::kPostIndex; }
ASMJIT_INLINE_CONSTEXPR bool isPostIndex() const noexcept { return offsetMode() == OffsetMode::kPostIndex; }
//! Sets offset mode of this memory operand to pre-index (write-back is used).
ASMJIT_INLINE_NODEBUG void makePreIndex() noexcept { setOffsetMode(OffsetMode::kPreIndex); }
ASMJIT_INLINE_CONSTEXPR void makePreIndex() noexcept { setOffsetMode(OffsetMode::kPreIndex); }
//! Sets offset mode of this memory operand to post-index (write-back is used).
ASMJIT_INLINE_NODEBUG void makePostIndex() noexcept { setOffsetMode(OffsetMode::kPostIndex); }
ASMJIT_INLINE_CONSTEXPR void makePostIndex() noexcept { setOffsetMode(OffsetMode::kPostIndex); }
//! Gets shift operation that is used by index register.
ASMJIT_INLINE_NODEBUG constexpr ShiftOp shiftOp() const noexcept { return ShiftOp(_signature.getField<kSignatureMemShiftOpMask>()); }
ASMJIT_INLINE_CONSTEXPR ShiftOp shiftOp() const noexcept { return ShiftOp(_signature.getField<kSignatureMemShiftOpMask>()); }
//! Sets shift operation that is used by index register.
ASMJIT_INLINE_NODEBUG void setShiftOp(ShiftOp sop) noexcept { _signature.setField<kSignatureMemShiftOpMask>(uint32_t(sop)); }
ASMJIT_INLINE_CONSTEXPR void setShiftOp(ShiftOp sop) noexcept { _signature.setField<kSignatureMemShiftOpMask>(uint32_t(sop)); }
//! Resets shift operation that is used by index register to LSL (default value).
ASMJIT_INLINE_NODEBUG void resetShiftOp() noexcept { _signature.setField<kSignatureMemShiftOpMask>(uint32_t(ShiftOp::kLSL)); }
ASMJIT_INLINE_CONSTEXPR void resetShiftOp() noexcept { _signature.setField<kSignatureMemShiftOpMask>(uint32_t(ShiftOp::kLSL)); }
//! Gets whether the memory operand has shift (aka scale) constant.
ASMJIT_INLINE_NODEBUG constexpr bool hasShift() const noexcept { return _signature.hasField<kSignatureMemShiftValueMask>(); }
ASMJIT_INLINE_CONSTEXPR bool hasShift() const noexcept { return _signature.hasField<kSignatureMemShiftValueMask>(); }
//! Gets the memory operand's shift (aka scale) constant.
ASMJIT_INLINE_NODEBUG constexpr uint32_t shift() const noexcept { return _signature.getField<kSignatureMemShiftValueMask>(); }
ASMJIT_INLINE_CONSTEXPR uint32_t shift() const noexcept { return _signature.getField<kSignatureMemShiftValueMask>(); }
//! Sets the memory operand's shift (aka scale) constant.
ASMJIT_INLINE_NODEBUG void setShift(uint32_t shift) noexcept { _signature.setField<kSignatureMemShiftValueMask>(shift); }
ASMJIT_INLINE_CONSTEXPR void setShift(uint32_t shift) noexcept { _signature.setField<kSignatureMemShiftValueMask>(shift); }
//! Sets the memory operand's shift and shift operation.
ASMJIT_INLINE_NODEBUG void setShift(Shift shift) noexcept {
ASMJIT_INLINE_CONSTEXPR void setShift(Shift shift) noexcept {
_signature.setField<kSignatureMemShiftOpMask>(uint32_t(shift.op()));
_signature.setField<kSignatureMemShiftValueMask>(shift.value());
}
//! Resets the memory operand's shift (aka scale) constant to zero.
ASMJIT_INLINE_NODEBUG void resetShift() noexcept { _signature.setField<kSignatureMemShiftValueMask>(0); }
ASMJIT_INLINE_CONSTEXPR void resetShift() noexcept { _signature.setField<kSignatureMemShiftValueMask>(0); }
//! \}
};
@@ -362,17 +365,17 @@ public:
//! \{
//! Constructs a `LSL #value` shift (logical shift left).
static ASMJIT_INLINE_NODEBUG constexpr Shift lsl(uint32_t value) noexcept { return Shift(ShiftOp::kLSL, value); }
static ASMJIT_INLINE_CONSTEXPR Shift lsl(uint32_t value) noexcept { return Shift(ShiftOp::kLSL, value); }
//! Constructs a `LSR #value` shift (logical shift right).
static ASMJIT_INLINE_NODEBUG constexpr Shift lsr(uint32_t value) noexcept { return Shift(ShiftOp::kLSR, value); }
static ASMJIT_INLINE_CONSTEXPR Shift lsr(uint32_t value) noexcept { return Shift(ShiftOp::kLSR, value); }
//! Constructs a `ASR #value` shift (arithmetic shift right).
static ASMJIT_INLINE_NODEBUG constexpr Shift asr(uint32_t value) noexcept { return Shift(ShiftOp::kASR, value); }
static ASMJIT_INLINE_CONSTEXPR Shift asr(uint32_t value) noexcept { return Shift(ShiftOp::kASR, value); }
//! Constructs a `ROR #value` shift (rotate right).
static ASMJIT_INLINE_NODEBUG constexpr Shift ror(uint32_t value) noexcept { return Shift(ShiftOp::kROR, value); }
static ASMJIT_INLINE_CONSTEXPR Shift ror(uint32_t value) noexcept { return Shift(ShiftOp::kROR, value); }
//! Constructs a `RRX` shift (rotate with carry by 1).
static ASMJIT_INLINE_NODEBUG constexpr Shift rrx() noexcept { return Shift(ShiftOp::kRRX, 0); }
static ASMJIT_INLINE_CONSTEXPR Shift rrx() noexcept { return Shift(ShiftOp::kRRX, 0); }
//! Constructs a `MSL #value` shift (logical shift left filling ones).
static ASMJIT_INLINE_NODEBUG constexpr Shift msl(uint32_t value) noexcept { return Shift(ShiftOp::kMSL, value); }
static ASMJIT_INLINE_CONSTEXPR Shift msl(uint32_t value) noexcept { return Shift(ShiftOp::kMSL, value); }
//! \}
@@ -385,7 +388,7 @@ static ASMJIT_INLINE_NODEBUG constexpr Shift msl(uint32_t value) noexcept { retu
//! Absolute memory operands can only be used if it's known that the PC relative offset is encodable and that it
//! would be within the limits. Absolute address is also often output from disassemblers, so AsmJit supports it to
//! make it possible to assemble such output back.
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(uint64_t base) noexcept { return Mem(base); }
static ASMJIT_INLINE_CONSTEXPR Mem ptr(uint64_t base) noexcept { return Mem(base); }
//! \}

View File

@@ -18,7 +18,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(arm)
namespace Utils {
//! Encodes a 12-bit immediate part of opcode that is used by a standard 32-bit ARM encoding.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline bool encodeAArch32Imm(uint64_t imm, uint32_t* encodedImmOut) noexcept {
if (imm & 0xFFFFFFFF00000000u)
return false;
@@ -73,7 +73,7 @@ struct LogicalImm {
//! | 0 | 11110s | .....r | 2 |
//! +---+--------+--------+------+
//! ```
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noexcept {
// Determine the element width, which must be 2, 4, 8, 16, 32, or 64 bits.
do {
@@ -121,7 +121,7 @@ static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noex
//! Returns true if the given `imm` value is encodable as a logical immediate. The `width` argument describes the
//! width of the operation, and must be either 32 or 64. This function can be used to test whether an immediate
//! value can be used with AND, ANDS, BIC, BICS, EON, EOR, ORN, and ORR instruction.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_INLINE_NODEBUG bool isLogicalImm(uint64_t imm, uint32_t width) noexcept {
LogicalImm dummy;
return encodeLogicalImm(imm, width, &dummy);
@@ -129,7 +129,7 @@ static ASMJIT_INLINE_NODEBUG bool isLogicalImm(uint64_t imm, uint32_t width) noe
//! Returns true if the given `imm` value is encodable as an immediate with `add` and `sub` instructions on AArch64.
//! These two instructions can encode 12-bit immediate value optionally shifted left by 12 bits.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_INLINE_NODEBUG bool isAddSubImm(uint64_t imm) noexcept {
return imm <= 0xFFFu || (imm & ~uint64_t(0xFFFu << 12)) == 0;
}
@@ -153,7 +153,7 @@ static ASMJIT_INLINE_NODEBUG uint32_t encodeImm64ByteMaskToImm8(uint64_t imm) no
//! \cond
//! A generic implementation that checks whether a floating point value can be converted to ARM Imm8.
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static ASMJIT_FORCE_INLINE bool isFPImm8Generic(T val) noexcept {
static ASMJIT_INLINE bool isFPImm8Generic(T val) noexcept {
constexpr uint32_t kAllBsMask = Support::lsbMask<uint32_t>(kNumBBits);
constexpr uint32_t kB0Pattern = Support::bitMask(kNumBBits - 1);
constexpr uint32_t kB1Pattern = kAllBsMask ^ kB0Pattern;

View File

@@ -24,6 +24,8 @@
#ifndef ASMJIT_ASMJIT_H_INCLUDED
#define ASMJIT_ASMJIT_H_INCLUDED
#pragma message("asmjit/asmjit.h is deprecated! Please use asmjit/core.h, asmjit/x86.h, or asmjit/a64.h")
#include "./core.h"
#ifndef ASMJIT_NO_X86

View File

@@ -102,21 +102,21 @@ namespace asmjit {
//!
//! - Requirements:
//!
//! - AsmJit won't build without C++11 enabled. If you use older GCC or Clang you would have to enable at least
//! C++11 standard through compiler flags.
//! - AsmJit won't build without C++17 enabled. If you use older GCC or Clang you would have to enable at least
//! C++17 standard through compiler flags.
//!
//! - Tested:
//!
//! - **Clang** - Tested by GitHub Actions - Clang 10+ is officially supported and tested by CI, older Clang versions
//! having C++11 should work, but are not tested anymore due to upgraded CI images.
//! having C++17 should work, but these versions are not tested anymore due to upgraded CI images.
//!
//! - **GNU** - Tested by GitHub Actions - GCC 7+ is officially supported, older GCC versions from 4.8+ having C++11
//! enabled should also work, but are not tested anymore due to upgraded CI images.
//! - **GNU** - Tested by GitHub Actions - GCC 9+ is officially supported and tested by CI, older GCC versions such
//! as GCC 7 should work, but these versions are not tested anymore due to upgraded CI images.
//!
//! - **MINGW** - Reported to work, but not tested in our CI environment (help welcome).
//! - **MINGW** - Reported to work, but not tested in our CI environment (help welcome!).
//!
//! - **MSVC** - Tested by GitHub Actions - VS2019+ is officially supported, VS2015 and VS2017 is reported to work,
//! but not tested by CI anymore.
//! - **MSVC** - Tested by GitHub Actions - VS2019 and onwards are officially supported and tested by CI, VS2015 and
//! VS2017 are not tested anymore due to upgraded CI images.
//!
//! ### Supported Operating Systems and Platforms
//!
@@ -173,7 +173,7 @@ namespace asmjit {
//! cmake_minimum_required(VERSION 3.30)
//!
//! project(asmjit_consumer C CXX) # Both C and CXX are required.
//! set(CMAKE_CXX_STANDARD 17) # C++11 and never is supported.
//! set(CMAKE_CXX_STANDARD 17) # C++17 and newer is supported.
//!
//! set(ASMJIT_DIR "3rdparty/asmjit") # Location of AsmJit.
//! set(ASMJIT_STATIC TRUE) # Force static build.
@@ -201,12 +201,12 @@ namespace asmjit {
//!
//! \section build_backends AsmJit Backends
//!
//! AsmJit currently supports only X86/X64 backend, but the plan is to add more backends in the future. By default
//! AsmJit builds only the host backend, which is auto-detected at compile-time, but this can be overridden.
//! All backends AsmJit supports are included by default. To exclude a backend use the following build-type macros:
//!
//! - \ref ASMJIT_NO_X86 - Disables both X86 and X86_64 backends.
//! - \ref ASMJIT_NO_AARCH64 - Disables AArch64 backend.
//! - \ref ASMJIT_NO_FOREIGN - Disables the support for foreign architecture backends, only keeps a native backend.
//! For example if your target is X86, `ASMJIT_NO_FOREIGN` would disable every backend but X86.
//!
//! \section build_options Build Options
//!
@@ -268,14 +268,24 @@ namespace asmjit {
//! - Visit our [Public Gitter Chat](https://app.gitter.im/#/room/#asmjit:gitter.im) if you need a quick help.
//!
//! - Build AsmJit with `ASMJIT_NO_DEPRECATED` macro defined to make sure that you are not using deprecated
//! functionality at all. Deprecated functions are decorated with `ASMJIT_DEPRECATED()` macro, but sometimes
//! functionality at all. Deprecated functions are decorated with `[[deprecated]]` attribute, but sometimes
//! it's not possible to decorate everything like classes, which are used by deprecated functions as well,
//! because some compilers would warn about that. If your project compiles fine with `ASMJIT_NO_DEPRECATED`
//! it's not using anything, which was deprecated.
//!
//! \section api_changes API Changes
//!
//! ### Changes committed at XXXX-XX-XX
//! ### Changes committed at 2025-05-24
//!
//! Core changes:
//!
//! - AsmJit now requires C++17 to compile.
//!
//! - Deprecated asmjit/asmjit.h header. Use asmjit/core.h to include everything except backend specific stuff,
//! and asmjit/x86.h or asmjit/a64.h to include tools of a specific architecture. At this time the asmjit.h
//! header is just deprecated, so it will still work as it used to for some time.
//!
//! ### Changes committed at 2025-05-10
//!
//! Core changes:
//!
@@ -565,7 +575,7 @@ namespace asmjit {
//! using namespace asmjit;
//!
//! // Signature of the generated function.
//! typedef int (*Func)(void);
//! using Func = int (*)(void);
//!
//! int main() {
//! JitRuntime rt; // Runtime specialized for JIT code execution.
@@ -712,7 +722,7 @@ namespace asmjit {
//!
//! using namespace asmjit;
//!
//! typedef void (*SumIntsFunc)(int* dst, const int* a, const int* b);
//! using SumIntsFunc = void (*)(int* dst, const int* a, const int* b);
//!
//! int main() {
//! // Create a custom environment that matches the current host environment.

View File

@@ -16,7 +16,7 @@
#define ASMJIT_LIBRARY_MAKE_VERSION(major, minor, patch) ((major << 16) | (minor << 8) | (patch))
//! AsmJit library version, see \ref ASMJIT_LIBRARY_MAKE_VERSION for a version format reference.
#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 15, 0)
#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 16, 0)
//! \def ASMJIT_ABI_NAMESPACE
//!
@@ -27,7 +27,7 @@
//! AsmJit default, which makes it possible to use multiple AsmJit libraries within a single project, totally
//! controlled by users. This is useful especially in cases in which some of such library comes from third party.
#if !defined(ASMJIT_ABI_NAMESPACE)
#define ASMJIT_ABI_NAMESPACE v1_15
#define ASMJIT_ABI_NAMESPACE v1_16
#endif // !ASMJIT_ABI_NAMESPACE
//! \}
@@ -287,27 +287,26 @@ namespace asmjit {
//! is exported. However, GCC has some strange behavior that even if one or more symbol is exported it doesn't export
//! typeinfo unless the class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
//! \def ASMJIT_FORCE_INLINE
//! \def ASMJIT_INLINE
//!
//! Decorator to force inlining of functions, uses either `__attribute__((__always_inline__))` or __forceinline,
//! depending on C++ compiler.
//! \def ASMJIT_INLINE_NODEBUG
//!
//! Like \ref ASMJIT_FORCE_INLINE, but uses additionally `__nodebug__` or `__artificial__` attribute to make the
//! Like \ref ASMJIT_INLINE, but uses additionally `__nodebug__` or `__artificial__` attribute to make the
//! debugging of some AsmJit functions easier, especially getters and one-line abstractions where usually you don't
//! want to step in.
//! \def ASMJIT_INLINE_CONSTEXPR
//!
//! Like \ref ASMJIT_INLINE_NODEBUG, but having an additional `constexpr` attribute.
//! \def ASMJIT_NOINLINE
//!
//! Decorator to avoid inlining of functions, uses either `__attribute__((__noinline__))` or `__declspec(noinline)`
//! depending on C++ compiler.
//! \def ASMJIT_NORETURN
//!
//! Decorator that marks functions that should never return. Typically used to implement assertion handlers that
//! terminate, so the function never returns.
//! \def ASMJIT_CDECL
//!
//! CDECL function attribute - either `__attribute__((__cdecl__))` or `__cdecl`.
@@ -371,11 +370,11 @@ namespace asmjit {
// Function attributes.
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
#define ASMJIT_FORCE_INLINE inline __attribute__((__always_inline__))
#define ASMJIT_INLINE inline __attribute__((__always_inline__))
#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
#define ASMJIT_FORCE_INLINE __forceinline
#define ASMJIT_INLINE __forceinline
#else
#define ASMJIT_FORCE_INLINE inline
#define ASMJIT_INLINE inline
#endif
@@ -387,15 +386,14 @@ namespace asmjit {
#define ASMJIT_INLINE_NODEBUG inline
#endif
#define ASMJIT_INLINE_CONSTEXPR constexpr ASMJIT_INLINE_NODEBUG
#if defined(__GNUC__)
#define ASMJIT_NOINLINE __attribute__((__noinline__))
#define ASMJIT_NORETURN __attribute__((__noreturn__))
#elif defined(_MSC_VER)
#define ASMJIT_NOINLINE __declspec(noinline)
#define ASMJIT_NORETURN __declspec(noreturn)
#else
#define ASMJIT_NOINLINE
#define ASMJIT_NORETURN
#endif
// Calling conventions.
@@ -424,7 +422,7 @@ namespace asmjit {
#define ASMJIT_VECTORCALL
#endif
// Type alignment (not allowed by C++11 'alignas' keyword).
// Type alignment (not allowed by C++17 'alignas' keyword).
#if defined(__GNUC__)
#define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE
#elif defined(_MSC_VER)
@@ -442,35 +440,15 @@ namespace asmjit {
#define ASMJIT_MAY_ALIAS
#endif
//! \def ASMJIT_MAYBE_UNUSED
//!
//! Expands to `[[maybe_unused]]` if supported or a compiler attribute instead.
#if __cplusplus >= 201703L
#define ASMJIT_MAYBE_UNUSED [[maybe_unused]]
#elif defined(__GNUC__)
#define ASMJIT_MAYBE_UNUSED __attribute__((unused))
#else
#define ASMJIT_MAYBE_UNUSED
#endif
#if defined(__clang_major__) && __clang_major__ >= 4 && !defined(_DOXYGEN)
// NOTE: Clang allows to apply this attribute to function arguments, which is what we want. Once GCC decides to
// support this use, we will enable it for GCC as well. However, until that, it will be clang only, which is
// what we need for static analysis.
#if defined(__clang__) && !defined(_DOXYGEN)
// NOTE: Clang allows to apply this attribute to function arguments, which is what we want. Once GCC decides
// to support this use, we will enable it for GCC as well. However, until that, it will be clang only, which
// is what we need for static analysis.
#define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT __attribute__((__nonnull__))
#else
#define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT
#endif
//! \def ASMJIT_NOEXCEPT_TYPE
//!
//! Defined to `noexcept` in C++17 mode or nothing otherwise. Used by function typedefs.
#if __cplusplus >= 201703L
#define ASMJIT_NOEXCEPT_TYPE noexcept
#else
#define ASMJIT_NOEXCEPT_TYPE
#endif
//! \def ASMJIT_ASSUME(...)
//!
//! Macro that tells the C/C++ compiler that the expression `...` evaluates to true.
@@ -504,35 +482,13 @@ namespace asmjit {
#define ASMJIT_UNLIKELY(...) (__VA_ARGS__)
#endif
//! \def ASMJIT_FALLTHROUGH
//!
//! Portable [[fallthrough]] attribute.
#if defined(__clang__) && __cplusplus >= 201103L
#define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
#elif defined(__GNUC__) && __GNUC__ >= 7
#define ASMJIT_FALLTHROUGH __attribute__((__fallthrough__))
#else
#define ASMJIT_FALLTHROUGH ((void)0) /* fallthrough */
#endif
//! \def ASMJIT_DEPRECATED
//!
//! Marks function, class, struct, enum, or anything else as deprecated.
#if defined(__GNUC__)
#define ASMJIT_DEPRECATED(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
#elif defined(_MSC_VER)
#define ASMJIT_DEPRECATED(MESSAGE) __declspec(deprecated(MESSAGE))
#else
#define ASMJIT_DEPRECATED(MESSAGE)
#endif
// Utilities.
#define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100)
#define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0]))
#if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined")))
#elif defined(__GNUC__) && __GNUC__ >= 5
#elif defined(__GNUC__)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__))
#else
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
@@ -541,25 +497,14 @@ namespace asmjit {
// Diagnostic Macros
// ======================================
#if !defined(__clang__) && !defined(__INTEL_COMPILER) && !defined(_DOXYGEN)
#if defined(__GNUC__) && __GNUC__ == 4
// There is a bug in GCC 4.X that has been fixed in GCC 5+, so just silence the warning.
#define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
#define ASMJIT_END_DIAGNOSTIC_SCOPE \
_Pragma("GCC diagnostic pop")
#elif defined(_MSC_VER)
#define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
__pragma(warning(push)) \
__pragma(warning(disable: 4127)) /* conditional expression is const */ \
__pragma(warning(disable: 4201)) /* nameless struct/union */
#define ASMJIT_END_DIAGNOSTIC_SCOPE \
__pragma(warning(pop))
#endif
#endif
#if !defined(ASMJIT_BEGIN_DIAGNOSTIC_SCOPE) && !defined(ASMJIT_END_DIAGNOSTIC_SCOPE)
#if defined(_MSC_VER) && !defined(__clang__) && !defined(_DOXYGEN)
#define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
__pragma(warning(push)) \
__pragma(warning(disable: 4127)) /* conditional expression is const */ \
__pragma(warning(disable: 4201)) /* nameless struct/union */
#define ASMJIT_END_DIAGNOSTIC_SCOPE \
__pragma(warning(pop))
#else
#define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE
#define ASMJIT_END_DIAGNOSTIC_SCOPE
#endif
@@ -568,19 +513,19 @@ namespace asmjit {
// ======================================
#if !defined(ASMJIT_NO_ABI_NAMESPACE) && !defined(_DOXYGEN)
#define ASMJIT_BEGIN_NAMESPACE \
ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
namespace asmjit { \
#define ASMJIT_BEGIN_NAMESPACE \
ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
namespace asmjit { \
inline namespace ASMJIT_ABI_NAMESPACE {
#define ASMJIT_END_NAMESPACE \
}} \
#define ASMJIT_END_NAMESPACE \
}} \
ASMJIT_END_DIAGNOSTIC_SCOPE
#else
#define ASMJIT_BEGIN_NAMESPACE \
ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
#define ASMJIT_BEGIN_NAMESPACE \
ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
namespace asmjit {
#define ASMJIT_END_NAMESPACE \
} \
#define ASMJIT_END_NAMESPACE \
} \
ASMJIT_END_DIAGNOSTIC_SCOPE
#endif
@@ -590,13 +535,13 @@ namespace asmjit {
// C++ Utilities
// =============
#define ASMJIT_NONCOPYABLE(Type) \
Type(const Type& other) = delete; \
#define ASMJIT_NONCOPYABLE(Type) \
Type(const Type& other) = delete; \
Type& operator=(const Type& other) = delete;
#define ASMJIT_NONCONSTRUCTIBLE(Type) \
Type() = delete; \
Type(const Type& other) = delete; \
#define ASMJIT_NONCONSTRUCTIBLE(Type) \
Type() = delete; \
Type(const Type& other) = delete; \
Type& operator=(const Type& other) = delete;
//! \def ASMJIT_DEFINE_ENUM_FLAGS(T)
@@ -605,38 +550,32 @@ namespace asmjit {
#ifdef _DOXYGEN
#define ASMJIT_DEFINE_ENUM_FLAGS(T)
#else
#define ASMJIT_DEFINE_ENUM_FLAGS(T) \
static ASMJIT_INLINE_NODEBUG constexpr T operator~(T a) noexcept { \
return T(~(std::underlying_type<T>::type)(a)); \
} \
\
static ASMJIT_INLINE_NODEBUG constexpr T operator|(T a, T b) noexcept { \
return T((std::underlying_type<T>::type)(a) | \
(std::underlying_type<T>::type)(b)); \
} \
static ASMJIT_INLINE_NODEBUG constexpr T operator&(T a, T b) noexcept { \
return T((std::underlying_type<T>::type)(a) & \
(std::underlying_type<T>::type)(b)); \
} \
static ASMJIT_INLINE_NODEBUG constexpr T operator^(T a, T b) noexcept { \
return T((std::underlying_type<T>::type)(a) ^ \
(std::underlying_type<T>::type)(b)); \
} \
\
static ASMJIT_INLINE_NODEBUG T& operator|=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) | \
(std::underlying_type<T>::type)(b)); \
return a; \
} \
static ASMJIT_INLINE_NODEBUG T& operator&=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) & \
(std::underlying_type<T>::type)(b)); \
return a; \
} \
static ASMJIT_INLINE_NODEBUG T& operator^=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) ^ \
(std::underlying_type<T>::type)(b)); \
return a; \
#define ASMJIT_DEFINE_ENUM_FLAGS(T) \
static ASMJIT_INLINE_CONSTEXPR T operator~(T a) noexcept { \
return T(~std::underlying_type_t<T>(a)); \
} \
\
static ASMJIT_INLINE_CONSTEXPR T operator|(T a, T b) noexcept { \
return T(std::underlying_type_t<T>(a) | std::underlying_type_t<T>(b)); \
} \
static ASMJIT_INLINE_CONSTEXPR T operator&(T a, T b) noexcept { \
return T(std::underlying_type_t<T>(a) & std::underlying_type_t<T>(b)); \
} \
static ASMJIT_INLINE_CONSTEXPR T operator^(T a, T b) noexcept { \
return T(std::underlying_type_t<T>(a) ^ std::underlying_type_t<T>(b)); \
} \
\
static ASMJIT_INLINE_CONSTEXPR T& operator|=(T& a, T b) noexcept { \
a = T(std::underlying_type_t<T>(a) | std::underlying_type_t<T>(b)); \
return a; \
} \
static ASMJIT_INLINE_CONSTEXPR T& operator&=(T& a, T b) noexcept { \
a = T(std::underlying_type_t<T>(a) & std::underlying_type_t<T>(b)); \
return a; \
} \
static ASMJIT_INLINE_CONSTEXPR T& operator^=(T& a, T b) noexcept { \
a = T(std::underlying_type_t<T>(a) ^ std::underlying_type_t<T>(b)); \
return a; \
}
#endif
@@ -646,18 +585,18 @@ namespace asmjit {
#if defined(_DOXYGEN) || (defined(_MSC_VER) && _MSC_VER <= 1900)
#define ASMJIT_DEFINE_ENUM_COMPARE(T)
#else
#define ASMJIT_DEFINE_ENUM_COMPARE(T) \
static ASMJIT_INLINE_NODEBUG bool operator<(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) < (std::underlying_type<T>::type)(b); \
} \
static ASMJIT_INLINE_NODEBUG bool operator<=(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) <= (std::underlying_type<T>::type)(b); \
} \
static ASMJIT_INLINE_NODEBUG bool operator>(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) > (std::underlying_type<T>::type)(b); \
} \
static ASMJIT_INLINE_NODEBUG bool operator>=(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) >= (std::underlying_type<T>::type)(b); \
#define ASMJIT_DEFINE_ENUM_COMPARE(T) \
static ASMJIT_INLINE_CONSTEXPR bool operator<(T a, T b) noexcept { \
return (std::underlying_type_t<T>)(a) < (std::underlying_type_t<T>)(b); \
} \
static ASMJIT_INLINE_CONSTEXPR bool operator<=(T a, T b) noexcept { \
return (std::underlying_type_t<T>)(a) <= (std::underlying_type_t<T>)(b); \
} \
static ASMJIT_INLINE_CONSTEXPR bool operator>(T a, T b) noexcept { \
return (std::underlying_type_t<T>)(a) > (std::underlying_type_t<T>)(b); \
} \
static ASMJIT_INLINE_CONSTEXPR bool operator>=(T a, T b) noexcept { \
return (std::underlying_type_t<T>)(a) >= (std::underlying_type_t<T>)(b); \
}
#endif

View File

@@ -101,9 +101,12 @@ static constexpr CondCode _reverseCondTable[] = {
//! \endcond
//! Reverses a condition code (reverses the corresponding operands of a comparison).
static ASMJIT_INLINE_NODEBUG constexpr CondCode reverseCond(CondCode cond) noexcept { return _reverseCondTable[uint8_t(cond)]; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR CondCode reverseCond(CondCode cond) noexcept { return _reverseCondTable[uint8_t(cond)]; }
//! Negates a condition code.
static ASMJIT_INLINE_NODEBUG constexpr CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); }
//! Memory offset mode.
//!
@@ -180,20 +183,24 @@ public:
ASMJIT_INLINE_NODEBUG Shift() noexcept = default;
//! Copy constructor (default)
ASMJIT_INLINE_NODEBUG constexpr Shift(const Shift& other) noexcept = default;
ASMJIT_INLINE_CONSTEXPR Shift(const Shift& other) noexcept = default;
//! Constructs Shift from operation `op` and shift `value`.
ASMJIT_INLINE_NODEBUG constexpr Shift(ShiftOp op, uint32_t value) noexcept
ASMJIT_INLINE_CONSTEXPR Shift(ShiftOp op, uint32_t value) noexcept
: _op(op),
_value(value) {}
//! Returns the shift operation.
ASMJIT_INLINE_NODEBUG constexpr ShiftOp op() const noexcept { return _op; }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR ShiftOp op() const noexcept { return _op; }
//! Sets shift operation to `op`.
ASMJIT_INLINE_NODEBUG void setOp(ShiftOp op) noexcept { _op = op; }
//! Returns the shift amount.
ASMJIT_INLINE_NODEBUG constexpr uint32_t value() const noexcept { return _value; }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR uint32_t value() const noexcept { return _value; }
//! Sets shift amount to `value`.
ASMJIT_INLINE_NODEBUG void setValue(uint32_t value) noexcept { _value = value; }
};

View File

@@ -106,51 +106,64 @@ ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegSignature(Arch arch, TypeId typeId
// TODO: Remove this, should never be used like this.
// Passed RegType instead of TypeId?
if (uint32_t(typeId) <= uint32_t(RegType::kMaxValue))
if (uint32_t(typeId) <= uint32_t(RegType::kMaxValue)) {
typeId = archTraits.regTypeToTypeId(RegType(uint32_t(typeId)));
}
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(typeId)))
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(typeId))) {
return DebugUtils::errored(kErrorInvalidTypeId);
}
// First normalize architecture dependent types.
if (TypeUtils::isAbstract(typeId)) {
bool is32Bit = Environment::is32Bit(arch);
if (typeId == TypeId::kIntPtr)
if (typeId == TypeId::kIntPtr) {
typeId = is32Bit ? TypeId::kInt32 : TypeId::kInt64;
else
}
else {
typeId = is32Bit ? TypeId::kUInt32 : TypeId::kUInt64;
}
}
// Type size helps to construct all groups of registers.
// TypeId is invalid if the size is zero.
uint32_t size = TypeUtils::sizeOf(typeId);
if (ASMJIT_UNLIKELY(!size))
if (ASMJIT_UNLIKELY(!size)) {
return DebugUtils::errored(kErrorInvalidTypeId);
}
if (ASMJIT_UNLIKELY(typeId == TypeId::kFloat80))
if (ASMJIT_UNLIKELY(typeId == TypeId::kFloat80)) {
return DebugUtils::errored(kErrorInvalidUseOfF80);
}
RegType regType = RegType::kNone;
if (TypeUtils::isBetween(typeId, TypeId::_kBaseStart, TypeId::_kVec32Start)) {
regType = archTraits._typeIdToRegType[uint32_t(typeId) - uint32_t(TypeId::_kBaseStart)];
if (regType == RegType::kNone) {
if (typeId == TypeId::kInt64 || typeId == TypeId::kUInt64)
if (typeId == TypeId::kInt64 || typeId == TypeId::kUInt64) {
return DebugUtils::errored(kErrorInvalidUseOfGpq);
else
}
else {
return DebugUtils::errored(kErrorInvalidTypeId);
}
}
}
else {
if (size <= 8 && archTraits._regSignature[RegType::kVec64].isValid())
if (size <= 8 && archTraits._regSignature[RegType::kVec64].isValid()) {
regType = RegType::kVec64;
else if (size <= 16 && archTraits._regSignature[RegType::kVec128].isValid())
}
else if (size <= 16 && archTraits._regSignature[RegType::kVec128].isValid()) {
regType = RegType::kVec128;
else if (size == 32 && archTraits._regSignature[RegType::kVec256].isValid())
}
else if (size == 32 && archTraits._regSignature[RegType::kVec256].isValid()) {
regType = RegType::kVec256;
else if (archTraits._regSignature[RegType::kVec512].isValid())
}
else if (archTraits._regSignature[RegType::kVec512].isValid()) {
regType = RegType::kVec512;
else
}
else {
return DebugUtils::errored(kErrorInvalidTypeId);
}
}
*typeIdOut = typeId;

View File

@@ -205,49 +205,76 @@ struct ArchTraits {
//! \{
//! Returns stack pointer register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t spRegId() const noexcept { return _spRegId; }
//! Returns stack frame register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t fpRegId() const noexcept { return _fpRegId; }
//! Returns link register id, if the architecture provides it.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t linkRegId() const noexcept { return _linkRegId; }
//! Returns instruction pointer register id, if the architecture provides it.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t ipRegId() const noexcept { return _ipRegId; }
//! Returns a hardware stack alignment requirement.
//!
//! \note This is a hardware constraint. Architectures that don't constrain it would return the lowest alignment
//! (1), however, some architectures may constrain the alignment, for example AArch64 requires 16-byte alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; }
//! Tests whether the architecture provides link register, which is used across function calls. If the link
//! register is not provided then a function call pushes the return address on stack (X86/X64).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLinkReg() const noexcept { return _linkRegId != BaseReg::kIdBad; }
//! Returns minimum addressable offset on stack guaranteed for all instructions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t minStackOffset() const noexcept { return _minStackOffset; }
//! Returns maximum addressable offset on stack depending on specific instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t maxStackOffset() const noexcept { return _maxStackOffset; }
//! Returns ISA flags of the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstHints instFeatureHints(RegGroup group) const noexcept { return _instHints[group]; }
//! Tests whether the given register `group` has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstHint(RegGroup group, InstHints feature) const noexcept { return Support::test(_instHints[group], feature); }
//! Tests whether the ISA provides register swap instruction for the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstRegSwap(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kRegSwap); }
//! Tests whether the ISA provides push/pop instructions for the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstPushPop(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kPushPop); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRegType(RegType type) const noexcept {
return type <= RegType::kMaxValue && _regSignature[type].isValid();
}
//! Returns an operand signature from the given register `type` of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature regTypeToSignature(RegType type) const noexcept { return _regSignature[type]; }
//! Returns a register from the given register `type` of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegGroup regTypeToGroup(RegType type) const noexcept { return _regSignature[type].regGroup(); }
//! Returns a register size the given register `type` of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regTypeToSize(RegType type) const noexcept { return _regSignature[type].size(); }
//! Returns a corresponding `TypeId` from the given register `type` of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId regTypeToTypeId(RegType type) const noexcept { return _regTypeToTypeId[type]; }
//! Returns a table of ISA word names that appear in formatted text. Word names are ISA dependent.
@@ -257,9 +284,11 @@ struct ArchTraits {
//! - [1] 16-bits
//! - [2] 32-bits
//! - [3] 64-bits
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ArchTypeNameId* typeNameIdTable() const noexcept { return _typeNameIdTable; }
//! Returns an ISA word name identifier of the given `index`, see \ref typeNameIdTable() for more details.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArchTypeNameId typeNameIdByIndex(uint32_t index) const noexcept { return _typeNameIdTable[index]; }
//! \}
@@ -268,6 +297,7 @@ struct ArchTraits {
//! \{
//! Returns a const reference to `ArchTraits` for the given architecture `arch`.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG const ArchTraits& byArch(Arch arch) noexcept;
//! \}

View File

@@ -26,12 +26,14 @@ BaseAssembler::~BaseAssembler() noexcept {}
// =================================
Error BaseAssembler::setOffset(size_t offset) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
size_t size = Support::max<size_t>(_section->bufferSize(), this->offset());
if (ASMJIT_UNLIKELY(offset > size))
if (ASMJIT_UNLIKELY(offset > size)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
_bufferPtr = _bufferData + offset;
return kErrorOk;
@@ -50,15 +52,18 @@ static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noe
}
Error BaseAssembler::section(Section* section) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section)
if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section) {
return reportError(DebugUtils::errored(kErrorInvalidSection));
}
#ifndef ASMJIT_NO_LOGGING
if (_logger)
if (_logger) {
_logger->logf(".section %s {#%u}\n", section->name(), section->id());
}
#endif
BaseAssembler_initSection(this, section);
@@ -73,10 +78,12 @@ Label BaseAssembler::newLabel() {
if (ASMJIT_LIKELY(_code)) {
LabelEntry* le;
Error err = _code->newLabelEntry(&le);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
reportError(err);
else
}
else {
labelId = le->id();
}
}
return Label(labelId);
}
@@ -86,28 +93,33 @@ Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, LabelType
if (ASMJIT_LIKELY(_code)) {
LabelEntry* le;
Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
reportError(err);
else
}
else {
labelId = le->id();
}
}
return Label(labelId);
}
Error BaseAssembler::bind(const Label& label) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
Error err = _code->bindLabel(label, _section->id(), offset());
#ifndef ASMJIT_NO_LOGGING
if (_logger)
if (_logger) {
EmitterUtils::logLabelBound(this, label);
}
#endif
resetInlineComment();
if (err)
if (err) {
return reportError(err);
}
return kErrorOk;
}
@@ -116,11 +128,13 @@ Error BaseAssembler::bind(const Label& label) {
// =====================
Error BaseAssembler::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (dataSize == 0)
if (dataSize == 0) {
return kErrorOk;
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
@@ -144,11 +158,13 @@ Error BaseAssembler::embedDataArray(TypeId typeId, const void* data, size_t item
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId)))
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (itemCount == 0 || repeatCount == 0)
if (itemCount == 0 || repeatCount == 0) {
return kErrorOk;
}
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
@@ -156,15 +172,16 @@ Error BaseAssembler::embedDataArray(TypeId typeId, const void* data, size_t item
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of);
if (ASMJIT_UNLIKELY(of))
if (ASMJIT_UNLIKELY(of)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize));
for (size_t i = 0; i < repeatCount; i++)
for (size_t i = 0; i < repeatCount; i++) {
writer.emitData(data, dataSize);
}
writer.done(this);
#ifndef ASMJIT_NO_LOGGING
@@ -194,18 +211,21 @@ static const TypeId dataTypeIdBySize[9] = {
#endif
Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (ASMJIT_UNLIKELY(!isLabelValid(label)))
if (ASMJIT_UNLIKELY(!isLabelValid(label))) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
size_t size = pool.size();
if (!size)
if (!size) {
return kErrorOk;
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, size));
@@ -234,21 +254,25 @@ Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
}
Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
ASMJIT_ASSERT(_code != nullptr);
RelocEntry* re;
LabelEntry* le = _code->labelEntry(label);
if (ASMJIT_UNLIKELY(!le))
if (ASMJIT_UNLIKELY(!le)) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
if (dataSize == 0)
if (dataSize == 0) {
dataSize = registerSize();
}
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) {
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
@@ -266,8 +290,9 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
#endif
Error err = _code->newRelocEntry(&re, RelocType::kRelToAbs);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
re->_sourceSectionId = _section->id();
re->_sourceOffset = offset();
@@ -282,8 +307,9 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
of.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize);
LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0, of);
if (ASMJIT_UNLIKELY(!link))
if (ASMJIT_UNLIKELY(!link)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
link->relocId = re->id();
}
@@ -296,20 +322,24 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
}
Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
LabelEntry* labelEntry = _code->labelEntry(label);
LabelEntry* baseEntry = _code->labelEntry(base);
if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry))
if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry)) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
if (dataSize == 0)
if (dataSize == 0) {
dataSize = registerSize();
}
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) {
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
@@ -336,12 +366,14 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size
else {
RelocEntry* re;
Error err = _code->newRelocEntry(&re, RelocType::kExpression);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
Expression* exp = _code->_zone.newT<Expression>();
if (ASMJIT_UNLIKELY(!exp))
if (ASMJIT_UNLIKELY(!exp)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
exp->reset();
exp->opType = ExpressionOpType::kSub;
@@ -365,8 +397,9 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size
Error BaseAssembler::comment(const char* data, size_t size) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached))
if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
return kErrorOk;
}

View File

@@ -28,7 +28,7 @@ ASMJIT_BEGIN_NAMESPACE
class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter {
public:
ASMJIT_NONCOPYABLE(BaseAssembler)
typedef BaseEmitter Base;
using Base = BaseEmitter;
//! Current section where the assembling happens.
Section* _section = nullptr;
@@ -53,11 +53,15 @@ public:
//! \{
//! Returns the capacity of the current CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
//! Returns the number of remaining bytes in the current CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
//! Returns the current position in the CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
//! Sets the current position in the CodeBuffer to `offset`.
@@ -66,10 +70,15 @@ public:
ASMJIT_API Error setOffset(size_t offset);
//! Returns the start of the CodeBuffer in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferData() const noexcept { return _bufferData; }
//! Returns the end (first invalid byte) in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferEnd() const noexcept { return _bufferEnd; }
//! Returns the current pointer in the CodeBuffer in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferPtr() const noexcept { return _bufferPtr; }
//! \}
@@ -78,6 +87,7 @@ public:
//! \{
//! Returns the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* currentSection() const noexcept { return _section; }
ASMJIT_API Error section(Section* section) override;

View File

@@ -35,8 +35,9 @@ public:
// =======================
static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
for (Pass* pass : self->_passes)
for (Pass* pass : self->_passes) {
pass->~Pass();
}
self->_passes.reset();
}
@@ -62,8 +63,9 @@ Error BaseBuilder::newInstNode(InstNode** out, InstId instId, InstOptions instOp
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
if (ASMJIT_UNLIKELY(!node))
if (ASMJIT_UNLIKELY(!node)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
*out = new(Support::PlacementNew{node}) InstNode(this, instId, instOptions, opCount, opCapacity);
return kErrorOk;
@@ -88,15 +90,17 @@ Error BaseBuilder::newEmbedDataNode(EmbedDataNode** out, TypeId typeId, const vo
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId)))
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
if (ASMJIT_UNLIKELY(of))
if (ASMJIT_UNLIKELY(of)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedDataNode>(&node));
@@ -109,13 +113,15 @@ Error BaseBuilder::newEmbedDataNode(EmbedDataNode** out, TypeId typeId, const vo
uint8_t* dstData = node->_inlineData;
if (dataSize > EmbedDataNode::kInlineBufferSize) {
dstData = static_cast<uint8_t*>(_dataZone.alloc(dataSize, 8));
if (ASMJIT_UNLIKELY(!dstData))
if (ASMJIT_UNLIKELY(!dstData)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
node->_externalData = dstData;
}
if (data)
if (data) {
memcpy(dstData, data, dataSize);
}
*out = node;
return kErrorOk;
@@ -132,13 +138,15 @@ Error BaseBuilder::newCommentNode(CommentNode** out, const char* data, size_t si
*out = nullptr;
if (data) {
if (size == SIZE_MAX)
if (size == SIZE_MAX) {
size = strlen(data);
}
if (size > 0) {
data = static_cast<char*>(_dataZone.dup(data, size, true));
if (ASMJIT_UNLIKELY(!data))
if (ASMJIT_UNLIKELY(!data)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
}
}
@@ -168,15 +176,18 @@ BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept {
node->_next = next;
prev->_next = node;
if (next)
if (next) {
next->_prev = node;
else
}
else {
_nodeList._last = node;
}
}
node->addFlags(NodeFlags::kIsActive);
if (node->isSection())
if (node->isSection()) {
_dirtySectionLinks = true;
}
_cursor = node;
return node;
@@ -193,14 +204,17 @@ BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept {
node->_next = next;
node->addFlags(NodeFlags::kIsActive);
if (node->isSection())
if (node->isSection()) {
_dirtySectionLinks = true;
}
prev->_next = node;
if (next)
if (next) {
next->_prev = node;
else
}
else {
_nodeList._last = node;
}
return node;
}
@@ -218,43 +232,54 @@ BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept {
node->_next = next;
node->addFlags(NodeFlags::kIsActive);
if (node->isSection())
if (node->isSection()) {
_dirtySectionLinks = true;
}
next->_prev = node;
if (prev)
if (prev) {
prev->_next = node;
else
}
else {
_nodeList._first = node;
}
return node;
}
BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept {
if (!node->isActive())
if (!node->isActive()) {
return node;
}
BaseNode* prev = node->prev();
BaseNode* next = node->next();
if (_nodeList._first == node)
if (_nodeList._first == node) {
_nodeList._first = next;
else
}
else {
prev->_next = next;
}
if (_nodeList._last == node)
if (_nodeList._last == node) {
_nodeList._last = prev;
else
}
else {
next->_prev = prev;
}
node->_prev = nullptr;
node->_next = nullptr;
node->clearFlags(NodeFlags::kIsActive);
if (node->isSection())
_dirtySectionLinks = true;
if (_cursor == node)
if (node->isSection()) {
_dirtySectionLinks = true;
}
if (_cursor == node) {
_cursor = prev;
}
return node;
}
@@ -265,21 +290,26 @@ void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
return;
}
if (!first->isActive())
if (!first->isActive()) {
return;
}
BaseNode* prev = first->prev();
BaseNode* next = last->next();
if (_nodeList._first == first)
if (_nodeList._first == first) {
_nodeList._first = next;
else
}
else {
prev->_next = next;
}
if (_nodeList._last == last)
if (_nodeList._last == last) {
_nodeList._last = prev;
else
}
else {
next->_prev = prev;
}
BaseNode* node = first;
uint32_t didRemoveSection = false;
@@ -293,16 +323,19 @@ void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
node->clearFlags(NodeFlags::kIsActive);
didRemoveSection |= uint32_t(node->isSection());
if (_cursor == node)
if (_cursor == node) {
_cursor = prev;
}
if (node == last)
if (node == last) {
break;
}
node = next;
}
if (didRemoveSection)
if (didRemoveSection) {
_dirtySectionLinks = true;
}
}
BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
@@ -317,28 +350,34 @@ BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) {
*out = nullptr;
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId)))
if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId))) {
return reportError(DebugUtils::errored(kErrorInvalidSection));
}
if (sectionId >= _sectionNodes.size()) {
Error err = _sectionNodes.reserve(&_allocator, sectionId + 1);
if (ASMJIT_UNLIKELY(err != kErrorOk))
if (ASMJIT_UNLIKELY(err != kErrorOk)) {
return reportError(err);
}
}
SectionNode* node = nullptr;
if (sectionId < _sectionNodes.size())
if (sectionId < _sectionNodes.size()) {
node = _sectionNodes[sectionId];
}
if (!node) {
ASMJIT_PROPAGATE(_newNodeT<SectionNode>(&node, sectionId));
// We have already reserved enough space, this cannot fail now.
if (sectionId >= _sectionNodes.size())
_sectionNodes.resize(&_allocator, sectionId + 1);
if (sectionId >= _sectionNodes.size()) {
// SAFETY: No need to check for error condition as we have already reserved enough space.
(void)_sectionNodes.resize(&_allocator, sectionId + 1);
}
_sectionNodes[sectionId] = node;
}
@@ -361,36 +400,42 @@ Error BaseBuilder::section(Section* section) {
// This is a bit tricky. We cache section links to make sure that
// switching sections doesn't involve traversal in linked-list unless
// the position of the section has changed.
if (hasDirtySectionLinks())
if (hasDirtySectionLinks()) {
updateSectionLinks();
}
if (node->_nextSection)
if (node->_nextSection) {
_cursor = node->_nextSection->_prev;
else
}
else {
_cursor = _nodeList.last();
}
}
return kErrorOk;
}
void BaseBuilder::updateSectionLinks() noexcept {
if (!_dirtySectionLinks)
if (!_dirtySectionLinks) {
return;
}
BaseNode* node_ = _nodeList.first();
SectionNode* currentSection = nullptr;
while (node_) {
if (node_->isSection()) {
if (currentSection)
if (currentSection) {
currentSection->_nextSection = node_->as<SectionNode>();
}
currentSection = node_->as<SectionNode>();
}
node_ = node_->next();
}
if (currentSection)
if (currentSection) {
currentSection->_nextSection = nullptr;
}
_dirtySectionLinks = false;
}
@@ -401,15 +446,18 @@ void BaseBuilder::updateSectionLinks() noexcept {
Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
*out = nullptr;
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
uint32_t index = labelId;
if (ASMJIT_UNLIKELY(index >= _code->labelCount()))
if (ASMJIT_UNLIKELY(index >= _code->labelCount())) {
return DebugUtils::errored(kErrorInvalidLabel);
}
if (index >= _labelNodes.size())
if (index >= _labelNodes.size()) {
ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1));
}
LabelNode* node = _labelNodes[index];
if (!node) {
@@ -422,8 +470,9 @@ Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
}
Error BaseBuilder::registerLabelNode(LabelNode* node) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
LabelEntry* le;
ASMJIT_PROPAGATE(_code->newLabelEntry(&le));
@@ -445,13 +494,15 @@ static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) {
uint32_t growBy = labelId - self->_labelNodes.size();
Error err = self->_labelNodes.willGrow(&self->_allocator, growBy);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
return self->reportError(err);
}
LabelNode* node;
ASMJIT_PROPAGATE(self->_newNodeT<LabelNode>(&node, labelId));
self->_labelNodes.resize(&self->_allocator, labelId + 1);
// SAFETY: No need to check for error condition as we have already reserved enough space.
(void)self->_labelNodes.resize(&self->_allocator, labelId + 1);
self->_labelNodes[labelId] = node;
node->_labelId = labelId;
return kErrorOk;
@@ -495,15 +546,18 @@ Error BaseBuilder::bind(const Label& label) {
// ====================
ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept {
for (Pass* pass : _passes)
if (strcmp(pass->name(), name) == 0)
for (Pass* pass : _passes) {
if (strcmp(pass->name(), name) == 0) {
return pass;
}
}
return nullptr;
}
ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(pass == nullptr)) {
// Since this is directly called by `addPassT()` we treat `null` argument
@@ -512,8 +566,9 @@ ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
}
else if (ASMJIT_UNLIKELY(pass->_cb)) {
// Kinda weird, but okay...
if (pass->_cb == this)
if (pass->_cb == this) {
return kErrorOk;
}
return DebugUtils::errored(kErrorInvalidState);
}
@@ -523,15 +578,18 @@ ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
}
ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(pass == nullptr))
if (ASMJIT_UNLIKELY(pass == nullptr)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
if (pass->_cb != nullptr) {
if (pass->_cb != this)
if (pass->_cb != this) {
return DebugUtils::errored(kErrorInvalidState);
}
uint32_t index = _passes.indexOf(pass);
ASMJIT_ASSERT(index != Globals::kNotFound);
@@ -545,11 +603,13 @@ ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept {
}
Error BaseBuilder::runPasses() {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (_passes.empty())
if (_passes.empty()) {
return kErrorOk;
}
ErrorHandler* prev = errorHandler();
PostponedErrorHandler postponed;
@@ -560,14 +620,16 @@ Error BaseBuilder::runPasses() {
for (Pass* pass : _passes) {
_passZone.reset();
err = pass->run(&_passZone, _logger);
if (err)
if (err) {
break;
}
}
_passZone.reset();
setErrorHandler(prev);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr);
}
return kErrorOk;
}
@@ -580,8 +642,9 @@ Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1,
InstOptions options = instOptions() | forcedInstOptions();
if (Support::test(options, InstOptions::kReserved)) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
#ifndef ASMJIT_NO_VALIDATION
// Strict validation.
@@ -626,12 +689,14 @@ Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1,
node->setOp(0, o0);
node->setOp(1, o1);
node->setOp(2, o2);
for (uint32_t i = 3; i < opCount; i++)
for (uint32_t i = 3; i < opCount; i++) {
node->setOp(i, opExt[i - 3]);
}
node->resetOpRange(opCount, opCapacity);
if (comment)
if (comment) {
node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
}
addNode(node);
resetExtraReg();
@@ -642,8 +707,9 @@ Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1,
// ===================
Error BaseBuilder::align(AlignMode alignMode, uint32_t alignment) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
AlignNode* node;
ASMJIT_PROPAGATE(newAlignNode(&node, alignMode, alignment));
@@ -657,8 +723,9 @@ Error BaseBuilder::align(AlignMode alignMode, uint32_t alignment) {
// ===================
Error BaseBuilder::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
EmbedDataNode* node;
ASMJIT_PROPAGATE(newEmbedDataNode(&node, TypeId::kUInt8, data, dataSize));
@@ -669,8 +736,9 @@ Error BaseBuilder::embed(const void* data, size_t dataSize) {
}
Error BaseBuilder::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t itemRepeat) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
EmbedDataNode* node;
ASMJIT_PROPAGATE(newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat));
@@ -681,11 +749,13 @@ Error BaseBuilder::embedDataArray(TypeId typeId, const void* data, size_t itemCo
}
Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (!isLabelValid(label))
if (!isLabelValid(label)) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
@@ -710,11 +780,13 @@ static inline bool BaseBuilder_checkDataSize(size_t dataSize) noexcept {
}
Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (!BaseBuilder_checkDataSize(dataSize))
if (!BaseBuilder_checkDataSize(dataSize)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
EmbedLabelNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelNode>(&node, label.id(), uint32_t(dataSize)));
@@ -724,11 +796,13 @@ Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) {
}
Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (!BaseBuilder_checkDataSize(dataSize))
if (!BaseBuilder_checkDataSize(dataSize)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
EmbedLabelDeltaNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelDeltaNode>(&node, label.id(), base.id(), uint32_t(dataSize)));
@@ -741,8 +815,9 @@ Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t
// =====================
Error BaseBuilder::comment(const char* data, size_t size) {
if (ASMJIT_UNLIKELY(!_code))
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
CommentNode* node;
ASMJIT_PROPAGATE(newCommentNode(&node, data, size));
@@ -827,7 +902,9 @@ Error BaseBuilder::serializeTo(BaseEmitter* dst) {
err = dst->comment(node->inlineComment());
}
if (err) break;
if (err) {
break;
}
node_ = node_->next();
} while (node_);
@@ -843,8 +920,9 @@ Error BaseBuilder::onAttach(CodeHolder* code) noexcept {
SectionNode* initialSection;
Error err = sectionNodeOf(&initialSection, 0);
if (!err)
if (!err) {
err = _passes.willGrow(&_allocator, 8);
}
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);

View File

@@ -163,9 +163,13 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _first == nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* first() const noexcept { return _first; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* last() const noexcept { return _last; }
//! \}
@@ -185,7 +189,7 @@ public:
class ASMJIT_VIRTAPI BaseBuilder : public BaseEmitter {
public:
ASMJIT_NONCOPYABLE(BaseBuilder)
typedef BaseEmitter Base;
using Base = BaseEmitter;
//! \name Members
//! \{
@@ -231,11 +235,15 @@ public:
//! \name Node Management
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeList nodeList() const noexcept { return _nodeList; }
//! Returns the first node.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* firstNode() const noexcept { return _nodeList.first(); }
//! Returns the last node.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* lastNode() const noexcept { return _nodeList.last(); }
//! Allocates and instantiates a new node of type `T` and returns its instance. If the allocation fails `nullptr`
@@ -282,6 +290,7 @@ public:
//! When the Builder/Compiler is created it automatically creates a '.text' \ref SectionNode, which will be the
//! initial one. When instructions are added they are always added after the cursor and the cursor is changed
//! to be that newly added node. Use `setCursor()` to change where new nodes are inserted.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* cursor() const noexcept { return _cursor; }
//! Sets the current node to `node` and return the previous one.
@@ -302,11 +311,13 @@ public:
//!
//! \note If a section of some id is not associated with the Builder/Compiler it would be null, so always check
//! for nulls if you iterate over the vector.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<SectionNode*>& sectionNodes() const noexcept {
return _sectionNodes;
}
//! Tests whether the `SectionNode` of the given `sectionId` was registered.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRegisteredSectionNode(uint32_t sectionId) const noexcept {
return sectionId < _sectionNodes.size() && _sectionNodes[sectionId] != nullptr;
}
@@ -321,6 +332,7 @@ public:
//! Returns whether the section links of active section nodes are dirty. You can update these links by calling
//! `updateSectionLinks()` in such case.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDirtySectionLinks() const noexcept { return _dirtySectionLinks; }
//! Updates links of all active section nodes.
@@ -335,14 +347,17 @@ public:
//!
//! \note If a label of some id is not associated with the Builder/Compiler it would be null, so always check for
//! nulls if you iterate over the vector.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<LabelNode*>& labelNodes() const noexcept { return _labelNodes; }
//! Tests whether the `LabelNode` of the given `labelId` was registered.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRegisteredLabelNode(uint32_t labelId) const noexcept {
return labelId < _labelNodes.size() && _labelNodes[labelId] != nullptr;
}
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRegisteredLabelNode(const Label& label) const noexcept {
return hasRegisteredLabelNode(label.id());
}
@@ -364,8 +379,12 @@ public:
//! Use \ref labelNodeOf() functions to get back \ref LabelNode from a label or its identifier.
ASMJIT_API Error registerLabelNode(LabelNode* ASMJIT_NONNULL(node));
[[nodiscard]]
ASMJIT_API Label newLabel() override;
[[nodiscard]]
ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId) override;
ASMJIT_API Error bind(const Label& label) override;
//! \}
@@ -374,6 +393,7 @@ public:
//! \{
//! Returns a vector of `Pass` instances that will be executed by `runPasses()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<Pass*>& passes() const noexcept { return _passes; }
//! Allocates and instantiates a new pass of type `T` and returns its instance. If the allocation fails `nullptr` is
@@ -384,10 +404,12 @@ public:
//! \remarks The pointer returned (if non-null) is owned by the Builder or Compiler. When the Builder/Compiler is
//! destroyed it destroys all passes it created so no manual memory management is required.
template<typename T>
[[nodiscard]]
inline T* newPassT() noexcept { return _codeZone.newT<T>(); }
//! \overload
template<typename T, typename... Args>
[[nodiscard]]
inline T* newPassT(Args&&... args) noexcept { return _codeZone.newT<T>(std::forward<Args>(args)...); }
template<typename T>
@@ -399,9 +421,12 @@ public:
//! Returns `Pass` by name.
//!
//! If the pass having the given `name` doesn't exist `nullptr` is returned.
[[nodiscard]]
ASMJIT_API Pass* passByName(const char* name) const noexcept;
//! Adds `pass` to the list of passes.
ASMJIT_API Error addPass(Pass* pass) noexcept;
//! Removes `pass` from the list of passes and delete it.
ASMJIT_API Error deletePass(Pass* pass) noexcept;
@@ -604,17 +629,24 @@ public:
//! Casts this node to `T*`.
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* as() noexcept { return static_cast<T*>(this); }
//! Casts this node to `const T*`.
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* as() const noexcept { return static_cast<const T*>(this); }
//! Returns previous node or `nullptr` if this node is either first or not part of Builder/Compiler node-list.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* prev() const noexcept { return _prev; }
//! Returns next node or `nullptr` if this node is either last or not part of Builder/Compiler node-list.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* next() const noexcept { return _next; }
//! Returns the type of the node, see \ref NodeType.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeType type() const noexcept { return _any._nodeType; }
//! Sets the type of the node, see `NodeType` (internal).
@@ -624,37 +656,65 @@ public:
ASMJIT_INLINE_NODEBUG void setType(NodeType type) noexcept { _any._nodeType = type; }
//! Tests whether this node is either `InstNode` or extends it.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInst() const noexcept { return hasFlag(NodeFlags::kActsAsInst); }
//! Tests whether this node is `SectionNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isSection() const noexcept { return type() == NodeType::kSection; }
//! Tests whether this node is either `LabelNode` or extends it.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabel() const noexcept { return hasFlag(NodeFlags::kActsAsLabel); }
//! Tests whether this node is `AlignNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAlign() const noexcept { return type() == NodeType::kAlign; }
//! Tests whether this node is `EmbedDataNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isEmbedData() const noexcept { return type() == NodeType::kEmbedData; }
//! Tests whether this node is `EmbedLabelNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isEmbedLabel() const noexcept { return type() == NodeType::kEmbedLabel; }
//! Tests whether this node is `EmbedLabelDeltaNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isEmbedLabelDelta() const noexcept { return type() == NodeType::kEmbedLabelDelta; }
//! Tests whether this node is `ConstPoolNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isConstPool() const noexcept { return type() == NodeType::kConstPool; }
//! Tests whether this node is `CommentNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isComment() const noexcept { return type() == NodeType::kComment; }
//! Tests whether this node is `SentinelNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isSentinel() const noexcept { return type() == NodeType::kSentinel; }
//! Tests whether this node is `FuncNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFunc() const noexcept { return type() == NodeType::kFunc; }
//! Tests whether this node is `FuncRetNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFuncRet() const noexcept { return type() == NodeType::kFuncRet; }
//! Tests whether this node is `InvokeNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInvoke() const noexcept { return type() == NodeType::kInvoke; }
//! Returns the node flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeFlags flags() const noexcept { return _any._nodeFlags; }
//! Tests whether the node has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(NodeFlags flag) const noexcept { return Support::test(_any._nodeFlags, flag); }
//! Replaces node flags with `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(NodeFlags flags) noexcept { _any._nodeFlags = flags; }
//! Adds the given `flags` to node flags.
@@ -663,24 +723,39 @@ public:
ASMJIT_INLINE_NODEBUG void clearFlags(NodeFlags flags) noexcept { _any._nodeFlags &= ~flags; }
//! Tests whether the node is code that can be executed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isCode() const noexcept { return hasFlag(NodeFlags::kIsCode); }
//! Tests whether the node is data that cannot be executed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isData() const noexcept { return hasFlag(NodeFlags::kIsData); }
//! Tests whether the node is informative only (is never encoded like comment, etc...).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInformative() const noexcept { return hasFlag(NodeFlags::kIsInformative); }
//! Tests whether the node is removable if it's in an unreachable code block.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRemovable() const noexcept { return hasFlag(NodeFlags::kIsRemovable); }
//! Tests whether the node has no effect when executed (label, .align, nop, ...).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasNoEffect() const noexcept { return hasFlag(NodeFlags::kHasNoEffect); }
//! Tests whether the node is part of the code.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isActive() const noexcept { return hasFlag(NodeFlags::kIsActive); }
//! Tests whether the node has a position assigned.
//!
//! \remarks Returns `true` if node position is non-zero.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPosition() const noexcept { return _position != 0; }
//! Returns node position.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t position() const noexcept { return _position; }
//! Sets node position.
//!
//! Node position is a 32-bit unsigned integer that is used by Compiler to track where the node is relatively to
@@ -697,10 +772,15 @@ public:
//! size so you can either store a pointer or `int64_t` value through `setUserDataAsPtr()`, `setUserDataAsInt64()`
//! and `setUserDataAsUInt64()`.
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* userDataAsPtr() const noexcept { return static_cast<T*>(_userDataPtr); }
//! Returns user data casted to `int64_t`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int64_t userDataAsInt64() const noexcept { return int64_t(_userDataU64); }
//! Returns user data casted to `uint64_t`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t userDataAsUInt64() const noexcept { return _userDataU64; }
//! Sets user data to `data`.
@@ -715,10 +795,14 @@ public:
ASMJIT_INLINE_NODEBUG void resetUserData() noexcept { _userDataU64 = 0; }
//! Tests whether the node has an associated pass data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPassData() const noexcept { return _passData != nullptr; }
//! Returns the node pass data - data used during processing & transformations.
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* passData() const noexcept { return (T*)_passData; }
//! Sets the node pass data to `data`.
template<typename T>
ASMJIT_INLINE_NODEBUG void setPassData(T* data) noexcept { _passData = (void*)data; }
@@ -726,9 +810,13 @@ public:
ASMJIT_INLINE_NODEBUG void resetPassData() noexcept { _passData = nullptr; }
//! Tests whether the node has an inline comment/annotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInlineComment() const noexcept { return _inlineComment != nullptr; }
//! Returns an inline comment/annotation string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; }
//! Sets an inline comment/annotation string to `s`.
ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; }
//! Resets an inline comment/annotation string to nullptr.
@@ -752,10 +840,10 @@ public:
//! embed 5. The rest (up to 6 operands) is considered extended.
//!
//! The number of operands InstNode holds is decided when \ref InstNode is created.
static constexpr uint32_t kBaseOpCapacity = uint32_t((128 - sizeof(BaseNode) - sizeof(BaseInst)) / sizeof(Operand_));
static inline constexpr uint32_t kBaseOpCapacity = uint32_t((128 - sizeof(BaseNode) - sizeof(BaseInst)) / sizeof(Operand_));
//! Count of maximum number of operands \ref InstNode can hold.
static constexpr uint32_t kFullOpCapacity = Globals::kMaxOpCount;
static inline constexpr uint32_t kFullOpCapacity = Globals::kMaxOpCount;
//! \}
@@ -791,7 +879,10 @@ public:
//! \name Instruction Object
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseInst& baseInst() noexcept { return _baseInst; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const BaseInst& baseInst() const noexcept { return _baseInst; }
//! \}
@@ -800,8 +891,11 @@ public:
//! \{
//! Returns the instruction id, see `BaseInst::Id`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _baseInst.id(); }
//! Returns the instruction real id, see `BaseInst::Id`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _baseInst.realId(); }
//! Sets the instruction id to `id`, see `BaseInst::Id`.
@@ -813,9 +907,13 @@ public:
//! \{
//! Returns instruction options, see \ref InstOptions for more details.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions options() const noexcept { return _baseInst.options(); }
//! Tests whether instruction has the given \option` set/enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return _baseInst.hasOption(option); }
//! Sets instruction `options` to the provided value, resetting all others.
ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _baseInst.setOptions(options); }
//! Adds instruction `options` to the instruction.
@@ -831,11 +929,17 @@ public:
//! \{
//! Tests whether the node has an extra register operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _baseInst.hasExtraReg(); }
//! Returns extra register operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _baseInst.extraReg(); }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _baseInst.extraReg(); }
//! Sets extra register operand to `reg`.
ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _baseInst.setExtraReg(reg); }
//! Sets extra register operand to `reg`.
@@ -849,24 +953,30 @@ public:
//! \{
//! Returns operand count.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _inst._opCount; }
//! Returns operand capacity.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t opCapacity() const noexcept { return _inst._opCapacity; }
//! Sets operand count.
ASMJIT_INLINE_NODEBUG void setOpCount(uint32_t opCount) noexcept { _inst._opCount = uint8_t(opCount); }
//! Returns operands array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Operand* operands() noexcept {
return reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(this) + sizeof(InstNode));
}
//! Returns operands array (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Operand* operands() const noexcept {
return reinterpret_cast<const Operand*>(reinterpret_cast<const uint8_t*>(this) + sizeof(InstNode));
}
//! Returns operand at the given `index`.
[[nodiscard]]
inline Operand& op(uint32_t index) noexcept {
ASMJIT_ASSERT(index < opCapacity());
@@ -875,6 +985,7 @@ public:
}
//! Returns operand at the given `index` (const).
[[nodiscard]]
inline const Operand& op(uint32_t index) const noexcept {
ASMJIT_ASSERT(index < opCapacity());
@@ -911,6 +1022,7 @@ public:
//! \{
//! Tests whether the given operand type `opType` is used by the instruction.
[[nodiscard]]
inline bool hasOpType(OperandType opType) const noexcept {
const Operand* ops = operands();
for (uint32_t i = 0, count = opCount(); i < count; i++)
@@ -920,18 +1032,26 @@ public:
}
//! Tests whether the instruction uses at least one register operand.
[[nodiscard]]
inline bool hasRegOp() const noexcept { return hasOpType(OperandType::kReg); }
//! Tests whether the instruction uses at least one memory operand.
[[nodiscard]]
inline bool hasMemOp() const noexcept { return hasOpType(OperandType::kMem); }
//! Tests whether the instruction uses at least one immediate operand.
[[nodiscard]]
inline bool hasImmOp() const noexcept { return hasOpType(OperandType::kImm); }
//! Tests whether the instruction uses at least one label operand.
[[nodiscard]]
inline bool hasLabelOp() const noexcept { return hasOpType(OperandType::kLabel); }
//! Returns the index of the given operand type `opType`.
//!
//! \note If the operand type wa found, the value returned represents its index in \ref operands()
//! array, otherwise \ref Globals::kNotFound is returned to signalize that the operand was not found.
[[nodiscard]]
inline uint32_t indexOfOpType(OperandType opType) const noexcept {
uint32_t i = 0;
uint32_t count = opCount();
@@ -947,10 +1067,15 @@ public:
}
//! A shortcut that calls `indexOfOpType(OperandType::kMem)`.
[[nodiscard]]
inline uint32_t indexOfMemOp() const noexcept { return indexOfOpType(OperandType::kMem); }
//! A shortcut that calls `indexOfOpType(OperandType::kImm)`.
[[nodiscard]]
inline uint32_t indexOfImmOp() const noexcept { return indexOfOpType(OperandType::kImm); }
//! A shortcut that calls `indexOfOpType(OperandType::kLabel)`.
[[nodiscard]]
inline uint32_t indexOfLabelOp() const noexcept { return indexOfOpType(OperandType::kLabel); }
//! \}
@@ -961,18 +1086,22 @@ public:
//! \cond INTERNAL
//! Returns uint32_t[] view that represents BaseInst::RegOnly and instruction operands.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t* _getRewriteArray() noexcept { return &_baseInst._extraReg._id; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint32_t* _getRewriteArray() const noexcept { return &_baseInst._extraReg._id; }
//! Maximum value of rewrite id - 6 operands each having 4 slots is 24, one RegOnly having 2 slots => 26.
static constexpr uint32_t kMaxRewriteId = 26 - 1;
static inline constexpr uint32_t kMaxRewriteId = 26 - 1;
//! Returns a rewrite index of the given pointer to `id`.
//!
//! This function returns a value that can be then passed to `\ref rewriteIdAtIndex() function. It can address
//! any id from any operand that is used by the instruction in addition to \ref BaseInst::regOnly field, which
//! can also be used by the register allocator.
[[nodiscard]]
inline uint32_t getRewriteIndex(const uint32_t* id) const noexcept {
const uint32_t* array = _getRewriteArray();
ASMJIT_ASSERT(array <= id);
@@ -1010,14 +1139,16 @@ public:
//! There are only two capacities used - \ref kBaseOpCapacity and \ref kFullOpCapacity, so this function
//! is used to decide between these two. The general rule is that instructions that can be represented with
//! \ref kBaseOpCapacity would use this value, and all others would take \ref kFullOpCapacity.
static ASMJIT_INLINE_NODEBUG constexpr uint32_t capacityOfOpCount(uint32_t opCount) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR uint32_t capacityOfOpCount(uint32_t opCount) noexcept {
return opCount <= kBaseOpCapacity ? kBaseOpCapacity : kFullOpCapacity;
}
//! Calculates the size of \ref InstNode required to hold at most `opCapacity` operands.
//!
//! This function is used internally to allocate \ref InstNode.
static ASMJIT_INLINE_NODEBUG constexpr size_t nodeSizeOfOpCapacity(uint32_t opCapacity) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR size_t nodeSizeOfOpCapacity(uint32_t opCapacity) noexcept {
return sizeof(InstNode) + opCapacity * sizeof(Operand);
}
//! \endcond
@@ -1107,8 +1238,11 @@ public:
//! \{
//! Returns \ref Label representation of the \ref LabelNode.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); }
//! Returns the id of the label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; }
//! \}
@@ -1146,12 +1280,16 @@ public:
//! \{
//! Returns align mode.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG AlignMode alignMode() const noexcept { return _alignData._alignMode; }
//! Sets align mode to `alignMode`.
ASMJIT_INLINE_NODEBUG void setAlignMode(AlignMode alignMode) noexcept { _alignData._alignMode = alignMode; }
//! Returns align offset in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
//! Sets align offset in bytes to `offset`.
ASMJIT_INLINE_NODEBUG void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
@@ -1167,9 +1305,7 @@ public:
ASMJIT_NONCOPYABLE(EmbedDataNode)
//! \cond INTERNAL
enum : uint32_t {
kInlineBufferSize = 128 - (sizeof(BaseNode) + sizeof(size_t) * 2)
};
static inline constexpr uint32_t kInlineBufferSize = 128 - (sizeof(BaseNode) + sizeof(size_t) * 2);
//! \endcond
//! \name Members
@@ -1204,30 +1340,38 @@ public:
//! \{
//! Returns data type as \ref TypeId.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _embed._typeId; }
//! Returns the size of a single data element.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t typeSize() const noexcept { return _embed._typeSize; }
//! Returns a pointer to the data casted to `uint8_t`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* data() const noexcept {
return dataSize() <= kInlineBufferSize ? const_cast<uint8_t*>(_inlineData) : _externalData;
}
//! Returns a pointer to the data casted to `T`.
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* dataAs() const noexcept { return reinterpret_cast<T*>(data()); }
//! Returns the number of (typed) items in the array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t itemCount() const noexcept { return _itemCount; }
//! Returns how many times the data is repeated (default 1).
//!
//! Repeated data is useful when defining constants for SIMD, for example.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t repeatCount() const noexcept { return _repeatCount; }
//! Returns the size of the data, not considering the number of times it repeats.
//!
//! \note The returned value is the same as `typeSize() * itemCount()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t dataSize() const noexcept { return typeSize() * _itemCount; }
//! \}
@@ -1261,17 +1405,23 @@ public:
//! \{
//! Returns the label to embed as \ref Label operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); }
//! Returns the id of the label.
ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; }
//! Sets the label id from `label` operand.
ASMJIT_INLINE_NODEBUG void setLabel(const Label& label) noexcept { setLabelId(label.id()); }
//! Returns the id of the label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; }
//! Sets the label id (use with caution, improper use can break a lot of things).
ASMJIT_INLINE_NODEBUG void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; }
//! Returns the data size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t dataSize() const noexcept { return _dataSize; }
//! Sets the data size.
ASMJIT_INLINE_NODEBUG void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; }
@@ -1308,18 +1458,25 @@ public:
//! \{
//! Returns the label as `Label` operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); }
//! Returns the id of the label.
ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; }
//! Sets the label id from `label` operand.
ASMJIT_INLINE_NODEBUG void setLabel(const Label& label) noexcept { setLabelId(label.id()); }
//! Returns the id of the label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; }
//! Sets the label id.
ASMJIT_INLINE_NODEBUG void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; }
//! Returns the base label as `Label` operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label baseLabel() const noexcept { return Label(_baseLabelId); }
//! Returns the id of the base label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t baseLabelId() const noexcept { return _baseLabelId; }
//! Sets the base label id from `label` operand.
@@ -1328,7 +1485,9 @@ public:
ASMJIT_INLINE_NODEBUG void setBaseLabelId(uint32_t baseLabelId) noexcept { _baseLabelId = baseLabelId; }
//! Returns the size of the embedded label address.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t dataSize() const noexcept { return _dataSize; }
//! Sets the size of the embedded label address.
ASMJIT_INLINE_NODEBUG void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; }
@@ -1366,15 +1525,23 @@ public:
//! \{
//! Tests whether the constant-pool is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _constPool.empty(); }
//! Returns the size of the constant-pool in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _constPool.size(); }
//! Returns minimum alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t alignment() const noexcept { return _constPool.alignment(); }
//! Returns the wrapped `ConstPool` instance.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ConstPool& constPool() noexcept { return _constPool; }
//! Returns the wrapped `ConstPool` instance (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ConstPool& constPool() const noexcept { return _constPool; }
//! \}
@@ -1431,6 +1598,7 @@ public:
//! \{
//! Returns the type of the sentinel.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SentinelType sentinelType() const noexcept {
return _sentinel._sentinelType;
}
@@ -1471,8 +1639,11 @@ public:
//! \{
//! Returns \ref BaseBuilder associated with the pass.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const BaseBuilder* cb() const noexcept { return _cb; }
//! Returns the name of the pass.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name; }
//! \}

View File

@@ -45,11 +45,14 @@ struct CodeBuffer {
//! \{
//! Returns a reference to the byte at the given `index`.
[[nodiscard]]
inline uint8_t& operator[](size_t index) noexcept {
ASMJIT_ASSERT(index < _size);
return _data[index];
}
//! \overload
[[nodiscard]]
inline const uint8_t& operator[](size_t index) const noexcept {
ASMJIT_ASSERT(index < _size);
return _data[index];
@@ -61,34 +64,47 @@ struct CodeBuffer {
//! \{
//! Returns code buffer flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CodeBufferFlags flags() const noexcept { return _flags; }
//! Tests whether the code buffer has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(CodeBufferFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Tests whether this code buffer has a fixed size.
//!
//! Fixed size means that the code buffer is fixed and cannot grow.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return hasFlag(CodeBufferFlags::kIsFixed); }
//! Tests whether the data in this code buffer is external.
//!
//! External data can only be provided by users, it's never used by AsmJit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return hasFlag(CodeBufferFlags::kIsExternal); }
//! Tests whether the data in this code buffer is allocated (non-null).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return _data != nullptr; }
//! Tests whether the code buffer is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return !_size; }
//! Returns the size of the data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns the capacity of the data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return _capacity; }
//! Returns the pointer to the data the buffer references.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* data() noexcept { return _data; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* data() const noexcept { return _data; }
//! \}
@@ -96,10 +112,16 @@ struct CodeBuffer {
//! \name Iterators
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* begin() noexcept { return _data; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* begin() const noexcept { return _data; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* end() noexcept { return _data + _size; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* end() const noexcept { return _data + _size; }
//! \}

View File

@@ -83,8 +83,9 @@ static void CodeHolder_resetInternal(CodeHolder* self, ResetPolicy resetPolicy)
uint32_t numSections = self->_sections.size();
for (i = 0; i < numSections; i++) {
Section* section = self->_sections[i];
if (section->_buffer.data() && !section->_buffer.isExternal())
if (section->_buffer.data() && !section->_buffer.isExternal()) {
::free(section->_buffer._data);
}
section->_buffer._data = nullptr;
section->_buffer._capacity = 0;
}
@@ -150,8 +151,9 @@ Error CodeHolder::init(const Environment& environment, uint64_t baseAddress) noe
Error CodeHolder::init(const Environment& environment, const CpuFeatures& cpuFeatures, uint64_t baseAddress) noexcept {
// Cannot reinitialize if it's locked or there is one or more emitter attached.
if (isInitialized())
if (isInitialized()) {
return DebugUtils::errored(kErrorAlreadyInitialized);
}
// If we are just initializing there should be no emitters attached.
ASMJIT_ASSERT(_emitters.empty());
@@ -193,23 +195,27 @@ void CodeHolder::reset(ResetPolicy resetPolicy) noexcept {
Error CodeHolder::attach(BaseEmitter* emitter) noexcept {
// Catch a possible misuse of the API.
if (ASMJIT_UNLIKELY(!emitter))
if (ASMJIT_UNLIKELY(!emitter)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
// Invalid emitter, this should not be possible.
EmitterType type = emitter->emitterType();
if (ASMJIT_UNLIKELY(type == EmitterType::kNone || uint32_t(type) > uint32_t(EmitterType::kMaxValue)))
if (ASMJIT_UNLIKELY(type == EmitterType::kNone || uint32_t(type) > uint32_t(EmitterType::kMaxValue))) {
return DebugUtils::errored(kErrorInvalidState);
}
uint64_t archMask = emitter->_archMask;
if (ASMJIT_UNLIKELY(!(archMask & (uint64_t(1) << uint32_t(arch())))))
if (ASMJIT_UNLIKELY(!(archMask & (uint64_t(1) << uint32_t(arch()))))) {
return DebugUtils::errored(kErrorInvalidArch);
}
// This is suspicious, but don't fail if `emitter` is already attached
// to this code holder. This is not error, but it's not recommended.
if (emitter->_code != nullptr) {
if (emitter->_code == this)
if (emitter->_code == this) {
return kErrorOk;
}
return DebugUtils::errored(kErrorInvalidState);
}
@@ -225,18 +231,21 @@ Error CodeHolder::attach(BaseEmitter* emitter) noexcept {
}
Error CodeHolder::detach(BaseEmitter* emitter) noexcept {
if (ASMJIT_UNLIKELY(!emitter))
if (ASMJIT_UNLIKELY(!emitter)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
if (ASMJIT_UNLIKELY(emitter->_code != this))
if (ASMJIT_UNLIKELY(emitter->_code != this)) {
return DebugUtils::errored(kErrorInvalidState);
}
// NOTE: We always detach if we were asked to, if error happens during
// `emitter->onDetach()` we just propagate it, but the BaseEmitter will
// be detached.
Error err = kErrorOk;
if (!emitter->isDestroyed())
if (!emitter->isDestroyed()) {
err = emitter->onDetach(this);
}
// Disconnect CodeHolder <-> BaseEmitter.
uint32_t index = _emitters.indexOf(emitter);
@@ -275,13 +284,16 @@ static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t
uint8_t* oldData = cb->_data;
uint8_t* newData;
if (oldData && !cb->isExternal())
if (oldData && !cb->isExternal()) {
newData = static_cast<uint8_t*>(::realloc(oldData, n));
else
}
else {
newData = static_cast<uint8_t*>(::malloc(n));
}
if (ASMJIT_UNLIKELY(!newData))
if (ASMJIT_UNLIKELY(!newData)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
cb->_data = newData;
cb->_capacity = n;
@@ -306,35 +318,44 @@ static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t
Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept {
// The size of the section must be valid.
size_t size = cb->size();
if (ASMJIT_UNLIKELY(n > std::numeric_limits<uintptr_t>::max() - size))
if (ASMJIT_UNLIKELY(n > std::numeric_limits<uintptr_t>::max() - size)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
// We can now check if growing the buffer is really necessary. It's unlikely
// that this function is called while there is still room for `n` bytes.
size_t capacity = cb->capacity();
size_t required = cb->size() + n;
if (ASMJIT_UNLIKELY(required <= capacity))
return kErrorOk;
if (cb->isFixed())
if (ASMJIT_UNLIKELY(required <= capacity)) {
return kErrorOk;
}
if (cb->isFixed()) {
return DebugUtils::errored(kErrorTooLarge);
}
size_t kInitialCapacity = 8096;
if (capacity < kInitialCapacity)
if (capacity < kInitialCapacity) {
capacity = kInitialCapacity;
else
}
else {
capacity += Globals::kAllocOverhead;
}
do {
size_t old = capacity;
if (capacity < Globals::kGrowThreshold)
if (capacity < Globals::kGrowThreshold) {
capacity *= 2;
else
}
else {
capacity += Globals::kGrowThreshold;
}
// Overflow.
if (ASMJIT_UNLIKELY(old > capacity))
if (ASMJIT_UNLIKELY(old > capacity)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
} while (capacity - Globals::kAllocOverhead < required);
return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead);
@@ -343,11 +364,13 @@ Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept {
Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
size_t capacity = cb->capacity();
if (n <= capacity)
if (n <= capacity) {
return kErrorOk;
}
if (cb->isFixed())
if (cb->isFixed()) {
return DebugUtils::errored(kErrorTooLarge);
}
return CodeHolder_reserveInternal(this, cb, n);
}
@@ -358,28 +381,34 @@ Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t nameSize, SectionFlags flags, uint32_t alignment, int32_t order) noexcept {
*sectionOut = nullptr;
if (nameSize == SIZE_MAX)
if (nameSize == SIZE_MAX) {
nameSize = strlen(name);
}
if (alignment == 0)
if (alignment == 0) {
alignment = 1;
}
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment)))
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment))) {
return DebugUtils::errored(kErrorInvalidArgument);
}
if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxSectionNameSize))
if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxSectionNameSize)) {
return DebugUtils::errored(kErrorInvalidSectionName);
}
uint32_t sectionId = _sections.size();
if (ASMJIT_UNLIKELY(sectionId == Globals::kInvalidId))
if (ASMJIT_UNLIKELY(sectionId == Globals::kInvalidId)) {
return DebugUtils::errored(kErrorTooManySections);
}
ASMJIT_PROPAGATE(_sections.willGrow(&_allocator));
ASMJIT_PROPAGATE(_sectionsByOrder.willGrow(&_allocator));
Section* section = _allocator.allocZeroedT<Section>();
if (ASMJIT_UNLIKELY(!section))
if (ASMJIT_UNLIKELY(!section)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
section->_id = sectionId;
section->_flags = flags;
@@ -399,24 +428,28 @@ Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t name
}
Section* CodeHolder::sectionByName(const char* name, size_t nameSize) const noexcept {
if (nameSize == SIZE_MAX)
if (nameSize == SIZE_MAX) {
nameSize = strlen(name);
}
// This could be also put in a hash-table similarly like we do with labels,
// however it's questionable as the number of sections should be pretty low
// in general. Create an issue if this becomes a problem.
if (nameSize <= Globals::kMaxSectionNameSize) {
for (Section* section : _sections)
if (memcmp(section->_name.str, name, nameSize) == 0 && section->_name.str[nameSize] == '\0')
for (Section* section : _sections) {
if (memcmp(section->_name.str, name, nameSize) == 0 && section->_name.str[nameSize] == '\0') {
return section;
}
}
}
return nullptr;
}
Section* CodeHolder::ensureAddressTableSection() noexcept {
if (_addressTableSection)
if (_addressTableSection) {
return _addressTableSection;
}
newSection(&_addressTableSection,
CodeHolder_addrTabName,
@@ -429,16 +462,19 @@ Section* CodeHolder::ensureAddressTableSection() noexcept {
Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept {
AddressTableEntry* entry = _addressTableEntries.get(address);
if (entry)
if (entry) {
return kErrorOk;
}
Section* section = ensureAddressTableSection();
if (ASMJIT_UNLIKELY(!section))
if (ASMJIT_UNLIKELY(!section)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
entry = _zone.newT<AddressTableEntry>(address);
if (ASMJIT_UNLIKELY(!entry))
if (ASMJIT_UNLIKELY(!entry)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
_addressTableEntries.insert(entry);
section->_virtualSize += _environment.registerSize();
@@ -452,24 +488,26 @@ Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept {
//! Only used to lookup a label from `_namedLabels`.
class LabelByName {
public:
const char* _key {};
uint32_t _keySize {};
uint32_t _hashCode {};
uint32_t _parentId {};
inline LabelByName(const char* key, size_t keySize, uint32_t hashCode, uint32_t parentId) noexcept
: _key(key),
_keySize(uint32_t(keySize)),
_hashCode(hashCode),
_parentId(parentId) {}
[[nodiscard]]
inline uint32_t hashCode() const noexcept { return _hashCode; }
[[nodiscard]]
inline bool matches(const LabelEntry* entry) const noexcept {
return entry->nameSize() == _keySize &&
entry->parentId() == _parentId &&
::memcmp(entry->name(), _key, _keySize) == 0;
}
const char* _key;
uint32_t _keySize;
uint32_t _hashCode;
uint32_t _parentId;
};
// Returns a hash of `name` and fixes `nameSize` if it's `SIZE_MAX`.
@@ -479,7 +517,9 @@ static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize
size_t i = 0;
for (;;) {
uint8_t c = uint8_t(name[i]);
if (!c) break;
if (!c) {
break;
}
hashCode = Support::hashRound(hashCode, c);
i++;
}
@@ -488,7 +528,9 @@ static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize
else {
for (size_t i = 0; i < nameSize; i++) {
uint8_t c = uint8_t(name[i]);
if (ASMJIT_UNLIKELY(!c)) return DebugUtils::errored(kErrorInvalidLabelName);
if (ASMJIT_UNLIKELY(!c)) {
return DebugUtils::errored(kErrorInvalidLabelName);
}
hashCode = Support::hashRound(hashCode, c);
}
}
@@ -497,7 +539,9 @@ static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize
LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel, const OffsetFormat& format) noexcept {
LabelLink* link = _allocator.allocT<LabelLink>();
if (ASMJIT_UNLIKELY(!link)) return nullptr;
if (ASMJIT_UNLIKELY(!link)) {
return nullptr;
}
link->next = le->_links;
le->_links = link;
@@ -516,14 +560,16 @@ Error CodeHolder::newLabelEntry(LabelEntry** entryOut) noexcept {
*entryOut = nullptr;
uint32_t labelId = _labelEntries.size();
if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId))
if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) {
return DebugUtils::errored(kErrorTooManyLabels);
}
ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
LabelEntry* le = _allocator.allocZeroedT<LabelEntry>();
if (ASMJIT_UNLIKELY(!le))
if (ASMJIT_UNLIKELY(!le)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
le->_setId(labelId);
le->_parentId = Globals::kInvalidId;
@@ -539,10 +585,12 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
if (ASMJIT_UNLIKELY(nameSize == 0)) {
if (type == LabelType::kAnonymous)
if (type == LabelType::kAnonymous) {
return newLabelEntry(entryOut);
else
}
else {
return DebugUtils::errored(kErrorInvalidLabelName);
}
}
if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxLabelNameSize))
@@ -551,18 +599,21 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
switch (type) {
case LabelType::kAnonymous: {
// Anonymous labels cannot have a parent (or more specifically, parent is useless here).
if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId))
if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) {
return DebugUtils::errored(kErrorInvalidParentLabel);
}
uint32_t labelId = _labelEntries.size();
if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId))
if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) {
return DebugUtils::errored(kErrorTooManyLabels);
}
ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
LabelEntry* le = _allocator.allocZeroedT<LabelEntry>();
if (ASMJIT_UNLIKELY(!le))
if (ASMJIT_UNLIKELY(!le)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
// NOTE: This LabelEntry has a name, but we leave its hashCode as zero as it's anonymous.
le->_setId(labelId);
@@ -577,8 +628,9 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
}
case LabelType::kLocal: {
if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size()))
if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size())) {
return DebugUtils::errored(kErrorInvalidParentLabel);
}
hashCode ^= parentId;
break;
@@ -586,8 +638,9 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
case LabelType::kGlobal:
case LabelType::kExternal: {
if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId))
if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) {
return DebugUtils::errored(kErrorInvalidParentLabel);
}
break;
}
@@ -600,20 +653,23 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
// different id, this is already accomplished by having a different hashes
// between the same label names having different parent labels.
LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId));
if (ASMJIT_UNLIKELY(le))
if (ASMJIT_UNLIKELY(le)) {
return DebugUtils::errored(kErrorLabelAlreadyDefined);
}
Error err = kErrorOk;
uint32_t labelId = _labelEntries.size();
if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId))
if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) {
return DebugUtils::errored(kErrorTooManyLabels);
}
ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
le = _allocator.allocZeroedT<LabelEntry>();
if (ASMJIT_UNLIKELY(!le))
if (ASMJIT_UNLIKELY(!le)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
le->_hashCode = hashCode;
le->_setId(labelId);
@@ -631,24 +687,28 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
uint32_t CodeHolder::labelIdByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
if (ASMJIT_UNLIKELY(!nameSize))
if (ASMJIT_UNLIKELY(!nameSize)) {
return 0;
}
if (parentId != Globals::kInvalidId)
if (parentId != Globals::kInvalidId) {
hashCode ^= parentId;
}
LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId));
return le ? le->id() : uint32_t(Globals::kInvalidId);
}
ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept {
if (!hasUnresolvedLinks())
if (!hasUnresolvedLinks()) {
return kErrorOk;
}
Error err = kErrorOk;
for (LabelEntry* le : labelEntries()) {
if (!le->isBound())
if (!le->isBound()) {
continue;
}
LabelLinkIterator link(le);
if (link) {
@@ -695,15 +755,18 @@ ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept {
ASMJIT_API Error CodeHolder::bindLabel(const Label& label, uint32_t toSectionId, uint64_t toOffset) noexcept {
LabelEntry* le = labelEntry(label);
if (ASMJIT_UNLIKELY(!le))
if (ASMJIT_UNLIKELY(!le)) {
return DebugUtils::errored(kErrorInvalidLabel);
}
if (ASMJIT_UNLIKELY(toSectionId > _sections.size()))
if (ASMJIT_UNLIKELY(toSectionId > _sections.size())) {
return DebugUtils::errored(kErrorInvalidSection);
}
// Label can be bound only once.
if (ASMJIT_UNLIKELY(le->isBound()))
if (ASMJIT_UNLIKELY(le->isBound())) {
return DebugUtils::errored(kErrorLabelAlreadyBound);
}
// Bind the label.
Section* section = _sections[toSectionId];
@@ -761,12 +824,14 @@ Error CodeHolder::newRelocEntry(RelocEntry** dst, RelocType relocType) noexcept
ASMJIT_PROPAGATE(_relocations.willGrow(&_allocator));
uint32_t relocId = _relocations.size();
if (ASMJIT_UNLIKELY(relocId == Globals::kInvalidId))
if (ASMJIT_UNLIKELY(relocId == Globals::kInvalidId)) {
return DebugUtils::errored(kErrorTooManyRelocations);
}
RelocEntry* re = _allocator.allocZeroedT<RelocEntry>();
if (ASMJIT_UNLIKELY(!re))
if (ASMJIT_UNLIKELY(!re)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
re->_id = relocId;
re->_relocType = relocType;
@@ -798,8 +863,9 @@ static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, ui
case ExpressionValueType::kLabel: {
LabelEntry* le = exp->value[i].label;
if (!le->isBound())
if (!le->isBound()) {
return DebugUtils::errored(kErrorExpressionLabelNotBound);
}
v = le->section()->offset() + le->offset();
break;
}
@@ -863,14 +929,16 @@ Error CodeHolder::flatten() noexcept {
uint64_t realSize = section->realSize();
if (realSize) {
uint64_t alignedOffset = Support::alignUp(offset, section->alignment());
if (ASMJIT_UNLIKELY(alignedOffset < offset))
if (ASMJIT_UNLIKELY(alignedOffset < offset)) {
return DebugUtils::errored(kErrorTooLarge);
}
Support::FastUInt8 of = 0;
offset = Support::addOverflow(alignedOffset, realSize, &of);
if (ASMJIT_UNLIKELY(of))
if (ASMJIT_UNLIKELY(of)) {
return DebugUtils::errored(kErrorTooLarge);
}
}
}
@@ -879,13 +947,15 @@ Error CodeHolder::flatten() noexcept {
offset = 0;
for (Section* section : _sectionsByOrder) {
uint64_t realSize = section->realSize();
if (realSize)
if (realSize) {
offset = Support::alignUp(offset, section->alignment());
}
section->_offset = offset;
// Make sure the previous section extends a bit to cover the alignment.
if (prev)
if (prev) {
prev->_virtualSize = offset - prev->_offset;
}
prev = section;
offset += realSize;
@@ -908,16 +978,18 @@ size_t CodeHolder::codeSize() const noexcept {
}
}
if ((sizeof(uint64_t) > sizeof(size_t) && offset > uint64_t(SIZE_MAX)) || of)
if ((sizeof(uint64_t) > sizeof(size_t) && offset > uint64_t(SIZE_MAX)) || of) {
return SIZE_MAX;
}
return size_t(offset);
}
Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
// Base address must be provided.
if (ASMJIT_UNLIKELY(baseAddress == Globals::kNoBaseAddress))
if (ASMJIT_UNLIKELY(baseAddress == Globals::kNoBaseAddress)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
_baseAddress = baseAddress;
uint32_t addressSize = _environment.registerSize();
@@ -927,22 +999,23 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
uint8_t* addressTableEntryData = nullptr;
if (addressTableSection) {
ASMJIT_PROPAGATE(
reserveBuffer(&addressTableSection->_buffer, size_t(addressTableSection->virtualSize())));
ASMJIT_PROPAGATE(reserveBuffer(&addressTableSection->_buffer, size_t(addressTableSection->virtualSize())));
addressTableEntryData = addressTableSection->_buffer.data();
}
// Relocate all recorded locations.
for (const RelocEntry* re : _relocations) {
// Possibly deleted or optimized-out entry.
if (re->relocType() == RelocType::kNone)
if (re->relocType() == RelocType::kNone) {
continue;
}
Section* sourceSection = sectionById(re->sourceSectionId());
Section* targetSection = nullptr;
if (re->targetSectionId() != Globals::kInvalidId)
if (re->targetSectionId() != Globals::kInvalidId) {
targetSection = sectionById(re->targetSectionId());
}
uint64_t value = re->payload();
uint64_t sectionOffset = sourceSection->offset();
@@ -951,8 +1024,9 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
// Make sure that the `RelocEntry` doesn't go out of bounds.
size_t regionSize = re->format().regionSize();
if (ASMJIT_UNLIKELY(re->sourceOffset() >= sourceSection->bufferSize() ||
sourceSection->bufferSize() - size_t(re->sourceOffset()) < regionSize))
sourceSection->bufferSize() - size_t(re->sourceOffset()) < regionSize)) {
return DebugUtils::errored(kErrorInvalidRelocEntry);
}
uint8_t* buffer = sourceSection->data();
@@ -970,8 +1044,9 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
case RelocType::kRelToAbs: {
// Value is currently a relative offset from the start of its section.
// We have to convert it to an absolute offset (including base address).
if (ASMJIT_UNLIKELY(!targetSection))
if (ASMJIT_UNLIKELY(!targetSection)) {
return DebugUtils::errored(kErrorInvalidRelocEntry);
}
//value += baseAddress + sectionOffset + sourceOffset + regionSize;
value += baseAddress + targetSection->offset();
@@ -982,40 +1057,46 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
value -= baseAddress + sectionOffset + sourceOffset + regionSize;
// Sign extend as we are not interested in the high 32-bit word in a 32-bit address space.
if (addressSize <= 4)
if (addressSize <= 4) {
value = uint64_t(int64_t(int32_t(value & 0xFFFFFFFFu)));
else if (!Support::isInt32(int64_t(value)))
}
else if (!Support::isInt32(int64_t(value))) {
return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
}
break;
}
case RelocType::kX64AddressEntry: {
size_t valueOffset = size_t(re->sourceOffset()) + re->format().valueOffset();
if (re->format().valueSize() != 4 || valueOffset < 2)
if (re->format().valueSize() != 4 || valueOffset < 2) {
return DebugUtils::errored(kErrorInvalidRelocEntry);
}
// First try whether a relative 32-bit displacement would work.
value -= baseAddress + sectionOffset + sourceOffset + regionSize;
if (!Support::isInt32(int64_t(value))) {
// Relative 32-bit displacement is not possible, use '.addrtab' section.
AddressTableEntry* atEntry = _addressTableEntries.get(re->payload());
if (ASMJIT_UNLIKELY(!atEntry))
if (ASMJIT_UNLIKELY(!atEntry)) {
return DebugUtils::errored(kErrorInvalidRelocEntry);
}
// Cannot be null as we have just matched the `AddressTableEntry`.
ASMJIT_ASSERT(addressTableSection != nullptr);
if (!atEntry->hasAssignedSlot())
if (!atEntry->hasAssignedSlot()) {
atEntry->_slot = addressTableEntryCount++;
}
size_t atEntryIndex = size_t(atEntry->slot()) * addressSize;
uint64_t addrSrc = sectionOffset + sourceOffset + regionSize;
uint64_t addrDst = addressTableSection->offset() + uint64_t(atEntryIndex);
value = addrDst - addrSrc;
if (!Support::isInt32(int64_t(value)))
if (!Support::isInt32(int64_t(value))) {
return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
}
// Bytes that replace [REX, OPCODE] bytes.
uint32_t byte0 = 0xFF;
@@ -1064,14 +1145,16 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
}
Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, CopySectionFlags copyFlags) noexcept {
if (ASMJIT_UNLIKELY(!isSectionValid(sectionId)))
if (ASMJIT_UNLIKELY(!isSectionValid(sectionId))) {
return DebugUtils::errored(kErrorInvalidSection);
}
Section* section = sectionById(sectionId);
size_t bufferSize = section->bufferSize();
if (ASMJIT_UNLIKELY(dstSize < bufferSize))
if (ASMJIT_UNLIKELY(dstSize < bufferSize)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
memcpy(dst, section->data(), bufferSize);
@@ -1086,14 +1169,16 @@ Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId,
Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, CopySectionFlags copyFlags) noexcept {
size_t end = 0;
for (Section* section : _sectionsByOrder) {
if (section->offset() > dstSize)
if (section->offset() > dstSize) {
return DebugUtils::errored(kErrorInvalidArgument);
}
size_t bufferSize = section->bufferSize();
size_t offset = size_t(section->offset());
if (ASMJIT_UNLIKELY(dstSize - offset < bufferSize))
if (ASMJIT_UNLIKELY(dstSize - offset < bufferSize)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
uint8_t* dstTarget = static_cast<uint8_t*>(dst) + offset;
size_t paddingSize = 0;

View File

@@ -175,34 +175,50 @@ public:
//! \{
//! Returns the section id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; }
//! Returns the section name, as a null terminated string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.str; }
//! Returns the section data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* data() noexcept { return _buffer.data(); }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* data() const noexcept { return _buffer.data(); }
//! Returns the section flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SectionFlags flags() const noexcept { return _flags; }
//! Tests whether the section has the given `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(SectionFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Adds `flags` to the section flags.
ASMJIT_INLINE_NODEBUG void addFlags(SectionFlags flags) noexcept { _flags |= flags; }
//! Removes `flags` from the section flags.
ASMJIT_INLINE_NODEBUG void clearFlags(SectionFlags flags) noexcept { _flags &= ~flags; }
//! Returns the minimum section alignment
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
//! Sets the minimum section alignment
ASMJIT_INLINE_NODEBUG void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
//! Returns the section order, which has a higher priority than section id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int32_t order() const noexcept { return _order; }
//! Returns the section offset, relative to base.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t offset() const noexcept { return _offset; }
//! Set the section offset.
ASMJIT_INLINE_NODEBUG void setOffset(uint64_t offset) noexcept { _offset = offset; }
@@ -212,18 +228,26 @@ public:
//! size returned by `bufferSize()` as the buffer stores real data emitted by assemblers or appended by users.
//!
//! Use `realSize()` to get the real and final size of this section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t virtualSize() const noexcept { return _virtualSize; }
//! Sets the virtual size of the section.
ASMJIT_INLINE_NODEBUG void setVirtualSize(uint64_t virtualSize) noexcept { _virtualSize = virtualSize; }
//! Returns the buffer size of the section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t bufferSize() const noexcept { return _buffer.size(); }
//! Returns the real size of the section calculated from virtual and buffer sizes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t realSize() const noexcept { return Support::max<uint64_t>(virtualSize(), bufferSize()); }
//! Returns the `CodeBuffer` used by this section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CodeBuffer& buffer() noexcept { return _buffer; }
//! Returns the `CodeBuffer` used by this section (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const CodeBuffer& buffer() const noexcept { return _buffer; }
//! \}
@@ -256,15 +280,25 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t address() const noexcept { return _address; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t slot() const noexcept { return _slot; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAssignedSlot() const noexcept { return _slot != 0xFFFFFFFFu; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator<(const AddressTableEntry& other) const noexcept { return _address < other._address; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator>(const AddressTableEntry& other) const noexcept { return _address > other._address; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator<(uint64_t queryAddress) const noexcept { return _address < queryAddress; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator>(uint64_t queryAddress) const noexcept { return _address > queryAddress; }
//! \}
@@ -434,19 +468,32 @@ struct OffsetFormat {
}
//! Returns flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; }
//! Returns the size of the region/instruction where the offset is encoded.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regionSize() const noexcept { return _regionSize; }
//! Returns the offset of the word relative to the start of the region where the offset is.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t valueOffset() const noexcept { return _valueOffset; }
//! Returns the size of the data-type (word) that contains the offset, in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t valueSize() const noexcept { return _valueSize; }
//! Returns the count of bits of the offset value in the data it's stored in.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immBitCount() const noexcept { return _immBitCount; }
//! Returns the bit-shift of the offset value in the data it's stored in.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immBitShift() const noexcept { return _immBitShift; }
//! Returns the number of least significant bits of the offset value, that must be zero and that are not part of
//! the encoded data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immDiscardLsb() const noexcept { return _immDiscardLsb; }
//! Resets this offset format to a simple data value of `dataSize` bytes.
@@ -536,17 +583,28 @@ struct RelocEntry {
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RelocType relocType() const noexcept { return _relocType; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OffsetFormat& format() const noexcept { return _format; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t sourceSectionId() const noexcept { return _sourceSectionId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t targetSectionId() const noexcept { return _targetSectionId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t sourceOffset() const noexcept { return _sourceOffset; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t payload() const noexcept { return _payload; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Expression* payloadAsExpression() const noexcept {
return reinterpret_cast<Expression*>(uintptr_t(_payload));
}
@@ -603,15 +661,14 @@ public:
//! \name Constants
//! \{
enum : uint32_t {
//! SSO size of \ref _name.
//!
//! \cond INTERNAL
//! Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has granularity of 32 bytes anyway). This
//! gives `_name` the remaining space, which is should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
//! \endcond
kStaticNameSize = 64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*))
};
//! SSO size of \ref _name.
//!
//! \cond INTERNAL
//! Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has granularity of 32 bytes anyway). This
//! gives `_name` the remaining space, which is should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
//! \endcond
static inline constexpr uint32_t kStaticNameSize =
64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*));
//! \}
@@ -642,52 +699,68 @@ public:
// compiler targeting 64-bit CPU will add to align the structure to 64-bits.
//! Returns label id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _customData; }
//! Sets label id (internal, used only by `CodeHolder`).
ASMJIT_INLINE_NODEBUG void _setId(uint32_t id) noexcept { _customData = id; }
//! Returns label type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelType type() const noexcept { return _type; }
//! Tests whether the label has a parent label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasParent() const noexcept { return _parentId != Globals::kInvalidId; }
//! Returns label's parent id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t parentId() const noexcept { return _parentId; }
//! Returns the section where the label was bound.
//!
//! If the label was not yet bound the return value is `nullptr`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* section() const noexcept { return _section; }
//! Tests whether the label has name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasName() const noexcept { return !_name.empty(); }
//! Returns the label's name.
//!
//! \note Local labels will return their local name without their parent part, for example ".L1".
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); }
//! Returns size of label's name.
//!
//! \note Label name is always null terminated, so you can use `strlen()` to get it, however, it's also cached in
//! `LabelEntry` itself, so if you want to know the size the fastest way is to call `LabelEntry::nameSize()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); }
//! Returns links associated with this label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelLink* links() const noexcept { return _links; }
//! Tests whether the label is bound.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBound() const noexcept { return _section != nullptr; }
//! Tests whether the label is bound to a the given `sectionId`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBoundTo(Section* section) const noexcept { return _section == section; }
//! Returns the label offset (only useful if the label is bound).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t offset() const noexcept { return _offset; }
//! Returns the hash-value of label's name and its parent label (if any).
//!
//! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function is implemented in `Support::hashString()`
//! and `Support::hashRound()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t hashCode() const noexcept { return _hashCode; }
//! \}
@@ -776,6 +849,7 @@ public:
//! Tests whether the `CodeHolder` has been initialized.
//!
//! Emitters can be only attached to initialized `CodeHolder` instances.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _environment.isInitialized(); }
//! Initializes CodeHolder to hold code described by the given `environment` and `baseAddress`.
@@ -805,6 +879,7 @@ public:
//! \note This should be only used for AsmJit's purposes. Code holder uses arena allocator to allocate everything,
//! so anything allocated through this allocator will be invalidated by \ref CodeHolder::reset() or by CodeHolder's
//! destructor.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); }
//! \}
@@ -813,19 +888,27 @@ public:
//! \{
//! Returns the target environment information.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; }
//! Returns the target architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return environment().arch(); }
//! Returns the target sub-architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); }
//! Returns the minimum CPU features of the target architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const CpuFeatures& cpuFeatures() const noexcept { return _cpuFeatures; }
//! Tests whether a static base-address is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
//! Returns a static base-address or \ref Globals::kNoBaseAddress, if not set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t baseAddress() const noexcept { return _baseAddress; }
//! \}
@@ -834,6 +917,7 @@ public:
//! \{
//! Returns a vector of attached emitters.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<BaseEmitter*>& emitters() const noexcept { return _emitters; }
//! \}
@@ -842,6 +926,7 @@ public:
//! \{
//! Returns the attached logger.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; }
//! Attaches a `logger` to CodeHolder and propagates it to all attached emitters.
ASMJIT_API void setLogger(Logger* logger) noexcept;
@@ -852,8 +937,10 @@ public:
//! \{
//! Tests whether the CodeHolder has an attached error handler, see \ref ErrorHandler.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
//! Returns the attached error handler.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
//! Attach an error handler to this `CodeHolder`.
ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
@@ -881,13 +968,19 @@ public:
//! \{
//! Returns an array of `Section*` records.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<Section*>& sections() const noexcept { return _sections; }
//! Returns an array of `Section*` records sorted according to section order first, then section id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<Section*>& sectionsByOrder() const noexcept { return _sectionsByOrder; }
//! Returns the number of sections.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t sectionCount() const noexcept { return _sections.size(); }
//! Tests whether the given `sectionId` is valid.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isSectionValid(uint32_t sectionId) const noexcept { return sectionId < _sections.size(); }
//! Creates a new section and return its pointer in `sectionOut`.
@@ -896,19 +989,23 @@ public:
ASMJIT_API Error newSection(Section** sectionOut, const char* name, size_t nameSize = SIZE_MAX, SectionFlags flags = SectionFlags::kNone, uint32_t alignment = 1, int32_t order = 0) noexcept;
//! Returns a section entry of the given index.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* sectionById(uint32_t sectionId) const noexcept { return _sections[sectionId]; }
//! Returns section-id that matches the given `name`.
//!
//! If there is no such section `Section::kInvalidId` is returned.
[[nodiscard]]
ASMJIT_API Section* sectionByName(const char* name, size_t nameSize = SIZE_MAX) const noexcept;
//! Returns '.text' section (section that commonly represents code).
//!
//! \note Text section is always the first section in \ref CodeHolder::sections() array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* textSection() const noexcept { return _sections[0]; }
//! Tests whether '.addrtab' section exists.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAddressTable() const noexcept { return _addressTableSection != nullptr; }
//! Returns '.addrtab' section.
@@ -917,10 +1014,12 @@ public:
//! addresses that cannot be encoded in instructions like 'jmp' or 'call'.
//!
//! \note This section is created on demand, the returned pointer can be null.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* addressTableSection() const noexcept { return _addressTableSection; }
//! Ensures that '.addrtab' section exists (creates it if it doesn't) and
//! returns it. Can return `nullptr` on out of memory condition.
[[nodiscard]]
ASMJIT_API Section* ensureAddressTableSection() noexcept;
//! Used to add an address to an address table.
@@ -939,22 +1038,27 @@ public:
//! \{
//! Returns array of `LabelEntry*` records.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<LabelEntry*>& labelEntries() const noexcept { return _labelEntries; }
//! Returns number of labels created.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t labelCount() const noexcept { return _labelEntries.size(); }
//! Tests whether the label having `id` is valid (i.e. created by `newLabelEntry()`).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelValid(uint32_t labelId) const noexcept {
return labelId < _labelEntries.size();
}
//! Tests whether the `label` is valid (i.e. created by `newLabelEntry()`).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept {
return label.id() < _labelEntries.size();
}
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelBound(uint32_t labelId) const noexcept {
return isLabelValid(labelId) && _labelEntries[labelId]->isBound();
}
@@ -962,16 +1066,19 @@ public:
//! Tests whether the `label` is already bound.
//!
//! Returns `false` if the `label` is not valid.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelBound(const Label& label) const noexcept {
return isLabelBound(label.id());
}
//! Returns LabelEntry of the given label `id`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelEntry* labelEntry(uint32_t labelId) const noexcept {
return isLabelValid(labelId) ? _labelEntries[labelId] : static_cast<LabelEntry*>(nullptr);
}
//! Returns LabelEntry of the given `label`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelEntry* labelEntry(const Label& label) const noexcept {
return labelEntry(label.id());
}
@@ -980,12 +1087,14 @@ public:
//!
//! The offset returned is relative to the start of the section. Zero offset is returned for unbound labels,
//! which is their initial offset value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t labelOffset(uint32_t labelId) const noexcept {
ASMJIT_ASSERT(isLabelValid(labelId));
return _labelEntries[labelId]->offset();
}
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t labelOffset(const Label& label) const noexcept {
return labelOffset(label.id());
}
@@ -994,6 +1103,7 @@ public:
//!
//! \remarks The offset of the section where the label is bound must be valid in order to use this function,
//! otherwise the value returned will not be reliable.
[[nodiscard]]
inline uint64_t labelOffsetFromBase(uint32_t labelId) const noexcept {
ASMJIT_ASSERT(isLabelValid(labelId));
const LabelEntry* le = _labelEntries[labelId];
@@ -1001,6 +1111,7 @@ public:
}
//! \overload
[[nodiscard]]
inline uint64_t labelOffsetFromBase(const Label& label) const noexcept {
return labelOffsetFromBase(label.id());
}
@@ -1031,6 +1142,7 @@ public:
//!
//! If the named label doesn't a default constructed \ref Label is returned,
//! which has its id set to \ref Globals::kInvalidId.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept {
return Label(labelIdByName(name, nameSize, parentId));
}
@@ -1038,16 +1150,21 @@ public:
//! Returns a label id by name.
//!
//! If the named label doesn't exist \ref Globals::kInvalidId is returned.
[[nodiscard]]
ASMJIT_API uint32_t labelIdByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
//! Tests whether there are any unresolved label links.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUnresolvedLinks() const noexcept { return _unresolvedLinkCount != 0; }
//! Returns the number of label links, which are unresolved.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t unresolvedLinkCount() const noexcept { return _unresolvedLinkCount; }
//! Creates a new label-link used to store information about yet unbound labels.
//!
//! Returns `null` if the allocation failed.
[[nodiscard]]
ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel, const OffsetFormat& format) noexcept;
//! Resolves cross-section links (`LabelLink`) associated with each label that was used as a destination in code
@@ -1066,11 +1183,15 @@ public:
//! \{
//! Tests whether the code contains relocation entries.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRelocEntries() const noexcept { return !_relocations.empty(); }
//! Returns array of `RelocEntry*` records.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<RelocEntry*>& relocEntries() const noexcept { return _relocations; }
//! Returns a RelocEntry of the given `id`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RelocEntry* relocEntry(uint32_t id) const noexcept { return _relocations[id]; }
//! Creates a new relocation entry of type `relocType`.
@@ -1093,6 +1214,7 @@ public:
//! \note All sections will be iterated over and the code size returned would represent the minimum code size of
//! all combined sections after applying minimum alignment. Code size may decrease after calling `flatten()` and
//! `relocateToBase()`.
[[nodiscard]]
ASMJIT_API size_t codeSize() const noexcept;
//! Relocates the code to the given `baseAddress`.

View File

@@ -16,8 +16,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
uint32_t discardLsb = format.immDiscardLsb();
// Invalid offset (should not happen).
if (!bitCount || bitCount > format.valueSize() * 8u)
if (!bitCount || bitCount > format.valueSize() * 8u) {
return false;
}
uint32_t value;
uint32_t u = 0;
@@ -27,8 +28,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
// absolute value.
if (format.hasSignBit()) {
u = uint32_t(offset64 >= 0);
if (u == 0)
if (u == 0) {
offset64 = -offset64;
}
unsignedLogic = true;
}
@@ -36,30 +38,35 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
if (unsignedLogic) {
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false;
}
offset64 = int64_t(uint64_t(offset64) >> discardLsb);
}
value = uint32_t(offset64 & Support::lsbMask<uint32_t>(bitCount));
if (value != offset64)
if (value != offset64) {
return false;
}
}
else {
// The rest of OffsetType options are all signed.
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false;
}
offset64 >>= discardLsb;
}
if (!Support::isInt32(offset64))
if (!Support::isInt32(offset64)) {
return false;
}
value = uint32_t(int32_t(offset64));
if (!Support::isEncodableOffset32(int32_t(value), bitCount))
if (!Support::isEncodableOffset32(int32_t(value), bitCount)) {
return false;
}
}
switch (format.type()) {
@@ -72,8 +79,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
// Opcode: {.....|imm:1|..N.N|......|imm:3|....|imm:8}
case OffsetType::kThumb32_ADR: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 12 || bitShift != 0)
if (format.valueSize() != 4 || bitCount != 12 || bitShift != 0) {
return false;
}
uint32_t imm8 = (value & 0x00FFu);
uint32_t imm3 = (value & 0x0700u) << (12 - 8);
@@ -88,13 +96,14 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
case OffsetType::kThumb32_BLX:
// The calculation is the same as `B`, but the first LSB bit must be zero, so account for that.
value <<= 1;
ASMJIT_FALLTHROUGH;
[[fallthrough]];
// Opcode: {....|.|imm[23]|imm[20:11]|..|ja|.|jb|imm[10:0]}
case OffsetType::kThumb32_B: {
// Sanity checks.
if (format.valueSize() != 4)
if (format.valueSize() != 4) {
return false;
}
uint32_t ia = (value & 0x0007FFu);
uint32_t ib = (value & 0x1FF800u) << (16 - 11);
@@ -109,8 +118,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
// Opcode: {....|.|imm[19]|....|imm[16:11]|..|ja|.|jb|imm[10:0]}
case OffsetType::kThumb32_BCond: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 20 || bitShift != 0)
if (format.valueSize() != 4 || bitCount != 20 || bitShift != 0) {
return false;
}
uint32_t ia = (value & 0x0007FFu);
uint32_t ib = (value & 0x01F800u) << (16 - 11);
@@ -124,8 +134,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
case OffsetType::kAArch32_ADR: {
uint32_t encodedImm;
if (!arm::Utils::encodeAArch32Imm(value, &encodedImm))
if (!arm::Utils::encodeAArch32Imm(value, &encodedImm)) {
return false;
}
*dst = (Support::bitMask(22) << u) | (encodedImm << bitShift);
return true;
@@ -138,8 +149,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
case OffsetType::kAArch32_U23_0To3At0_4To7At8: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 8 || bitShift != 0)
if (format.valueSize() != 4 || bitCount != 8 || bitShift != 0) {
return false;
}
uint32_t immLo = (value & 0x0Fu);
uint32_t immHi = (value & 0xF0u) << (8 - 4);
@@ -150,8 +162,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
case OffsetType::kAArch32_1To24At0_0At24: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 25 || bitShift != 0)
if (format.valueSize() != 4 || bitCount != 25 || bitShift != 0) {
return false;
}
uint32_t immLo = (value & 0x0000001u) << 24;
uint32_t immHi = (value & 0x1FFFFFEu) >> 1;
@@ -163,8 +176,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
case OffsetType::kAArch64_ADR:
case OffsetType::kAArch64_ADRP: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5)
if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5) {
return false;
}
uint32_t immLo = value & 0x3u;
uint32_t immHi = (value >> 2) & Support::lsbMask<uint32_t>(19);
@@ -182,8 +196,9 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs
uint32_t bitCount = format.immBitCount();
uint32_t discardLsb = format.immDiscardLsb();
if (!bitCount || bitCount > format.valueSize() * 8u)
if (!bitCount || bitCount > format.valueSize() * 8u) {
return false;
}
uint64_t value;
@@ -191,26 +206,30 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs
if (format.type() == OffsetType::kUnsignedOffset) {
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false;
}
offset64 = int64_t(uint64_t(offset64) >> discardLsb);
}
value = uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount);
if (value != uint64_t(offset64))
if (value != uint64_t(offset64)) {
return false;
}
}
else {
// The rest of OffsetType options are all signed.
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false;
}
offset64 >>= discardLsb;
}
if (!Support::isEncodableOffset64(offset64, bitCount))
if (!Support::isEncodableOffset64(offset64, bitCount)) {
return false;
}
value = uint64_t(offset64);
}
@@ -235,8 +254,9 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
switch (format.valueSize()) {
case 1: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format))
if (!encodeOffset32(&mask, offset64, format)) {
return false;
}
Support::writeU8(dst, uint8_t(Support::readU8(dst) | mask));
return true;
@@ -244,8 +264,9 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
case 2: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format))
if (!encodeOffset32(&mask, offset64, format)) {
return false;
}
Support::writeU16uLE(dst, uint16_t(Support::readU16uLE(dst) | mask));
return true;
@@ -263,8 +284,9 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
case 8: {
uint64_t mask;
if (!encodeOffset64(&mask, offset64, format))
if (!encodeOffset64(&mask, offset64, format)) {
return false;
}
Support::writeU64uLE(dst, Support::readU64uLE(dst) | mask);
return true;

View File

@@ -23,10 +23,11 @@ class CodeWriter {
public:
uint8_t* _cursor;
ASMJIT_FORCE_INLINE explicit CodeWriter(BaseAssembler* a) noexcept
ASMJIT_INLINE_NODEBUG explicit CodeWriter(BaseAssembler* a) noexcept
: _cursor(a->_bufferPtr) {}
ASMJIT_FORCE_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
[[nodiscard]]
ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
if (ASMJIT_UNLIKELY(remainingSpace < n)) {
CodeBuffer& buffer = a->_section->_buffer;
@@ -38,25 +39,28 @@ public:
return kErrorOk;
}
ASMJIT_FORCE_INLINE uint8_t* cursor() const noexcept { return _cursor; }
ASMJIT_FORCE_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_FORCE_INLINE void advance(size_t n) noexcept { _cursor += n; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* cursor() const noexcept { return _cursor; }
ASMJIT_FORCE_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
ASMJIT_INLINE_NODEBUG void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_INLINE_NODEBUG void advance(size_t n) noexcept { _cursor += n; }
[[nodiscard]]
ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
ASMJIT_ASSERT(_cursor >= from);
return (size_t)(_cursor - from);
}
template<typename T>
ASMJIT_FORCE_INLINE void emit8(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_INLINE void emit8(T val) noexcept {
using U = std::make_unsigned_t<T>;
_cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor++;
}
template<typename T, typename Y>
ASMJIT_FORCE_INLINE void emit8If(T val, Y cond) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size_t(cond) <= 1u);
_cursor[0] = uint8_t(U(val) & U(0xFF));
@@ -64,42 +68,42 @@ public:
}
template<typename T>
ASMJIT_FORCE_INLINE void emit16uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_INLINE void emit16uLE(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::writeU16uLE(_cursor, uint16_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_FORCE_INLINE void emit16uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_INLINE void emit16uBE(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::writeU16uBE(_cursor, uint16_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_FORCE_INLINE void emit32uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_INLINE void emit32uLE(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
template<typename T>
ASMJIT_FORCE_INLINE void emit32uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_INLINE void emit32uBE(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
ASMJIT_FORCE_INLINE void emitData(const void* data, size_t size) noexcept {
ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memcpy(_cursor, data, size);
_cursor += size;
}
template<typename T>
ASMJIT_FORCE_INLINE void emitValueLE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value);
@@ -111,8 +115,8 @@ public:
}
template<typename T>
ASMJIT_FORCE_INLINE void emitValueBE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value);
@@ -123,13 +127,13 @@ public:
_cursor += size;
}
ASMJIT_FORCE_INLINE void emitZeros(size_t size) noexcept {
ASMJIT_INLINE void emitZeros(size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memset(_cursor, 0, size);
_cursor += size;
}
ASMJIT_FORCE_INLINE void remove8(uint8_t* where) noexcept {
ASMJIT_INLINE void remove8(uint8_t* where) noexcept {
ASMJIT_ASSERT(where < _cursor);
uint8_t* p = where;
@@ -139,7 +143,7 @@ public:
}
template<typename T>
ASMJIT_FORCE_INLINE void insert8(uint8_t* where, T val) noexcept {
ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept {
uint8_t* p = _cursor;
while (p != where) {
@@ -151,7 +155,7 @@ public:
_cursor++;
}
ASMJIT_FORCE_INLINE void done(BaseAssembler* a) noexcept {
ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
CodeBuffer& buffer = a->_section->_buffer;
size_t newSize = (size_t)(_cursor - a->_bufferData);
ASMJIT_ASSERT(newSize <= buffer.capacity());
@@ -164,9 +168,13 @@ public:
//! Code writer utilities.
namespace CodeWriterUtils {
[[nodiscard]]
bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
[[nodiscard]]
bool encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
[[nodiscard]]
bool writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept;
} // {CodeWriterUtils}

View File

@@ -22,11 +22,11 @@ ASMJIT_BEGIN_NAMESPACE
// ===================
class GlobalConstPoolPass : public Pass {
public:
typedef Pass Base;
public:
ASMJIT_NONCOPYABLE(GlobalConstPoolPass)
public:
using Base = Pass;
GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}
Error run(Zone* zone, Logger* logger) override {
@@ -73,27 +73,31 @@ Error BaseCompiler::newFuncNode(FuncNode** out, const FuncSignature& signature)
// Initialize the function's detail info.
Error err = funcNode->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
// If the Target guarantees greater stack alignment than required by the calling convention
// then override it as we can prevent having to perform dynamic stack alignment
uint32_t environmentStackAlignment = _environment.stackAlignment();
if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment)
if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment) {
funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);
}
// Initialize the function frame.
err = funcNode->_frame.init(funcNode->_funcDetail);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
// Allocate space for function arguments.
funcNode->_args = nullptr;
if (funcNode->argCount() != 0) {
funcNode->_args = _allocator.allocT<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack));
if (ASMJIT_UNLIKELY(!funcNode->_args))
if (ASMJIT_UNLIKELY(!funcNode->_args)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack));
}
@@ -159,8 +163,9 @@ Error BaseCompiler::endFunc() {
FuncNode* func = _func;
resetState();
if (ASMJIT_UNLIKELY(!func))
if (ASMJIT_UNLIKELY(!func)) {
return reportError(DebugUtils::errored(kErrorInvalidState));
}
// Add the local constant pool at the end of the function (if exists).
ConstPoolNode* localConstPool = _constPools[uint32_t(ConstPoolScope::kLocal)];
@@ -191,15 +196,17 @@ Error BaseCompiler::newInvokeNode(InvokeNode** out, InstId instId, const Operand
node->resetOpRange(1, node->opCapacity());
Error err = node->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
// Skip the allocation if there are no arguments.
uint32_t argCount = signature.argCount();
if (argCount) {
node->_args = static_cast<InvokeNode::OperandPack*>(_allocator.alloc(argCount * sizeof(InvokeNode::OperandPack)));
if (!node->_args)
if (!node->_args) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack));
}
@@ -235,15 +242,18 @@ Error BaseCompiler::newVirtReg(VirtReg** out, TypeId typeId, OperandSignature si
*out = nullptr;
uint32_t index = _vRegArray.size();
if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount)))
if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount))) {
return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));
}
if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk))
if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>();
if (ASMJIT_UNLIKELY(!vReg))
if (ASMJIT_UNLIKELY(!vReg)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
uint32_t size = TypeUtils::sizeOf(typeId);
uint32_t alignment = Support::min<uint32_t>(size, 64);
@@ -251,10 +261,12 @@ Error BaseCompiler::newVirtReg(VirtReg** out, TypeId typeId, OperandSignature si
vReg = new(Support::PlacementNew{vReg}) VirtReg(signature, Operand::indexToVirtId(index), size, alignment, typeId);
#ifndef ASMJIT_NO_LOGGING
if (name && name[0] != '\0')
if (name && name[0] != '\0') {
vReg->_name.setData(&_dataZone, name, SIZE_MAX);
else
}
else {
BaseCompiler_assignGenericName(this, vReg);
}
#else
DebugUtils::unused(name);
#endif
@@ -270,8 +282,9 @@ Error BaseCompiler::_newReg(BaseReg* out, TypeId typeId, const char* name) {
out->reset();
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
@@ -345,8 +358,9 @@ Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name)
}
}
if (typeId == TypeId::kVoid)
if (typeId == TypeId::kVoid) {
return reportError(DebugUtils::errored(kErrorInvalidState));
}
}
}
else {
@@ -354,8 +368,9 @@ Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name)
}
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
@@ -379,17 +394,21 @@ Error BaseCompiler::_newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt
Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
out->reset();
if (size == 0)
if (size == 0) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (alignment == 0)
if (alignment == 0) {
alignment = 1;
}
if (!Support::isPowerOf2(alignment))
if (!Support::isPowerOf2(alignment)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (alignment > 64)
if (alignment > 64) {
alignment = 64;
}
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, TypeId::kVoid, OperandSignature{0}, name));
@@ -408,21 +427,26 @@ Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, c
}
Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
if (!isVirtIdValid(virtId))
if (!isVirtIdValid(virtId)) {
return DebugUtils::errored(kErrorInvalidVirtId);
}
if (newAlignment && !Support::isPowerOf2(newAlignment))
if (newAlignment && !Support::isPowerOf2(newAlignment)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (newAlignment > 64)
if (newAlignment > 64) {
newAlignment = 64;
}
VirtReg* vReg = virtRegById(virtId);
if (newSize)
if (newSize) {
vReg->_virtSize = newSize;
}
if (newAlignment)
if (newAlignment) {
vReg->_alignment = uint8_t(newAlignment);
}
// This is required if the RAPass is already running. There is a chance that a stack-slot has been already
// allocated and in that case it has to be updated as well, otherwise we would allocate wrong amount of memory.
@@ -438,18 +462,21 @@ Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t new
Error BaseCompiler::_newConst(BaseMem* out, ConstPoolScope scope, const void* data, size_t size) {
out->reset();
if (uint32_t(scope) > 1)
if (uint32_t(scope) > 1) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (!_constPools[uint32_t(scope)])
if (!_constPools[uint32_t(scope)]) {
ASMJIT_PROPAGATE(newConstPoolNode(&_constPools[uint32_t(scope)]));
}
ConstPoolNode* pool = _constPools[uint32_t(scope)];
size_t off;
Error err = pool->add(data, size, off);
if (ASMJIT_UNLIKELY(err))
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
*out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(RegType::kLabelTag) |
@@ -462,7 +489,9 @@ void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
if (!reg.isVirtReg()) return;
VirtReg* vReg = virtRegById(reg.id());
if (!vReg) return;
if (!vReg) {
return;
}
if (fmt && fmt[0] != '\0') {
char buf[128];
@@ -487,8 +516,9 @@ Error BaseCompiler::newJumpNode(JumpNode** out, InstId instId, InstOptions instO
uint32_t opCount = 1;
*out = node;
if (ASMJIT_UNLIKELY(!node))
if (ASMJIT_UNLIKELY(!node)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
node = new(Support::PlacementNew{node}) JumpNode(this, instId, instOptions, opCount, annotation);
node->setOp(0, o0);

View File

@@ -51,7 +51,7 @@ class InvokeNode;
class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder {
public:
ASMJIT_NONCOPYABLE(BaseCompiler)
typedef BaseBuilder Base;
using Base = BaseBuilder;
//! \name Members
//! \{
@@ -96,6 +96,7 @@ public:
ASMJIT_API Error addFuncRetNode(FuncRetNode** ASMJIT_NONNULL(out), const Operand_& o0, const Operand_& o1);
//! Returns the current function.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; }
//! Creates a new \ref FuncNode with the given `signature` and returns it.
@@ -163,31 +164,38 @@ public:
ASMJIT_API Error _newRegFmt(BaseReg* ASMJIT_NONNULL(out), const BaseReg& ref, const char* fmt, ...);
//! Tests whether the given `id` is a valid virtual register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isVirtIdValid(uint32_t id) const noexcept {
uint32_t index = Operand::virtIdToIndex(id);
return index < _vRegArray.size();
}
//! Tests whether the given `reg` is a virtual register having a valid id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isVirtRegValid(const BaseReg& reg) const noexcept {
return isVirtIdValid(reg.id());
}
//! Returns \ref VirtReg associated with the given `id`.
[[nodiscard]]
inline VirtReg* virtRegById(uint32_t id) const noexcept {
ASMJIT_ASSERT(isVirtIdValid(id));
return _vRegArray[Operand::virtIdToIndex(id)];
}
//! Returns \ref VirtReg associated with the given `reg`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); }
//! Returns \ref VirtReg associated with the given virtual register `index`.
//!
//! \note This is not the same as virtual register id. The conversion between id and its index is implemented
//! by \ref Operand_::virtIdToIndex() and \ref Operand_::indexToVirtId() functions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
//! Returns an array of all virtual registers managed by the Compiler.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; }
//! \name Stack
@@ -230,6 +238,7 @@ public:
//! \name Jump Annotations
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept {
return _jumpAnnotations;
}
@@ -239,6 +248,7 @@ public:
//! Returns a new `JumpAnnotation` instance, which can be used to aggregate possible targets of a jump where the
//! target is not a label, for example to implement jump tables.
[[nodiscard]]
ASMJIT_API JumpAnnotation* newJumpAnnotation();
//! \}
@@ -287,15 +297,23 @@ public:
//! \{
//! Returns the compiler that owns this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseCompiler* compiler() const noexcept { return _compiler; }
//! Returns the annotation id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t annotationId() const noexcept { return _annotationId; }
//! Returns a vector of label identifiers that lists all targets of the jump.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; }
//! Tests whether the given `label` is a target of this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); }
//! Tests whether the given `labelId` is a target of this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
//! \}
@@ -342,9 +360,13 @@ public:
//! \{
//! Tests whether this JumpNode has associated a \ref JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAnnotation() const noexcept { return _annotation != nullptr; }
//! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JumpAnnotation* annotation() const noexcept { return _annotation; }
//! Sets the \ref JumpAnnotation associated with this jump to `annotation`.
ASMJIT_INLINE_NODEBUG void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
@@ -446,37 +468,54 @@ public:
//! \name Accessors
//! Returns function exit `LabelNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelNode* exitNode() const noexcept { return _exitNode; }
//! Returns function exit label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label exitLabel() const noexcept { return _exitNode->label(); }
//! Returns "End of Func" sentinel node.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SentinelNode* endNode() const noexcept { return _end; }
//! Returns function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns function frame.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncFrame& frame() noexcept { return _frame; }
//! Returns function frame.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncFrame& frame() const noexcept { return _frame; }
//! Returns function attributes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _frame.attributes(); }
//! Adds `attrs` to the function attributes.
ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _frame.addAttributes(attrs); }
//! Returns arguments count.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns argument packs.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArgPack* argPacks() const noexcept { return _args; }
//! Tests whether the function has a return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns argument pack at `argIndex`.
[[nodiscard]]
inline ArgPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
@@ -557,12 +596,14 @@ public:
}
//! Returns an operand at the given `valueIndex`.
[[nodiscard]]
inline Operand& operator[](size_t valueIndex) noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
}
//! Returns an operand at the given `valueIndex` (const).
[[nodiscard]]
const inline Operand& operator[](size_t valueIndex) const noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
@@ -601,52 +642,74 @@ public:
//! \{
//! Sets the function signature.
[[nodiscard]]
inline Error init(const FuncSignature& signature, const Environment& environment) noexcept {
return _funcDetail.init(signature, environment);
}
//! Returns the function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns the function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns the target operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Operand& target() noexcept { return op(0); }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Operand& target() const noexcept { return op(0); }
//! Returns the number of function return values.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns the number of function arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns operand pack representing function return value(s).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandPack& retPack() noexcept { return _rets; }
//! Returns operand pack representing function return value(s).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OperandPack& retPack() const noexcept { return _rets; }
//! Returns the return value at the given `valueIndex`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
//! Returns operand pack representing function return value(s).
[[nodiscard]]
inline OperandPack& argPack(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! \overload
[[nodiscard]]
inline const OperandPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! Returns a function argument at the given `argIndex`.
[[nodiscard]]
inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
}
//! \overload
[[nodiscard]]
inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
@@ -680,7 +743,7 @@ public:
class ASMJIT_VIRTAPI FuncPass : public Pass {
public:
ASMJIT_NONCOPYABLE(FuncPass)
typedef Pass Base;
using Base = Pass;
//! \name Construction & Destruction
//! \{
@@ -693,6 +756,7 @@ public:
//! \{
//! Returns the associated `BaseCompiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); }
//! \}

View File

@@ -96,18 +96,27 @@ public:
//! \{
//! Returns the virtual register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; }
//! Returns the virtual register name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); }
//! Returns the size of the virtual register name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); }
//! Returns a register signature of this virtual register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; }
//! Returns a virtual register type (maps to the physical register type as well).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); }
//! Returns a virtual register group (maps to the physical register group as well).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); }
//! Returns a real size of the register this virtual register maps to.
@@ -115,23 +124,29 @@ public:
//! For example if this is a 128-bit SIMD register used for a scalar single precision floating point value then
//! its virtSize would be 4, however, the `regSize` would still say 16 (128-bits), because it's the smallest size
//! of that register type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regSize() const noexcept { return _signature.size(); }
//! Returns the virtual register size.
//!
//! The virtual register size describes how many bytes the virtual register needs to store its content. It can be
//! smaller than the physical register size, see `regSize()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t virtSize() const noexcept { return _virtSize; }
//! Returns the virtual register alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
//! Returns the virtual register type id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _typeId; }
//! Returns the virtual register weight - the register allocator can use it as explicit hint for alloc/spill
//! decisions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t weight() const noexcept { return _weight; }
//! Sets the virtual register weight (0 to 255) - the register allocator can use it as explicit hint for
//! alloc/spill decisions and initial bin-packing.
ASMJIT_INLINE_NODEBUG void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
@@ -139,17 +154,20 @@ public:
//! Returns whether the virtual register is always allocated to a fixed physical register (and never reallocated).
//!
//! \note This is only used for special purposes and it's mostly internal.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return bool(_isFixed); }
//! Tests whether the virtual register is in fact a stack that only uses the virtual register id.
//!
//! \note It's an error if a stack is accessed as a register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return bool(_isStack); }
//! Tests whether this virtual register (or stack) has assigned a stack offset.
//!
//! If this is a virtual register that was never allocated on stack, it would return false, otherwise if
//! it's a virtual register that was spilled or explicitly allocated stack, the return value would be true.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return bool(_hasStackSlot); }
//! Assigns a stack offset of this virtual register to `stackOffset` and sets `_hasStackSlot` to true.
@@ -159,9 +177,13 @@ public:
}
//! Tests whether this virtual register has assigned a physical register as a hint to the register allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasHomeIdHint() const noexcept { return _homeIdHint != BaseReg::kIdBad; }
//! Returns a physical register hint, which will be used by the register allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t homeIdHint() const noexcept { return _homeIdHint; }
//! Assigns a physical register hint, which will be used by the register allocator.
ASMJIT_INLINE_NODEBUG void setHomeIdHint(uint32_t homeId) noexcept { _homeIdHint = uint8_t(homeId); }
//! Resets a physical register hint.
@@ -171,14 +193,20 @@ public:
//!
//! \note Always verify that the stack offset has been assigned by calling \ref hasStackSlot(). The return
//! value will be zero when the stack offset was not assigned.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return _stackOffset; }
//! Tests whether the virtual register has an associated `RAWorkReg` at the moment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasWorkReg() const noexcept { return _workReg != nullptr; }
//! Returns an associated RAWorkReg with this virtual register (only valid during register allocation).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkReg* workReg() const noexcept { return _workReg; }
//! Associates a RAWorkReg with this virtual register (used by register allocator).
ASMJIT_INLINE_NODEBUG void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }
//! Reset the RAWorkReg association (used by register allocator).
ASMJIT_INLINE_NODEBUG void resetWorkReg() noexcept { _workReg = nullptr; }

View File

@@ -40,8 +40,9 @@ void ConstPool::reset(Zone* zone) noexcept {
static inline ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool;
if (!gap)
if (!gap) {
return self->_zone->allocT<ConstPool::Gap>();
}
self->_gapPool = gap->_next;
return gap;
@@ -87,8 +88,9 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexce
// We don't have to check for errors here, if this failed nothing really happened (just the gap won't be
// visible) and it will fail again at place where the same check would generate `kErrorOutOfMemory` error.
ConstPool::Gap* gap = ConstPool_allocGap(self);
if (!gap)
if (!gap) {
return;
}
gap->_next = self->_gaps[gapIndex];
self->_gaps[gapIndex] = gap;
@@ -102,24 +104,19 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexce
}
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
size_t treeIndex;
constexpr size_t kMaxSize = size_t(1) << (kIndexCount - 1);
if (size == 64)
treeIndex = kIndex64;
else if (size == 32)
treeIndex = kIndex32;
else if (size == 16)
treeIndex = kIndex16;
else if (size == 8)
treeIndex = kIndex8;
else if (size == 4)
treeIndex = kIndex4;
else if (size == 2)
treeIndex = kIndex2;
else if (size == 1)
treeIndex = kIndex1;
else
// Avoid sizes outside of the supported range.
if (ASMJIT_UNLIKELY(size == 0 || size > kMaxSize)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
size_t treeIndex = Support::ctz(size);
// Avoid sizes, which are not aligned to power of 2.
if (ASMJIT_UNLIKELY((size_t(1) << treeIndex) != size)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
ConstPool::Node* node = _tree[treeIndex].get(data);
if (node) {
@@ -147,8 +144,9 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size));
gapSize -= size;
if (gapSize > 0)
if (gapSize > 0) {
ConstPool_addGap(this, gapOffset, gapSize);
}
}
gapIndex++;
@@ -169,8 +167,9 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
// Add the initial node to the right index.
node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
if (ASMJIT_UNLIKELY(!node))
if (ASMJIT_UNLIKELY(!node)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
_tree[treeIndex].insert(node);
_alignment = Support::max<size_t>(_alignment, size);
@@ -192,18 +191,16 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
const uint8_t* pData = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < pCount; i++, pData += smallerSize) {
node = _tree[treeIndex].get(pData);
if (node) continue;
if (node) {
continue;
}
node = ConstPool::Tree::_newNode(_zone, pData, smallerSize, offset + (i * smallerSize), true);
_tree[treeIndex].insert(node);
}
}
if (_minItemSize == 0)
_minItemSize = size;
else
_minItemSize = Support::min(_minItemSize, size);
_minItemSize = !_minItemSize ? size : Support::min(_minItemSize, size);
return kErrorOk;
}
@@ -216,8 +213,9 @@ struct ConstPoolFill {
_dataSize(dataSize) {}
inline void operator()(const ConstPool::Node* node) noexcept {
if (!node->_shared)
if (!node->_shared) {
memcpy(_dst + node->_offset, node->data(), _dataSize);
}
}
uint8_t* _dst;

View File

@@ -73,6 +73,7 @@ public:
_shared(shared),
_offset(uint32_t(offset)) {}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG void* data() const noexcept {
return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
}
@@ -86,10 +87,12 @@ public:
ASMJIT_INLINE_NODEBUG Compare(size_t dataSize) noexcept
: _dataSize(dataSize) {}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const Node& b) const noexcept {
return ::memcmp(a.data(), b.data(), _dataSize);
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const void* data) const noexcept {
return ::memcmp(a.data(), data, _dataSize);
}
@@ -114,7 +117,10 @@ public:
_size = 0;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
inline void setDataSize(size_t dataSize) noexcept {
@@ -122,6 +128,7 @@ public:
_dataSize = dataSize;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Node* get(const void* data) noexcept {
Compare cmp(_dataSize);
return _tree.get(data, cmp);
@@ -166,6 +173,7 @@ public:
}
}
[[nodiscard]]
static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
Node* node = zone->allocT<Node>(Support::alignUp(sizeof(Node) + size, alignof(Node)));
if (ASMJIT_UNLIKELY(!node)) return nullptr;
@@ -221,12 +229,19 @@ public:
//! \{
//! Tests whether the constant-pool is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
//! Returns the size of the constant-pool in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns minimum alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t alignment() const noexcept { return _alignment; }
//! Returns the minimum size of all items added to the constant pool.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t minItemSize() const noexcept { return _minItemSize; }
//! \}

View File

@@ -58,13 +58,6 @@
#include <unistd.h>
#endif
// Unfortunately when compiling in C++11 mode MSVC would warn about unused functions as
// [[maybe_unused]] attribute is not used in that case (it's used only by C++17 mode and later).
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4505) // unreferenced local function has been removed.
#endif // _MSC_VER
ASMJIT_BEGIN_NAMESPACE
// CpuInfo - Detect - Compatibility
@@ -128,7 +121,7 @@ static inline uint32_t detectHWThreadCount() noexcept {
namespace x86 {
typedef CpuFeatures::X86 Ext;
using Ext = CpuFeatures::X86;
struct cpuid_t { uint32_t eax, ebx, ecx, edx; };
struct xgetbv_t { uint32_t eax, edx; };
@@ -190,9 +183,11 @@ static inline void simplifyCpuVendor(CpuInfo& cpu, uint32_t d0, uint32_t d1, uin
};
uint32_t i;
for (i = 0; i < ASMJIT_ARRAY_SIZE(table) - 1; i++)
if (table[i].d[0] == d0 && table[i].d[1] == d1 && table[i].d[2] == d2)
for (i = 0; i < ASMJIT_ARRAY_SIZE(table) - 1; i++) {
if (table[i].d[0] == d0 && table[i].d[1] == d1 && table[i].d[2] == d2) {
break;
}
}
memcpy(cpu._vendor.str, table[i].normalized, 8);
}
@@ -207,8 +202,9 @@ static ASMJIT_FAVOR_SIZE void simplifyCpuBrand(char* s) noexcept {
s[0] = '\0';
for (;;) {
if (!c)
if (!c) {
break;
}
if (!(c == ' ' && (prev == '@' || s[1] == ' ' || s[1] == '@' || s[1] == '\0'))) {
*d++ = c;
@@ -258,11 +254,13 @@ static ASMJIT_FAVOR_SIZE void detectX86Cpu(CpuInfo& cpu) noexcept {
uint32_t familyId = (regs.eax >> 8) & 0x0F;
// Use extended family and model fields.
if (familyId == 0x06u || familyId == 0x0Fu)
if (familyId == 0x06u || familyId == 0x0Fu) {
modelId += (((regs.eax >> 16) & 0x0Fu) << 4);
}
if (familyId == 0x0Fu)
if (familyId == 0x0Fu) {
familyId += ((regs.eax >> 20) & 0xFFu);
}
cpu._modelId = modelId;
cpu._familyId = familyId;
@@ -621,7 +619,7 @@ static ASMJIT_FAVOR_SIZE void detectX86Cpu(CpuInfo& cpu) noexcept {
namespace arm {
// ARM commonly refers to CPU features using FEAT_ prefix, we use Ext:: to make it compatible with other parts.
typedef CpuFeatures::ARM Ext;
using Ext = CpuFeatures::ARM;
// CpuInfo - Detect - ARM - OS Kernel Version
// ==========================================
@@ -632,14 +630,12 @@ struct UNameKernelVersion {
inline bool atLeast(int major, int minor, int patch = 0) const noexcept {
if (parts[0] >= major) {
if (parts[0] > major)
if (parts[0] > major) {
return true;
}
if (parts[1] >= minor) {
if (parts[1] > minor)
return true;
return parts[2] >= patch;
return parts[1] > minor ? true : parts[2] >= patch;
}
}
@@ -647,14 +643,15 @@ struct UNameKernelVersion {
}
};
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static UNameKernelVersion getUNameKernelVersion() noexcept {
UNameKernelVersion ver{};
ver.parts[0] = -1;
utsname buffer;
if (uname(&buffer) != 0)
if (uname(&buffer) != 0) {
return ver;
}
size_t count = 0;
char* p = buffer.release;
@@ -662,8 +659,9 @@ static UNameKernelVersion getUNameKernelVersion() noexcept {
uint32_t c = uint8_t(*p);
if (c >= uint32_t('0') && c <= uint32_t('9')) {
ver.parts[count] = int(strtol(p, &p, 10));
if (++count == 3)
if (++count == 3) {
break;
}
}
else if (c == '.' || c == '-') {
p++;
@@ -680,13 +678,13 @@ static UNameKernelVersion getUNameKernelVersion() noexcept {
// CpuInfo - Detect - ARM - Baseline Features of ARM Architectures
// ===============================================================
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void populateBaseAArch32Features(CpuFeatures::ARM& features) noexcept {
// No baseline flags at the moment.
DebugUtils::unused(features);
}
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void populateBaseAArch64Features(CpuFeatures::ARM& features) noexcept {
// AArch64 is based on ARMv8.0 and later.
features.add(Ext::kARMv6);
@@ -711,40 +709,40 @@ static inline void populateBaseARMFeatures(CpuInfo& cpu) noexcept {
// ================================================================
// Populates mandatory ARMv8.[v]A features.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_FAVOR_SIZE void populateARMv8AFeatures(CpuFeatures::ARM& features, uint32_t v) noexcept {
switch (v) {
default:
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 9: // ARMv8.9
features.add(Ext::kCLRBHB, Ext::kCSSC, Ext::kPRFMSLC, Ext::kSPECRES2, Ext::kRAS2);
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 8: // ARMv8.8
features.add(Ext::kHBC, Ext::kMOPS, Ext::kNMI);
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 7: // ARMv8.7
features.add(Ext::kHCX, Ext::kPAN3, Ext::kWFXT, Ext::kXS);
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 6: // ARMv8.6
features.add(Ext::kAMU1_1, Ext::kBF16, Ext::kECV, Ext::kFGT, Ext::kI8MM);
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 5: // ARMv8.5
features.add(Ext::kBTI, Ext::kCSV2, Ext::kDPB2, Ext::kFLAGM2, Ext::kFRINTTS, Ext::kSB, Ext::kSPECRES, Ext::kSSBS);
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 4: // ARMv8.4
features.add(Ext::kAMU1, Ext::kDIT, Ext::kDOTPROD, Ext::kFLAGM,
Ext::kLRCPC2, Ext::kLSE2, Ext::kMPAM, Ext::kNV,
Ext::kSEL2, Ext::kTLBIOS, Ext::kTLBIRANGE, Ext::kTRF);
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 3: // ARMv8.3
features.add(Ext::kCCIDX, Ext::kFCMA, Ext::kJSCVT, Ext::kLRCPC, Ext::kPAUTH);
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 2: // ARMv8.2
features.add(Ext::kDPB, Ext::kPAN2, Ext::kRAS, Ext::kUAO);
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 1: // ARMv8.1
features.add(Ext::kCRC32, Ext::kLOR, Ext::kLSE, Ext::kPAN, Ext::kRDM, Ext::kVHE);
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 0: // ARMv8.0
features.add(Ext::kASIMD, Ext::kFP, Ext::kIDIVA, Ext::kVFP_D32);
break;
@@ -752,21 +750,21 @@ static ASMJIT_FAVOR_SIZE void populateARMv8AFeatures(CpuFeatures::ARM& features,
}
// Populates mandatory ARMv9.[v] features.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_FAVOR_SIZE void populateARMv9AFeatures(CpuFeatures::ARM& features, uint32_t v) noexcept {
populateARMv8AFeatures(features, v <= 4u ? 5u + v : 9u);
switch (v) {
default:
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 4: // ARMv9.4 - based on ARMv8.9.
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 3: // ARMv9.3 - based on ARMv8.8.
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 2: // ARMv9.2 - based on ARMv8.7.
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 1: // ARMv9.1 - based on ARMv8.6.
ASMJIT_FALLTHROUGH;
[[fallthrough]];
case 0: // ARMv9.0 - based on ARMv8.5.
features.add(Ext::kRME, Ext::kSVE, Ext::kSVE2);
break;
@@ -780,44 +778,45 @@ static ASMJIT_FAVOR_SIZE void populateARMv9AFeatures(CpuFeatures::ARM& features,
// of the registers so it's an implementation that can theoretically be tested / used in mocks.
// Merges a feature that contains 0b1111 when it doesn't exist and starts at 0b0000 when it does.
ASMJIT_MAYBE_UNUSED
static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeatureNA(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset,
[[maybe_unused]]
static ASMJIT_INLINE void mergeAArch64CPUIDFeatureNA(
CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset,
Ext::Id f0,
Ext::Id f1 = Ext::kNone,
Ext::Id f2 = Ext::kNone,
Ext::Id f3 = Ext::kNone) noexcept {
uint32_t val = uint32_t((regBits >> offset) & 0xFu);
// If val == 0b1111 then the feature is not implemented in this case (some early extensions).
if (val == 0xFu)
if (val == 0xFu) {
// If val == 0b1111 then the feature is not implemented in this case (some early extensions).
return;
}
if (f0 != Ext::kNone) features.add(f0);
if (f1 != Ext::kNone) features.addIf(val >= 1, f1);
if (f2 != Ext::kNone) features.addIf(val >= 2, f2);
if (f3 != Ext::kNone) features.addIf(val >= 3, f3);
features.addIf(f0 != Ext::kNone, f0);
features.addIf(f1 != Ext::kNone && val >= 1, f1);
features.addIf(f2 != Ext::kNone && val >= 2, f2);
features.addIf(f3 != Ext::kNone && val >= 3, f3);
}
// Merges a feature identified by a single bit at `offset`.
ASMJIT_MAYBE_UNUSED
static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature1B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1) noexcept {
[[maybe_unused]]
static ASMJIT_INLINE void mergeAArch64CPUIDFeature1B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1) noexcept {
features.addIf((regBits & (uint64_t(1) << offset)) != 0, f1);
}
// Merges a feature-list starting from 0b01 when it does (0b00 means feature not supported).
ASMJIT_MAYBE_UNUSED
static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature2B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1, Ext::Id f2, Ext::Id f3) noexcept {
[[maybe_unused]]
static ASMJIT_INLINE void mergeAArch64CPUIDFeature2B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1, Ext::Id f2, Ext::Id f3) noexcept {
uint32_t val = uint32_t((regBits >> offset) & 0x3u);
if (f1 != Ext::kNone) features.addIf(val >= 1, f1);
if (f2 != Ext::kNone) features.addIf(val >= 2, f2);
if (f3 != Ext::kNone) features.addIf(val == 3, f3);
features.addIf(f1 != Ext::kNone && val >= 1, f1);
features.addIf(f2 != Ext::kNone && val >= 2, f2);
features.addIf(f3 != Ext::kNone && val == 3, f3);
}
// Merges a feature-list starting from 0b0001 when it does (0b0000 means feature not supported).
ASMJIT_MAYBE_UNUSED
static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset,
[[maybe_unused]]
static ASMJIT_INLINE void mergeAArch64CPUIDFeature4B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset,
Ext::Id f1,
Ext::Id f2 = Ext::kNone,
Ext::Id f3 = Ext::kNone,
@@ -826,16 +825,15 @@ static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4B(CpuFeatures::ARM& fea
uint32_t val = uint32_t((regBits >> offset) & 0xFu);
// if val == 0 it means that this feature is not supported.
if (f1 != Ext::kNone) features.addIf(val >= 1, f1);
if (f2 != Ext::kNone) features.addIf(val >= 2, f2);
if (f3 != Ext::kNone) features.addIf(val >= 3, f3);
if (f4 != Ext::kNone) features.addIf(val >= 4, f4);
features.addIf(f1 != Ext::kNone && val >= 1, f1);
features.addIf(f2 != Ext::kNone && val >= 2, f2);
features.addIf(f3 != Ext::kNone && val >= 3, f3);
features.addIf(f4 != Ext::kNone && val >= 4, f4);
}
// Merges a feature that is identified by an exact bit-combination of 4 bits.
ASMJIT_MAYBE_UNUSED
static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4S(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, uint32_t value, Ext::Id f1) noexcept {
[[maybe_unused]]
static ASMJIT_INLINE void mergeAArch64CPUIDFeature4S(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, uint32_t value, Ext::Id f1) noexcept {
features.addIf(uint32_t((regBits >> offset) & 0xFu) == value, f1);
}
@@ -846,7 +844,7 @@ static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4S(CpuFeatures::ARM& fea
#define MERGE_FEATURE_4S(identifier, reg, offset, ...) mergeAArch64CPUIDFeature4S(cpu.features().arm(), reg, offset, __VA_ARGS__)
// Detects features based on the content of ID_AA64PFR0_EL1 and ID_AA64PFR1_EL1 registers.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(CpuInfo& cpu, uint64_t fpr0, uint64_t fpr1) noexcept {
// ID_AA64PFR0_EL1
// ===============
@@ -911,12 +909,13 @@ static inline void detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(CpuInfo& cpu,
uint32_t mpamMain = uint32_t((fpr0 >> 40) & 0xFu);
uint32_t mpamFrac = uint32_t((fpr1 >> 16) & 0xFu);
if (mpamMain || mpamFrac)
if (mpamMain || mpamFrac) {
cpu.features().arm().add(Ext::kMPAM);
}
}
// Detects features based on the content of ID_AA64ISAR0_EL1 and ID_AA64ISAR1_EL1 registers.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(CpuInfo& cpu, uint64_t isar0, uint64_t isar1) noexcept {
// ID_AA64ISAR0_EL1
// ================
@@ -965,7 +964,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(CpuInfo& cp
}
// Detects features based on the content of ID_AA64ISAR2_EL1 register.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64ISAR2(CpuInfo& cpu, uint64_t isar2) noexcept {
MERGE_FEATURE_4B("WFxT bits [3:0]" , isar2, 0, Ext::kNone, Ext::kWFXT);
MERGE_FEATURE_4B("RPRES bits [7:4]" , isar2, 4, Ext::kRPRES);
@@ -988,7 +987,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ISAR2(CpuInfo& cpu, uint64_
// TODO: This register is not accessed at the moment.
#if 0
// Detects features based on the content of ID_AA64ISAR3_EL1register.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64ISAR3(CpuInfo& cpu, uint64_t isar3) noexcept {
// ID_AA64ISAR3_EL1
// ================
@@ -999,7 +998,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ISAR3(CpuInfo& cpu, uint64_
}
#endif
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64MMFR0(CpuInfo& cpu, uint64_t mmfr0) noexcept {
// ID_AA64MMFR0_EL1
// ================
@@ -1022,7 +1021,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64MMFR0(CpuInfo& cpu, uint64_
MERGE_FEATURE_4B("ECV bits [63:60]" , mmfr0, 60, Ext::kECV);
}
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64MMFR1(CpuInfo& cpu, uint64_t mmfr1) noexcept {
// ID_AA64MMFR1_EL1
// ================
@@ -1051,7 +1050,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64MMFR1(CpuInfo& cpu, uint64_
MERGE_FEATURE_4B("ECBHB bits [63:60]" , mmfr1, 60, Ext::kECBHB);
}
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64MMFR2(CpuInfo& cpu, uint64_t mmfr2) noexcept {
// ID_AA64MMFR2_EL1
// ================
@@ -1082,7 +1081,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64MMFR2(CpuInfo& cpu, uint64_
}
// Detects features based on the content of ID_AA64ZFR0_EL1 register.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64ZFR0(CpuInfo& cpu, uint64_t zfr0) noexcept {
MERGE_FEATURE_4B("SVEver bits [3:0]" , zfr0, 0, Ext::kSVE2, Ext::kSVE2_1);
MERGE_FEATURE_4B("AES bits [7:4]" , zfr0, 4, Ext::kSVE_AES, Ext::kSVE_PMULL128);
@@ -1096,7 +1095,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ZFR0(CpuInfo& cpu, uint64_t
MERGE_FEATURE_4B("F64MM bits [59:56]" , zfr0, 56, Ext::kSVE_F64MM);
}
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64SMFR0(CpuInfo& cpu, uint64_t smfr0) noexcept {
MERGE_FEATURE_1B("SF8DP2 bit [28]" , smfr0, 29, Ext::kSSVE_FP8DOT2);
MERGE_FEATURE_1B("SF8DP4 bit [29]" , smfr0, 29, Ext::kSSVE_FP8DOT4);
@@ -1143,9 +1142,9 @@ enum class AppleFamilyId : uint32_t {
kEVEREST_SAWTOOTH = 0X8765EDEAu // Apple A16.
};
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_FAVOR_SIZE bool detectARMFeaturesViaAppleFamilyId(CpuInfo& cpu) noexcept {
typedef AppleFamilyId Id;
using Id = AppleFamilyId;
CpuFeatures::ARM& features = cpu.features().arm();
switch (cpu.familyId()) {
@@ -1219,7 +1218,7 @@ static ASMJIT_FAVOR_SIZE bool detectARMFeaturesViaAppleFamilyId(CpuInfo& cpu) no
// target it was compiled to.
#if ASMJIT_ARCH_ARM == 32
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_FAVOR_SIZE void detectAArch32FeaturesViaCompilerFlags(CpuInfo& cpu) noexcept {
DebugUtils::unused(cpu);
@@ -1257,7 +1256,7 @@ static ASMJIT_FAVOR_SIZE void detectAArch32FeaturesViaCompilerFlags(CpuInfo& cpu
#endif // ASMJIT_ARCH_ARM == 32
#if ASMJIT_ARCH_ARM == 64
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCompilerFlags(CpuInfo& cpu) noexcept {
DebugUtils::unused(cpu);
@@ -1413,7 +1412,7 @@ static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCompilerFlags(CpuInfo& cpu
}
#endif // ASMJIT_ARCH_ARM == 64
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_FAVOR_SIZE void detectARMFeaturesViaCompilerFlags(CpuInfo& cpu) noexcept {
#if ASMJIT_ARCH_ARM == 32
detectAArch32FeaturesViaCompilerFlags(cpu);
@@ -1426,7 +1425,7 @@ static ASMJIT_FAVOR_SIZE void detectARMFeaturesViaCompilerFlags(CpuInfo& cpu) no
// =====================================================
// Postprocesses AArch32 features.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_FAVOR_SIZE void postProcessAArch32Features(CpuFeatures::ARM& features) noexcept {
DebugUtils::unused(features);
}
@@ -1434,22 +1433,26 @@ static ASMJIT_FAVOR_SIZE void postProcessAArch32Features(CpuFeatures::ARM& featu
// Postprocesses AArch64 features.
//
// The only reason to use this function is to deduce some flags from others.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_FAVOR_SIZE void postProcessAArch64Features(CpuFeatures::ARM& features) noexcept {
if (features.hasFP16())
if (features.hasFP16()) {
features.add(Ext::kFP16CONV);
}
if (features.hasMTE3())
if (features.hasMTE3()) {
features.add(Ext::kMTE2);
}
if (features.hasMTE2())
if (features.hasMTE2()) {
features.add(Ext::kMTE);
}
if (features.hasSSBS2())
if (features.hasSSBS2()) {
features.add(Ext::kSSBS);
}
}
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_FAVOR_SIZE void postProcessARMCpuInfo(CpuInfo& cpu) noexcept {
#if ASMJIT_ARCH_ARM == 32
postProcessAArch32Features(cpu.features().arm());
@@ -1466,7 +1469,7 @@ static ASMJIT_FAVOR_SIZE void postProcessARMCpuInfo(CpuInfo& cpu) noexcept {
// Since the register ID is encoded with the instruction we have to create a function for each register ID to read.
#define ASMJIT_AARCH64_DEFINE_CPUID_READ_FN(func, regId) \
ASMJIT_MAYBE_UNUSED \
[[maybe_unused]] \
static inline uint64_t func() noexcept { \
uint64_t output; \
__asm__ __volatile__("mrs %0, " #regId : "=r"(output)); \
@@ -1494,17 +1497,12 @@ ASMJIT_AARCH64_DEFINE_CPUID_READ_FN(aarch64ReadZFR0, S3_0_C0_C4_4) // ID_AA64ZFR
//
// References:
// - https://docs.kernel.org/arch/arm64/cpu-feature-registers.html
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCPUID(CpuInfo& cpu) noexcept {
populateBaseARMFeatures(cpu);
detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu,
aarch64ReadPFR0(),
aarch64ReadPFR1());
detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu,
aarch64ReadISAR0(),
aarch64ReadISAR1());
detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, aarch64ReadPFR0(), aarch64ReadPFR1());
detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, aarch64ReadISAR0(), aarch64ReadISAR1());
// TODO: Fix this on FreeBSD - I don't know what kernel version allows to access the registers below...
@@ -1867,13 +1865,8 @@ static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept {
const char sysctlCpuPath[] = "machdep.cpu0.cpu_id";
if (sysctlbyname(sysctlCpuPath, &regs, &len, nullptr, 0) == 0) {
detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu,
regs.r64(Regs::k64_AA64PFR0),
regs.r64(Regs::k64_AA64PFR1));
detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu,
regs.r64(Regs::k64_AA64ISAR0),
regs.r64(Regs::k64_AA64ISAR1));
detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, regs.r64(Regs::k64_AA64PFR0), regs.r64(Regs::k64_AA64PFR1));
detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, regs.r64(Regs::k64_AA64ISAR0), regs.r64(Regs::k64_AA64ISAR1));
// TODO: AA64ISAR2 should be added when it's provided by NetBSD.
// detectAArch64FeaturesViaCPUID_AA64ISAR2(cpu, regs.r64Regs::k64_AA64ISAR2));
@@ -1925,18 +1918,12 @@ static uint64_t openbsdReadAArch64CPUID(OpenBSDAArch64CPUID id) noexcept {
}
static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept {
typedef OpenBSDAArch64CPUID ID;
using ID = OpenBSDAArch64CPUID;
populateBaseARMFeatures(cpu);
detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu,
openbsdReadAArch64CPUID(ID::kAA64PFR0),
openbsdReadAArch64CPUID(ID::kAA64PFR1));
detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu,
openbsdReadAArch64CPUID(ID::kAA64ISAR0),
openbsdReadAArch64CPUID(ID::kAA64ISAR1));
detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, openbsdReadAArch64CPUID(ID::kAA64PFR0), openbsdReadAArch64CPUID(ID::kAA64PFR1));
detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, openbsdReadAArch64CPUID(ID::kAA64ISAR0), openbsdReadAArch64CPUID(ID::kAA64ISAR1));
detectAArch64FeaturesViaCPUID_AA64ISAR2(cpu, openbsdReadAArch64CPUID(ID::kAA64ISAR2));
detectAArch64FeaturesViaCPUID_AA64MMFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64MMFR0));
detectAArch64FeaturesViaCPUID_AA64MMFR1(cpu, openbsdReadAArch64CPUID(ID::kAA64MMFR1));
@@ -1946,8 +1933,9 @@ static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept {
if (cpu.features().arm().hasAny(Ext::kSVE, Ext::kSME)) {
detectAArch64FeaturesViaCPUID_AA64ZFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64ZFR0));
if (cpu.features().arm().hasSME())
if (cpu.features().arm().hasSME()) {
detectAArch64FeaturesViaCPUID_AA64SMFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64SMFR0));
}
}
postProcessARMCpuInfo(cpu);
@@ -1989,15 +1977,16 @@ static ASMJIT_FAVOR_SIZE long appleDetectARMFeatureViaSysctl(AppleFeatureType ty
memcpy(sysctlName + prefixSize, featureName, featureNameSize + 1u); // Include NULL terminator.
long val = 0;
if (appleSysctlByName<long>(sysctlName, &val))
if (appleSysctlByName<long>(sysctlName, &val)) {
return val;
}
}
return 0;
}
static ASMJIT_FAVOR_SIZE void appleDetectARMFeaturesViaSysctl(CpuInfo& cpu) noexcept {
typedef AppleFeatureType FT;
using FT = AppleFeatureType;
// Based on:
// - https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics
@@ -2069,8 +2058,9 @@ static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept {
memcpy(cpu._vendor.str, "APPLE", 6);
bool cpuFeaturesPopulated = detectARMFeaturesViaAppleFamilyId(cpu);
if (!cpuFeaturesPopulated)
if (!cpuFeaturesPopulated) {
appleDetectARMFeaturesViaSysctl(cpu);
}
postProcessARMCpuInfo(cpu);
}
@@ -2125,8 +2115,4 @@ const CpuInfo& CpuInfo::host() noexcept {
return cpuInfoGlobal;
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif // _MSC_VER
ASMJIT_END_NAMESPACE

View File

@@ -26,18 +26,21 @@ public:
//! \{
//! \cond INTERNAL
enum : uint32_t {
kMaxFeatures = 256,
kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits
};
static inline constexpr uint32_t kMaxFeatures = 256;
static inline constexpr uint32_t kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits;
//! \endcond
//! A word that is used to represents feature bits.
typedef Support::BitWord BitWord;
//! Iterator that can iterate all CPU features set.
typedef Support::BitVectorIterator<BitWord> Iterator;
//! \}
typedef Support::Array<BitWord, kNumBitWords> Bits;
//! \name Types
//! \{
//! A word that is used to represents feature bits.
using BitWord = Support::BitWord;
//! Iterator that can iterate all CPU features set.
using Iterator = Support::BitVectorIterator<BitWord>;
using Bits = Support::Array<BitWord, kNumBitWords>;
//! \}
@@ -57,7 +60,10 @@ public:
//! \name Overloaded Operators
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const Data& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const Data& other) const noexcept { return !equals(other); }
//! \}
@@ -66,21 +72,28 @@ public:
//! \{
//! Returns true if there are no features set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _bits.aggregate<Support::Or>(0) == 0; }
//! Returns all features as array of bitwords (see \ref Support::BitWord).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BitWord* bits() noexcept { return _bits.data(); }
//! Returns all features as array of bitwords (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const BitWord* bits() const noexcept { return _bits.data(); }
//! Returns the number of BitWords returned by \ref bits().
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t bitWordCount() const noexcept { return kNumBitWords; }
//! Returns \ref Support::BitVectorIterator, that can be used to iterate over all features efficiently.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Iterator iterator() const noexcept { return Iterator(_bits.data(), kNumBitWords); }
//! Tests whether the feature `featureId` is present.
template<typename FeatureId>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool has(const FeatureId& featureId) const noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
@@ -92,6 +105,7 @@ public:
//! \cond NONE
template<typename FeatureId>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAny(const FeatureId& featureId) const noexcept {
return has(featureId);
}
@@ -101,11 +115,13 @@ public:
//!
//! \note This is a variadic function template that can be used with multiple features.
template<typename FeatureId, typename... Args>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAny(const FeatureId& featureId, Args&&... otherFeatureIds) const noexcept {
return bool(unsigned(has(featureId)) | unsigned(hasAny(std::forward<Args>(otherFeatureIds)...)));
}
//! Tests whether all features as defined by `other` are present.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAll(const Data& other) const noexcept {
uint32_t result = 1;
for (uint32_t i = 0; i < kNumBitWords; i++)
@@ -123,7 +139,7 @@ public:
//! Adds the given CPU `featureId` to the list of features.
template<typename FeatureId>
ASMJIT_INLINE_NODEBUG void add(const FeatureId& featureId) noexcept {
inline void add(const FeatureId& featureId) noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits;
@@ -133,13 +149,13 @@ public:
}
template<typename FeatureId, typename... Args>
ASMJIT_INLINE_NODEBUG void add(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
inline void add(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
add(featureId);
add(std::forward<Args>(otherFeatureIds)...);
}
template<typename FeatureId>
ASMJIT_INLINE_NODEBUG void addIf(bool condition, const FeatureId& featureId) noexcept {
inline void addIf(bool condition, const FeatureId& featureId) noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits;
@@ -149,14 +165,14 @@ public:
}
template<typename FeatureId, typename... Args>
ASMJIT_INLINE_NODEBUG void addIf(bool condition, const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
inline void addIf(bool condition, const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
addIf(condition, featureId);
addIf(condition, std::forward<Args>(otherFeatureIds)...);
}
//! Removes the given CPU `featureId` from the list of features.
template<typename FeatureId>
ASMJIT_INLINE_NODEBUG void remove(const FeatureId& featureId) noexcept {
inline void remove(const FeatureId& featureId) noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits;
@@ -166,7 +182,7 @@ public:
}
template<typename FeatureId, typename... Args>
ASMJIT_INLINE_NODEBUG void remove(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
inline void remove(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
remove(featureId);
remove(std::forward<Args>(otherFeatureIds)...);
}
@@ -1104,6 +1120,7 @@ public:
//! Returns the host CPU information.
//!
//! \note The returned reference is global - it's setup only once and then shared.
[[nodiscard]]
ASMJIT_API static const CpuInfo& host() noexcept;
//! \}
@@ -1134,15 +1151,18 @@ public:
//! \{
//! Returns the CPU architecture this information relates to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
//! Returns the CPU sub-architecture this information relates to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; }
//! Returns whether the CPU was detected successfully.
//!
//! If the returned value is false it means that AsmJit either failed to detect the CPU or it doesn't have
//! implementation targeting the host architecture and operating system.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool wasDetected() const noexcept { return _wasDetected; }
//! Returns the CPU family ID.
@@ -1152,6 +1172,7 @@ public:
//! - Family identifier matches the FamilyId read by using CPUID.
//! - ARM:
//! - Apple - returns Apple Family identifier returned by sysctlbyname("hw.cpufamily").
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t familyId() const noexcept { return _familyId; }
//! Returns the CPU model ID.
@@ -1159,6 +1180,7 @@ public:
//! The information provided depends on architecture and OS:
//! - X86:
//! - Model identifier matches the ModelId read by using CPUID.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t modelId() const noexcept { return _modelId; }
//! Returns the CPU brand id.
@@ -1166,6 +1188,7 @@ public:
//! The information provided depends on architecture and OS:
//! - X86:
//! - Brand identifier matches the BrandId read by using CPUID.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t brandId() const noexcept { return _brandId; }
//! Returns the CPU stepping.
@@ -1173,6 +1196,7 @@ public:
//! The information provided depends on architecture and OS:
//! - X86:
//! - Stepping identifier matches the Stepping information read by using CPUID.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t stepping() const noexcept { return _stepping; }
//! Returns the processor type.
@@ -1180,34 +1204,46 @@ public:
//! The information provided depends on architecture and OS:
//! - X86:
//! - Processor type identifier matches the ProcessorType read by using CPUID.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t processorType() const noexcept { return _processorType; }
//! Returns the maximum number of logical processors.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; }
//! Returns the size of a CPU cache line.
//!
//! On a multi-architecture system this should return the smallest cache line of all CPUs.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t cacheLineSize() const noexcept { return _cacheLineSize; }
//! Returns number of hardware threads available.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t hwThreadCount() const noexcept { return _hwThreadCount; }
//! Returns a CPU vendor string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* vendor() const noexcept { return _vendor.str; }
//! Tests whether the CPU vendor string is equal to `s`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isVendor(const char* s) const noexcept { return _vendor.equals(s); }
//! Returns a CPU brand string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* brand() const noexcept { return _brand.str; }
//! Returns CPU features.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CpuFeatures& features() noexcept { return _features; }
//! Returns CPU features (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const CpuFeatures& features() const noexcept { return _features; }
//! Tests whether the CPU has the given `feature`.
template<typename FeatureId>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFeature(const FeatureId& featureId) const noexcept { return _features.has(featureId); }
//! Adds the given CPU `featureId` to the list of features.

View File

@@ -23,22 +23,27 @@ static void dumpFuncValue(String& sb, Arch arch, const FuncValue& value) noexcep
Formatter::formatTypeId(sb, value.typeId());
sb.append('@');
if (value.isIndirect())
if (value.isIndirect()) {
sb.append('[');
}
if (value.isReg())
if (value.isReg()) {
Formatter::formatRegister(sb, 0, nullptr, arch, value.regType(), value.regId());
else if (value.isStack())
}
else if (value.isStack()) {
sb.appendFormat("[%d]", value.stackOffset());
else
}
else {
sb.append("<none>");
}
if (value.isIndirect())
if (value.isIndirect()) {
sb.append(']');
}
}
static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
typedef FuncArgsContext::Var Var;
using Var = FuncArgsContext::Var;
Arch arch = ctx.arch();
uint32_t varCount = ctx.varCount();
@@ -53,8 +58,9 @@ static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
sb.append(" <- ");
dumpFuncValue(sb, arch, cur);
if (var.isDone())
if (var.isDone()) {
sb.append(" {Done}");
}
sb.append('\n');
}
@@ -83,8 +89,8 @@ Error BaseEmitHelper::emitArgMove(const BaseReg& dst_, TypeId dstTypeId, const O
// ===================================
ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
typedef FuncArgsContext::Var Var;
typedef FuncArgsContext::WorkData WorkData;
using Var = FuncArgsContext::Var;
using WorkData = FuncArgsContext::WorkData;
enum WorkFlags : uint32_t {
kWorkNone = 0x00,
@@ -118,10 +124,12 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
BaseReg sa = sp;
if (frame.hasDynamicAlignment()) {
if (frame.hasPreservedFP())
if (frame.hasPreservedFP()) {
sa.setId(archTraits.fpRegId());
else
}
else {
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
}
}
// Register to stack and stack to stack moves must be first as now we have
@@ -135,8 +143,9 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (!var.out.isStack())
if (!var.out.isStack()) {
continue;
}
FuncValue& cur = var.cur;
FuncValue& out = var.out;
@@ -169,13 +178,15 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
// we follow the rule that IntToInt moves will use GP regs with possibility to signature or zero extend,
// and all other moves will either use GP or VEC regs depending on the size of the move.
OperandSignature signature = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId());
if (ASMJIT_UNLIKELY(!signature.isValid()))
if (ASMJIT_UNLIKELY(!signature.isValid())) {
return DebugUtils::errored(kErrorInvalidState);
}
WorkData& wd = workData[signature.regGroup()];
RegMask availableRegs = wd.availableRegs();
if (ASMJIT_UNLIKELY(!availableRegs))
if (ASMJIT_UNLIKELY(!availableRegs)) {
return DebugUtils::errored(kErrorInvalidState);
}
uint32_t availableId = Support::ctz(availableRegs);
reg.setSignatureAndId(signature, availableId);
@@ -183,8 +194,9 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId()));
}
if (cur.isIndirect() && cur.isReg())
if (cur.isIndirect() && cur.isReg()) {
workData[RegGroup::kGp].unassign(varId, cur.regId());
}
// Register to stack move.
ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId()));
@@ -198,8 +210,9 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
for (;;) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone() || !var.cur.isReg())
if (var.isDone() || !var.cur.isReg()) {
continue;
}
FuncValue& cur = var.cur;
FuncValue& out = var.out;
@@ -224,13 +237,15 @@ EmitMove:
BaseReg(archTraits.regTypeToSignature(cur.regType()), curId), cur.typeId()));
// Only reassign if this is not a sign/zero extension that happens on the same in/out register.
if (curId != outId)
if (curId != outId) {
wd.reassign(varId, outId, curId);
}
cur.initReg(out.regType(), outId, out.typeId());
if (outId == out.regId())
if (outId == out.regId()) {
var.markDone();
}
workFlags |= kWorkDidSome | kWorkPending;
}
else {
@@ -241,20 +256,21 @@ EmitMove:
// Only few architectures provide swap operations, and only for few register groups.
if (archTraits.hasInstRegSwap(curGroup)) {
RegType highestType = Support::max(cur.regType(), altVar.cur.regType());
if (Support::isBetween(highestType, RegType::kGp8Lo, RegType::kGp16))
if (Support::isBetween(highestType, RegType::kGp8Lo, RegType::kGp16)) {
highestType = RegType::kGp32;
}
OperandSignature signature = archTraits.regTypeToSignature(highestType);
ASMJIT_PROPAGATE(
emitRegSwap(BaseReg(signature, outId), BaseReg(signature, curId)));
ASMJIT_PROPAGATE(emitRegSwap(BaseReg(signature, outId), BaseReg(signature, curId)));
wd.swap(varId, curId, altId, outId);
cur.setRegId(outId);
var.markDone();
altVar.cur.setRegId(curId);
if (altVar.out.isInitialized())
if (altVar.out.isInitialized()) {
altVar.markDone();
}
workFlags |= kWorkDidSome;
}
else {
@@ -262,8 +278,9 @@ EmitMove:
RegMask availableRegs = wd.availableRegs();
if (availableRegs) {
RegMask inOutRegs = wd.dstRegs();
if (availableRegs & ~inOutRegs)
if (availableRegs & ~inOutRegs) {
availableRegs &= ~inOutRegs;
}
outId = Support::ctz(availableRegs);
goto EmitMove;
}
@@ -279,12 +296,14 @@ EmitMove:
}
}
if (!(workFlags & kWorkPending))
if (!(workFlags & kWorkPending)) {
break;
}
// If we did nothing twice it means that something is really broken.
if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed)
if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed) {
return DebugUtils::errored(kErrorInvalidState);
}
workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
}
@@ -294,8 +313,9 @@ EmitMove:
if (ctx._hasStackSrc) {
uint32_t iterCount = 1;
if (frame.hasDynamicAlignment() && !frame.hasPreservedFP())
if (frame.hasDynamicAlignment() && !frame.hasPreservedFP()) {
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
}
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
@@ -303,8 +323,9 @@ EmitMove:
for (uint32_t iter = 0; iter < iterCount; iter++) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone())
if (var.isDone()) {
continue;
}
if (var.cur.isStack()) {
ASMJIT_ASSERT(var.out.isReg());

View File

@@ -26,7 +26,9 @@ public:
ASMJIT_INLINE_NODEBUG virtual ~BaseEmitHelper() noexcept = default;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseEmitter* emitter() const noexcept { return _emitter; }
ASMJIT_INLINE_NODEBUG void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; }
//! Emits a pure move operation between two registers or the same type or between a register and its home

View File

@@ -52,17 +52,21 @@ static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) n
hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate);
}
if (emitComments)
if (emitComments) {
self->_addEmitterFlags(EmitterFlags::kLogComments);
else
}
else {
self->_clearEmitterFlags(EmitterFlags::kLogComments);
}
// The reserved option tells emitter (Assembler/Builder/Compiler) that there may be either a border
// case (CodeHolder not attached, for example) or that logging or validation is required.
if (self->_code == nullptr || self->_logger || hasDiagnosticOptions)
if (self->_code == nullptr || self->_logger || hasDiagnosticOptions) {
self->_forcedInstOptions |= InstOptions::kReserved;
else
}
else {
self->_forcedInstOptions &= ~InstOptions::kReserved;
}
}
// BaseEmitter - Diagnostic Options
@@ -90,8 +94,9 @@ void BaseEmitter::setLogger(Logger* logger) noexcept {
else {
_logger = nullptr;
_clearEmitterFlags(EmitterFlags::kOwnLogger);
if (_code)
if (_code) {
_logger = _code->logger();
}
}
BaseEmitter_updateForcedOptions(this);
#else
@@ -110,16 +115,18 @@ void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept {
else {
_errorHandler = nullptr;
_clearEmitterFlags(EmitterFlags::kOwnErrorHandler);
if (_code)
if (_code) {
_errorHandler = _code->errorHandler();
}
}
}
Error BaseEmitter::reportError(Error err, const char* message) {
ErrorHandler* eh = _errorHandler;
if (eh) {
if (!message)
if (!message) {
message = DebugUtils::errorAsString(err);
}
eh->handleError(err, message, this);
}
return err;
@@ -318,8 +325,9 @@ Error BaseEmitter::comment(const char* data, size_t size) {
Error BaseEmitter::commentf(const char* fmt, ...) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached))
if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
return kErrorOk;
}
@@ -341,8 +349,9 @@ Error BaseEmitter::commentf(const char* fmt, ...) {
Error BaseEmitter::commentv(const char* fmt, va_list ap) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached))
if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
return kErrorOk;
}
@@ -377,11 +386,13 @@ Error BaseEmitter::onAttach(CodeHolder* code) noexcept {
Error BaseEmitter::onDetach(CodeHolder* code) noexcept {
DebugUtils::unused(code);
if (!hasOwnLogger())
if (!hasOwnLogger()) {
_logger = nullptr;
}
if (!hasOwnErrorHandler())
if (!hasOwnErrorHandler()) {
_errorHandler = nullptr;
}
_clearEmitterFlags(~kEmitterPreservedFlags);
_instructionAlignment = uint8_t(0);
@@ -403,11 +414,13 @@ void BaseEmitter::onSettingsUpdated() noexcept {
// Only called when attached to CodeHolder by CodeHolder.
ASMJIT_ASSERT(_code != nullptr);
if (!hasOwnLogger())
if (!hasOwnLogger()) {
_logger = _code->logger();
}
if (!hasOwnErrorHandler())
if (!hasOwnErrorHandler()) {
_errorHandler = _code->errorHandler();
}
BaseEmitter_updateForcedOptions(this);
}

View File

@@ -257,18 +257,18 @@ public:
//!
//! These are typically shared between Assembler/Builder/Compiler of a single backend.
struct Funcs {
typedef Error (ASMJIT_CDECL* EmitProlog)(BaseEmitter* emitter, const FuncFrame& frame);
typedef Error (ASMJIT_CDECL* EmitEpilog)(BaseEmitter* emitter, const FuncFrame& frame);
typedef Error (ASMJIT_CDECL* EmitArgsAssignment)(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args);
using EmitProlog = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame);
using EmitEpilog = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame);
using EmitArgsAssignment = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args);
typedef Error (ASMJIT_CDECL* FormatInstruction)(
using FormatInstruction = Error (ASMJIT_CDECL*)(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) ASMJIT_NOEXCEPT_TYPE;
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
typedef Error (ASMJIT_CDECL* ValidateFunc)(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) ASMJIT_NOEXCEPT_TYPE;
using ValidateFunc = Error (ASMJIT_CDECL*)(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
//! Emit prolog implementation.
EmitProlog emitProlog;
@@ -306,9 +306,11 @@ public:
//! \{
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* as() noexcept { return reinterpret_cast<T*>(this); }
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* as() const noexcept { return reinterpret_cast<const T*>(this); }
//! \}
@@ -317,24 +319,37 @@ public:
//! \{
//! Returns the type of this emitter, see `EmitterType`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitterType emitterType() const noexcept { return _emitterType; }
//! Returns emitter flags , see `Flags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitterFlags emitterFlags() const noexcept { return _emitterFlags; }
//! Tests whether the emitter inherits from `BaseAssembler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAssembler() const noexcept { return _emitterType == EmitterType::kAssembler; }
//! Tests whether the emitter inherits from `BaseBuilder`.
//!
//! \note Both Builder and Compiler emitters would return `true`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBuilder() const noexcept { return uint32_t(_emitterType) >= uint32_t(EmitterType::kBuilder); }
//! Tests whether the emitter inherits from `BaseCompiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isCompiler() const noexcept { return _emitterType == EmitterType::kCompiler; }
//! Tests whether the emitter has the given `flag` enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasEmitterFlag(EmitterFlags flag) const noexcept { return Support::test(_emitterFlags, flag); }
//! Tests whether the emitter is finalized.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFinalized() const noexcept { return hasEmitterFlag(EmitterFlags::kFinalized); }
//! Tests whether the emitter is destroyed (only used during destruction).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDestroyed() const noexcept { return hasEmitterFlag(EmitterFlags::kDestroyed); }
//! \}
@@ -353,27 +368,37 @@ public:
//! \{
//! Returns the CodeHolder this emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CodeHolder* code() const noexcept { return _code; }
//! Returns the target environment.
//!
//! The returned \ref Environment reference matches \ref CodeHolder::environment().
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; }
//! Tests whether the target architecture is 32-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return environment().is32Bit(); }
//! Tests whether the target architecture is 64-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return environment().is64Bit(); }
//! Returns the target architecture type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return environment().arch(); }
//! Returns the target architecture sub-type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); }
//! Returns the target architecture's GP register size (4 or 8 bytes).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return environment().registerSize(); }
//! Returns a signature of a native general purpose register (either 32-bit or 64-bit depending on the architecture).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature gpSignature() const noexcept { return _gpSignature; }
//! Returns instruction alignment.
@@ -382,6 +407,7 @@ public:
//! - X86 and X86_64 - instruction alignment is 1
//! - AArch32 - instruction alignment is 4 in A32 mode and 2 in THUMB mode.
//! - AArch64 - instruction alignment is 4
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t instructionAlignment() const noexcept { return _instructionAlignment; }
//! \}
@@ -390,6 +416,7 @@ public:
//! \{
//! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _code != nullptr; }
//! Finalizes this emitter.
@@ -407,18 +434,21 @@ public:
//! \{
//! Tests whether the emitter has a logger.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLogger() const noexcept { return _logger != nullptr; }
//! Tests whether the emitter has its own logger.
//!
//! Own logger means that it overrides the possible logger that may be used by \ref CodeHolder this emitter is
//! attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOwnLogger() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnLogger); }
//! Returns the logger this emitter uses.
//!
//! The returned logger is either the emitter's own logger or it's logger used by \ref CodeHolder this emitter
//! is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; }
//! Sets or resets the logger of the emitter.
@@ -440,18 +470,21 @@ public:
//! \{
//! Tests whether the emitter has an error handler attached.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
//! Tests whether the emitter has its own error handler.
//!
//! Own error handler means that it overrides the possible error handler that may be used by \ref CodeHolder this
//! emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnErrorHandler); }
//! Returns the error handler this emitter uses.
//!
//! The returned error handler is either the emitter's own error handler or it's error handler used by
//! \ref CodeHolder this emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
//! Sets or resets the error handler of the emitter.
@@ -472,8 +505,11 @@ public:
//! \{
//! Returns encoding options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EncodingOptions encodingOptions() const noexcept { return _encodingOptions; }
//! Tests whether the encoding `option` is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasEncodingOption(EncodingOptions option) const noexcept { return Support::test(_encodingOptions, option); }
//! Enables the given encoding `options`.
@@ -487,9 +523,11 @@ public:
//! \{
//! Returns the emitter's diagnostic options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG DiagnosticOptions diagnosticOptions() const noexcept { return _diagnosticOptions; }
//! Tests whether the given `option` is present in the emitter's diagnostic options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); }
//! Activates the given diagnostic `options`.
@@ -527,35 +565,49 @@ public:
//! Forced instruction options are merged with next instruction options before the instruction is encoded. These
//! options have some bits reserved that are used by error handling, logging, and instruction validation purposes.
//! Other options are globals that affect each instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions forcedInstOptions() const noexcept { return _forcedInstOptions; }
//! Returns options of the next instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions instOptions() const noexcept { return _instOptions; }
//! Returns options of the next instruction.
ASMJIT_INLINE_NODEBUG void setInstOptions(InstOptions options) noexcept { _instOptions = options; }
//! Adds options of the next instruction.
ASMJIT_INLINE_NODEBUG void addInstOptions(InstOptions options) noexcept { _instOptions |= options; }
//! Resets options of the next instruction.
ASMJIT_INLINE_NODEBUG void resetInstOptions() noexcept { _instOptions = InstOptions::kNone; }
//! Tests whether the extra register operand is valid.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
//! Returns an extra operand that will be used by the next instruction (architecture specific).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
//! Resets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); }
//! Returns comment/annotation of the next instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; }
//! Sets comment/annotation of the next instruction.
//!
//! \note This string is set back to null by `_emit()`, but until that it has to remain valid as the Emitter is not
//! required to make a copy of it (and it would be slow to do that for each instruction).
ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; }
//! Resets the comment/annotation to nullptr.
ASMJIT_INLINE_NODEBUG void resetInlineComment() noexcept { _inlineComment = nullptr; }
@@ -581,6 +633,7 @@ public:
//! Grabs the current emitter state and resets the emitter state at the same time, returning the state the emitter
//! had before the state was reset.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG State _grabState() noexcept {
State s{_instOptions | _forcedInstOptions, _extraReg, _inlineComment};
resetState();
@@ -604,13 +657,19 @@ public:
//! \{
//! Creates a new label.
[[nodiscard]]
ASMJIT_API virtual Label newLabel();
//! Creates a new named label.
[[nodiscard]]
ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId);
//! Creates a new anonymous label with a name, which can only be used for debugging purposes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label newAnonymousLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kAnonymous); }
//! Creates a new external label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kExternal); }
//! Returns `Label` by `name`.
@@ -619,6 +678,7 @@ public:
//!
//! \note This function doesn't trigger ErrorHandler in case the name is invalid or no such label exist. You must
//! always check the validity of the `Label` returned.
[[nodiscard]]
ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
//! Binds the `label` to the current position of the current section.
@@ -627,8 +687,11 @@ public:
ASMJIT_API virtual Error bind(const Label& label);
//! Tests whether the label `id` is valid (i.e. registered).
[[nodiscard]]
ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept;
//! Tests whether the `label` is valid (i.e. registered).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); }
//! \}
@@ -676,7 +739,7 @@ public:
//! Similar to \ref emit(), but emits instruction with both instruction options and extra register, followed
//! by an array of `operands`.
ASMJIT_FORCE_INLINE Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
ASMJIT_INLINE Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
setInstOptions(inst.options());
setExtraReg(inst.extraReg());
return _emitOpArray(inst.id(), operands, opCount);
@@ -794,6 +857,7 @@ public:
//! Called after the emitter was attached to `CodeHolder`.
ASMJIT_API virtual Error onAttach(CodeHolder* ASMJIT_NONNULL(code)) noexcept;
//! Called after the emitter was detached from `CodeHolder`.
ASMJIT_API virtual Error onDetach(CodeHolder* ASMJIT_NONNULL(code)) noexcept;

View File

@@ -88,10 +88,12 @@ void logInstructionEmitted(
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kCode));
self->_funcs.formatInstruction(sb, formatFlags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if (Support::test(formatFlags, FormatFlags::kMachineCode))
if (Support::test(formatFlags, FormatFlags::kMachineCode)) {
finishFormattedLine(sb, logger->options(), self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
else
}
else {
finishFormattedLine(sb, logger->options(), nullptr, SIZE_MAX, 0, 0, self->inlineComment());
}
logger->log(sb);
}

View File

@@ -31,7 +31,8 @@ enum kOpIndex : uint32_t {
kOp5 = 2
};
static ASMJIT_FORCE_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
[[nodiscard]]
static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
uint32_t opCount = 0;
if (opExt[kOp3].isNone()) {
@@ -49,7 +50,7 @@ static ASMJIT_FORCE_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, cons
return opCount;
}
static ASMJIT_FORCE_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
dst[0].copyFrom(o0);
dst[1].copyFrom(o1);
dst[2].copyFrom(o2);

View File

@@ -35,8 +35,9 @@ uint32_t Environment::stackAlignment() const noexcept {
return 16u;
}
if (isFamilyARM())
if (isFamilyARM()) {
return 8;
}
// Bail to 4-byte alignment if we don't know.
return 4;

View File

@@ -225,13 +225,13 @@ public:
//! \{
//! Creates a default initialized environment (all values either unknown or set to safe defaults).
ASMJIT_INLINE_NODEBUG constexpr Environment() noexcept = default;
ASMJIT_INLINE_CONSTEXPR Environment() noexcept = default;
//! Creates a copy of `other` instance.
ASMJIT_INLINE_NODEBUG constexpr Environment(const Environment& other) noexcept = default;
ASMJIT_INLINE_CONSTEXPR Environment(const Environment& other) noexcept = default;
//! Creates \ref Environment initialized to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`,
//! and `floatABI`.
ASMJIT_INLINE_NODEBUG constexpr explicit Environment(
ASMJIT_INLINE_CONSTEXPR explicit Environment(
Arch arch,
SubArch subArch = SubArch::kUnknown,
Vendor vendor = Vendor::kUnknown,
@@ -251,7 +251,7 @@ public:
//!
//! The returned environment should precisely match the target host architecture, sub-architecture, platform,
//! and ABI.
static ASMJIT_INLINE_NODEBUG Environment host() noexcept {
static ASMJIT_INLINE_CONSTEXPR Environment host() noexcept {
return Environment(Arch::kHost, SubArch::kHost, Vendor::kHost, Platform::kHost, PlatformABI::kHost, ObjectFormat::kUnknown, FloatABI::kHost);
}
@@ -262,7 +262,10 @@ public:
ASMJIT_INLINE_NODEBUG Environment& operator=(const Environment& other) noexcept = default;
ASMJIT_INLINE_NODEBUG bool operator==(const Environment& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const Environment& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const Environment& other) const noexcept { return !equals(other); }
//! \}
@@ -273,6 +276,7 @@ public:
//! Tests whether the environment is not set up.
//!
//! Returns true if all members are zero, and thus unknown.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept {
// Unfortunately compilers won't optimize fields are checked one by one...
return _packed() == 0;
@@ -280,10 +284,12 @@ public:
//! Tests whether the environment is initialized, which means it must have
//! a valid architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept {
return _arch != Arch::kUnknown;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t _packed() const noexcept {
uint64_t x;
memcpy(&x, this, 8);
@@ -294,21 +300,35 @@ public:
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Environment{}; }
//! Tests whether this environment is equal to `other`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool equals(const Environment& other) const noexcept { return _packed() == other._packed(); }
//! Returns the architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
//! Returns the sub-architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; }
//! Returns vendor.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Vendor vendor() const noexcept { return _vendor; }
//! Returns target's platform or operating system.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Platform platform() const noexcept { return _platform; }
//! Returns target's ABI.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PlatformABI platformABI() const noexcept { return _platformABI; }
//! Returns target's object format.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ObjectFormat objectFormat() const noexcept { return _objectFormat; }
//! Returns floating point ABI.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FloatABI floatABI() const noexcept { return _floatABI; }
//! Initializes \ref Environment to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`,
@@ -333,57 +353,99 @@ public:
}
//! Tests whether this environment describes a 32-bit X86.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchX86() const noexcept { return _arch == Arch::kX86; }
//! Tests whether this environment describes a 64-bit X86.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchX64() const noexcept { return _arch == Arch::kX64; }
//! Tests whether this environment describes a 32-bit ARM.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchARM() const noexcept { return isArchARM(_arch); }
//! Tests whether this environment describes a 32-bit ARM in THUMB mode.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchThumb() const noexcept { return isArchThumb(_arch); }
//! Tests whether this environment describes a 64-bit X86.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchAArch64() const noexcept { return isArchAArch64(_arch); }
//! Tests whether this environment describes a 32-bit MIPS.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchMIPS32() const noexcept { return isArchMIPS32(_arch); }
//! Tests whether this environment describes a 64-bit MIPS.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchMIPS64() const noexcept { return isArchMIPS64(_arch); }
//! Tests whether this environment describes a 32-bit RISC-V.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchRISCV32() const noexcept { return _arch == Arch::kRISCV32; }
//! Tests whether this environment describes a 64-bit RISC-V.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchRISCV64() const noexcept { return _arch == Arch::kRISCV64; }
//! Tests whether the architecture is 32-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return is32Bit(_arch); }
//! Tests whether the architecture is 64-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return is64Bit(_arch); }
//! Tests whether the architecture is little endian.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLittleEndian() const noexcept { return isLittleEndian(_arch); }
//! Tests whether the architecture is big endian.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBigEndian() const noexcept { return isBigEndian(_arch); }
//! Tests whether this architecture is of X86 family.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
//! Tests whether this architecture family is ARM, THUMB, or AArch64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
//! Tests whether this architecture family is AArch32 (ARM or THUMB).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyAArch32() const noexcept { return isFamilyAArch32(_arch); }
//! Tests whether this architecture family is AArch64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyAArch64() const noexcept { return isFamilyAArch64(_arch); }
//! Tests whether this architecture family is MISP or MIPS64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
//! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
//! Tests whether the environment platform is Windows.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformWindows() const noexcept { return _platform == Platform::kWindows; }
//! Tests whether the environment platform is Linux.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformLinux() const noexcept { return _platform == Platform::kLinux; }
//! Tests whether the environment platform is Hurd.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformHurd() const noexcept { return _platform == Platform::kHurd; }
//! Tests whether the environment platform is Haiku.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformHaiku() const noexcept { return _platform == Platform::kHaiku; }
//! Tests whether the environment platform is any BSD.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformBSD() const noexcept {
return _platform == Platform::kFreeBSD ||
_platform == Platform::kOpenBSD ||
@@ -392,6 +454,7 @@ public:
}
//! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformApple() const noexcept {
return _platform == Platform::kOSX ||
_platform == Platform::kIOS ||
@@ -400,16 +463,23 @@ public:
}
//! Tests whether the ABI is MSVC.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMSVC() const noexcept { return _platformABI == PlatformABI::kMSVC; }
//! Tests whether the ABI is GNU.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isGNU() const noexcept { return _platformABI == PlatformABI::kGNU; }
//! Tests whether the ABI is GNU.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDarwin() const noexcept { return _platformABI == PlatformABI::kDarwin; }
//! Returns a calculated stack alignment for this environment.
[[nodiscard]]
ASMJIT_API uint32_t stackAlignment() const noexcept;
//! Returns a native register size of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); }
//! Sets the architecture to `arch`.
@@ -433,90 +503,108 @@ public:
//! \name Static Utilities
//! \{
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isDefinedArch(Arch arch) noexcept {
return uint32_t(arch) <= uint32_t(Arch::kMaxValue);
}
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isValidArch(Arch arch) noexcept {
return arch != Arch::kUnknown && uint32_t(arch) <= uint32_t(Arch::kMaxValue);
}
//! Tests whether the given architecture `arch` is 32-bit.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool is32Bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == uint32_t(Arch::k32BitMask);
}
//! Tests whether the given architecture `arch` is 64-bit.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool is64Bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == 0;
}
//! Tests whether the given architecture `arch` is little endian.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isLittleEndian(Arch arch) noexcept {
return uint32_t(arch) < uint32_t(Arch::kBigEndian);
}
//! Tests whether the given architecture `arch` is big endian.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isBigEndian(Arch arch) noexcept {
return uint32_t(arch) >= uint32_t(Arch::kBigEndian);
}
//! Tests whether the given architecture is Thumb or Thumb_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchThumb(Arch arch) noexcept {
return arch == Arch::kThumb || arch == Arch::kThumb_BE;
}
//! Tests whether the given architecture is ARM or ARM_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchARM(Arch arch) noexcept {
return arch == Arch::kARM || arch == Arch::kARM_BE;
}
//! Tests whether the given architecture is AArch64 or AArch64_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchAArch64(Arch arch) noexcept {
return arch == Arch::kAArch64 || arch == Arch::kAArch64_BE;
}
//! Tests whether the given architecture is MIPS32_LE or MIPS32_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchMIPS32(Arch arch) noexcept {
return arch == Arch::kMIPS32_LE || arch == Arch::kMIPS32_BE;
}
//! Tests whether the given architecture is MIPS64_LE or MIPS64_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchMIPS64(Arch arch) noexcept {
return arch == Arch::kMIPS64_LE || arch == Arch::kMIPS64_BE;
}
//! Tests whether the given architecture family is X86 or X64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyX86(Arch arch) noexcept {
return arch == Arch::kX86 || arch == Arch::kX64;
}
//! Tests whether the given architecture family is AArch32 (ARM or THUMB).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyAArch32(Arch arch) noexcept {
return isArchARM(arch) || isArchThumb(arch);
}
//! Tests whether the given architecture family is AArch64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyAArch64(Arch arch) noexcept {
return isArchAArch64(arch);
}
//! Tests whether the given architecture family is ARM, THUMB, or AArch64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyARM(Arch arch) noexcept {
return isFamilyAArch32(arch) || isFamilyAArch64(arch);
}
//! Tests whether the given architecture family is MIPS or MIPS64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyMIPS(Arch arch) noexcept {
return isArchMIPS32(arch) || isArchMIPS64(arch);
}
//! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyRISCV(Arch arch) noexcept {
return arch == Arch::kRISCV32 || arch == Arch::kRISCV64;
}
//! Returns a native general purpose register size from the given architecture.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG uint32_t registerSizeFromArch(Arch arch) noexcept {
return is32Bit(arch) ? 4u : 8u;
}

View File

@@ -51,11 +51,13 @@ static const char wordNameTable[][8] = {
Error formatTypeId(String& sb, TypeId typeId) noexcept {
if (typeId == TypeId::kVoid)
if (typeId == TypeId::kVoid) {
return sb.append("void");
}
if (!TypeUtils::isValid(typeId))
if (!TypeUtils::isValid(typeId)) {
return sb.append("unknown");
}
const char* typeName = nullptr;
uint32_t typeSize = TypeUtils::sizeOf(typeId);
@@ -103,13 +105,15 @@ Error formatFeature(
uint32_t featureId) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatFeature(sb, featureId);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch))
if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatFeature(sb, featureId);
}
#endif
return kErrorInvalidArch;
@@ -125,26 +129,31 @@ Error formatLabel(
if (emitter && emitter->code()) {
const LabelEntry* le = emitter->code()->labelEntry(labelId);
if (ASMJIT_UNLIKELY(!le))
if (ASMJIT_UNLIKELY(!le)) {
return sb.appendFormat("<InvalidLabel:%u>", labelId);
}
if (le->hasName()) {
if (le->hasParent()) {
uint32_t parentId = le->parentId();
const LabelEntry* pe = emitter->code()->labelEntry(parentId);
if (ASMJIT_UNLIKELY(!pe))
if (ASMJIT_UNLIKELY(!pe)) {
ASMJIT_PROPAGATE(sb.appendFormat("<InvalidLabel:%u>", labelId));
else if (ASMJIT_UNLIKELY(!pe->hasName()))
}
else if (ASMJIT_UNLIKELY(!pe->hasName())) {
ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId));
else
}
else {
ASMJIT_PROPAGATE(sb.append(pe->name()));
}
ASMJIT_PROPAGATE(sb.append('.'));
}
if (le->type() == LabelType::kAnonymous)
if (le->type() == LabelType::kAnonymous) {
ASMJIT_PROPAGATE(sb.appendFormat("L%u@", labelId));
}
return sb.append(le->name());
}
}
@@ -161,13 +170,15 @@ Error formatRegister(
uint32_t regId) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch))
if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
}
#endif
return kErrorInvalidArch;
@@ -181,13 +192,15 @@ Error formatOperand(
const Operand_& op) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch))
if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
}
#endif
return kErrorInvalidArch;
@@ -201,12 +214,14 @@ ASMJIT_API Error formatDataType(
{
DebugUtils::unused(formatFlags);
if (ASMJIT_UNLIKELY(uint32_t(arch) > uint32_t(Arch::kMaxValue)))
if (ASMJIT_UNLIKELY(uint32_t(arch) > uint32_t(Arch::kMaxValue))) {
return DebugUtils::errored(kErrorInvalidArch);
}
uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0 || typeSize > 8)
if (typeSize == 0 || typeSize > 8) {
return DebugUtils::errored(kErrorInvalidState);
}
uint32_t typeSizeLog2 = Support::ctz(typeSize);
return sb.append(wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))]);
@@ -220,8 +235,9 @@ static Error formatDataHelper(String& sb, const char* typeName, uint32_t typeSiz
for (size_t i = 0; i < itemCount; i++) {
uint64_t v = 0;
if (i != 0)
if (i != 0) {
ASMJIT_PROPAGATE(sb.append(", ", 2));
}
switch (typeSize) {
case 1: v = data[0]; break;
@@ -241,16 +257,18 @@ Error formatData(
String& sb,
FormatFlags formatFlags,
Arch arch,
TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) noexcept
{
TypeId typeId, const void* data, size_t itemCount, size_t repeatCount
) noexcept {
DebugUtils::unused(formatFlags);
if (ASMJIT_UNLIKELY(!Environment::isDefinedArch(arch)))
if (ASMJIT_UNLIKELY(!Environment::isDefinedArch(arch))) {
return DebugUtils::errored(kErrorInvalidArch);
}
uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0)
if (typeSize == 0) {
return DebugUtils::errored(kErrorInvalidState);
}
if (!Support::isPowerOf2(typeSize)) {
itemCount *= typeSize;
@@ -265,8 +283,9 @@ Error formatData(
uint32_t typeSizeLog2 = Support::ctz(typeSize);
const char* wordName = wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))];
if (repeatCount > 1)
if (repeatCount > 1) {
ASMJIT_PROPAGATE(sb.appendFormat(".repeat %zu ", repeatCount));
}
return formatDataHelper(sb, wordName, typeSize, static_cast<const uint8_t*>(data), itemCount);
}
@@ -279,13 +298,15 @@ Error formatInstruction(
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch))
if (Environment::isFamilyAArch64(arch)) {
return a64::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
}
#endif
return kErrorInvalidArch;
@@ -301,8 +322,9 @@ static Error formatFuncValue(String& sb, FormatFlags formatFlags, const BaseEmit
if (value.isAssigned()) {
ASMJIT_PROPAGATE(sb.append('@'));
if (value.isIndirect())
if (value.isIndirect()) {
ASMJIT_PROPAGATE(sb.append('['));
}
// NOTE: It should be either reg or stack, but never both. We
// use two IFs on purpose so if the FuncValue is both it would
@@ -315,8 +337,9 @@ static Error formatFuncValue(String& sb, FormatFlags formatFlags, const BaseEmit
ASMJIT_PROPAGATE(sb.appendFormat("[%d]", int(value.stackOffset())));
}
if (value.isIndirect())
if (value.isIndirect()) {
ASMJIT_PROPAGATE(sb.append(']'));
}
}
return kErrorOk;
@@ -330,19 +353,23 @@ static Error formatFuncValuePack(
const RegOnly* vRegs) noexcept {
size_t count = pack.count();
if (!count)
if (!count) {
return sb.append("void");
}
if (count > 1)
sb.append('[');
if (count > 1) {
ASMJIT_PROPAGATE(sb.append('['));
}
for (uint32_t valueIndex = 0; valueIndex < count; valueIndex++) {
const FuncValue& value = pack[valueIndex];
if (!value)
if (!value) {
break;
}
if (valueIndex)
if (valueIndex) {
ASMJIT_PROPAGATE(sb.append(", "));
}
ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, cc, value));
@@ -350,15 +377,17 @@ static Error formatFuncValuePack(
const VirtReg* virtReg = nullptr;
static const char nullReg[] = "<none>";
if (vRegs[valueIndex].isReg() && cc->isVirtIdValid(vRegs[valueIndex].id()))
if (vRegs[valueIndex].isReg() && cc->isVirtIdValid(vRegs[valueIndex].id())) {
virtReg = cc->virtRegById(vRegs[valueIndex].id());
}
ASMJIT_PROPAGATE(sb.appendFormat(" %s", virtReg ? virtReg->name() : nullReg));
}
}
if (count > 1)
sb.append(']');
if (count > 1) {
ASMJIT_PROPAGATE(sb.append(']'));
}
return kErrorOk;
}
@@ -380,13 +409,14 @@ static Error formatFuncArgs(
const FuncNode::ArgPack* argPacks) noexcept {
uint32_t argCount = fd.argCount();
if (!argCount)
if (!argCount) {
return sb.append("void");
}
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
if (argIndex)
if (argIndex) {
ASMJIT_PROPAGATE(sb.append(", "));
}
ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, cc, fd.argPack(argIndex), argPacks[argIndex]._data));
}
@@ -400,8 +430,9 @@ Error formatNode(
const BaseBuilder* builder,
const BaseNode* node) noexcept {
if (node->hasPosition() && formatOptions.hasFlag(FormatFlags::kPositions))
if (node->hasPosition() && formatOptions.hasFlag(FormatFlags::kPositions)) {
ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node->position()));
}
size_t startLineIndex = sb.size();
@@ -542,8 +573,9 @@ Error formatNode(
size_t requiredPadding = paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine);
size_t currentPadding = sb.size() - startLineIndex;
if (currentPadding < requiredPadding)
if (currentPadding < requiredPadding) {
ASMJIT_PROPAGATE(sb.appendChars(' ', requiredPadding - currentPadding));
}
ASMJIT_PROPAGATE(sb.append("; "));
ASMJIT_PROPAGATE(sb.append(node->inlineComment()));

View File

@@ -108,28 +108,39 @@ public:
//! \{
//! Returns format flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _flags; }
//! Tests whether the given `flag` is set in format flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Resets all format flags to `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _flags = flags; }
//! Adds `flags` to format flags.
ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _flags |= flags; }
//! Removes `flags` from format flags.
ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _flags &= ~flags; }
//! Returns indentation for the given indentation `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t indentation(FormatIndentationGroup group) const noexcept { return _indentation[group]; }
//! Sets indentation for the given indentation `group`.
ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup group, uint32_t n) noexcept { _indentation[group] = uint8_t(n); }
//! Resets indentation for the given indentation `group` to zero.
ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup group) noexcept { _indentation[group] = uint8_t(0); }
//! Returns padding for the given padding `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup group) const noexcept { return _padding[group]; }
//! Sets padding for the given padding `group`.
ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup group, size_t n) noexcept { _padding[group] = uint16_t(n); }
//! Resets padding for the given padding `group` to zero, which means that a default padding will be used
//! based on the target architecture properties.
ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup group) noexcept { _padding[group] = uint16_t(0); }

View File

@@ -16,7 +16,7 @@ ASMJIT_BEGIN_NAMESPACE
namespace Formatter {
static ASMJIT_FORCE_INLINE size_t paddingFromOptions(const FormatOptions& formatOptions, FormatPaddingGroup group) noexcept {
static ASMJIT_INLINE size_t paddingFromOptions(const FormatOptions& formatOptions, FormatPaddingGroup group) noexcept {
static constexpr uint16_t _defaultPaddingTable[uint32_t(FormatPaddingGroup::kMaxValue) + 1] = { 44, 26 };
static_assert(uint32_t(FormatPaddingGroup::kMaxValue) + 1 == 2, "If a new group is defined it must be added here");

View File

@@ -27,13 +27,15 @@ ASMJIT_FAVOR_SIZE Error CallConv::init(CallConvId ccId, const Environment& envir
reset();
#if !defined(ASMJIT_NO_X86)
if (environment.isFamilyX86())
if (environment.isFamilyX86()) {
return x86::FuncInternal::initCallConv(*this, ccId, environment);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (environment.isFamilyAArch64())
if (environment.isFamilyAArch64()) {
return a64::FuncInternal::initCallConv(*this, ccId, environment);
}
#endif
return DebugUtils::errored(kErrorInvalidArgument);
@@ -46,8 +48,9 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E
CallConvId ccId = signature.callConvId();
uint32_t argCount = signature.argCount();
if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs))
if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
CallConv& cc = _callConv;
ASMJIT_PROPAGATE(cc.init(ccId, environment));
@@ -65,17 +68,20 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E
_vaIndex = uint8_t(signature.vaIndex());
TypeId ret = signature.ret();
if (ret != TypeId::kVoid)
if (ret != TypeId::kVoid) {
_rets[0].initTypeId(TypeUtils::deabstract(ret, deabstractDelta));
}
#if !defined(ASMJIT_NO_X86)
if (environment.isFamilyX86())
if (environment.isFamilyX86()) {
return x86::FuncInternal::initFuncDetail(*this, signature, registerSize);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (environment.isFamilyAArch64())
if (environment.isFamilyAArch64()) {
return a64::FuncInternal::initFuncDetail(*this, signature);
}
#endif
// We should never bubble here as if `cc.init()` succeeded then there has to be an implementation for the current
@@ -88,8 +94,9 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E
ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
Arch arch = func.callConv().arch();
if (!Environment::isValidArch(arch))
if (!Environment::isValidArch(arch)) {
return DebugUtils::errored(kErrorInvalidArch);
}
const ArchTraits& archTraits = ArchTraits::byArch(arch);
@@ -104,8 +111,9 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
uint32_t naturalStackAlignment = func.callConv().naturalStackAlignment();
uint32_t minDynamicAlignment = Support::max<uint32_t>(naturalStackAlignment, 16);
if (minDynamicAlignment == naturalStackAlignment)
if (minDynamicAlignment == naturalStackAlignment) {
minDynamicAlignment <<= 1;
}
_naturalStackAlignment = uint8_t(naturalStackAlignment);
_minDynamicAlignment = uint8_t(minDynamicAlignment);
@@ -137,8 +145,9 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
// ====================
ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
if (!Environment::isValidArch(arch()))
if (!Environment::isValidArch(arch())) {
return DebugUtils::errored(kErrorInvalidArch);
}
const ArchTraits& archTraits = ArchTraits::byArch(arch());
@@ -148,9 +157,7 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
// The final stack alignment must be updated accordingly to call and local stack alignments.
uint32_t stackAlignment = _finalStackAlignment;
ASMJIT_ASSERT(stackAlignment == Support::max(_naturalStackAlignment,
_callStackAlignment,
_localStackAlignment));
ASMJIT_ASSERT(stackAlignment == Support::max(_naturalStackAlignment, _callStackAlignment, _localStackAlignment));
bool hasFP = hasPreservedFP();
bool hasDA = hasDynamicAlignment();
@@ -165,32 +172,37 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
// Currently required by ARM, if this works differently across architectures we would have to generalize most
// likely in CallConv.
if (kLr != BaseReg::kIdBad)
if (kLr != BaseReg::kIdBad) {
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(kLr);
}
}
// These two are identical if the function doesn't align its stack dynamically.
uint32_t saRegId = _saRegId;
if (saRegId == BaseReg::kIdBad)
if (saRegId == BaseReg::kIdBad) {
saRegId = kSp;
}
// Fix stack arguments base-register from SP to FP in case it was not picked before and the function performs
// dynamic stack alignment.
if (hasDA && saRegId == kSp)
if (hasDA && saRegId == kSp) {
saRegId = kFp;
}
// Mark as dirty any register but SP if used as SA pointer.
if (saRegId != kSp)
if (saRegId != kSp) {
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(saRegId);
}
_spRegId = uint8_t(kSp);
_saRegId = uint8_t(saRegId);
// Setup stack size used to save preserved registers.
uint32_t saveRestoreSizes[2] {};
for (RegGroup group : RegGroupVirtValues{})
for (RegGroup group : RegGroupVirtValues{}) {
saveRestoreSizes[size_t(!archTraits.hasInstPushPop(group))]
+= Support::alignUp(Support::popcnt(savedRegs(group)) * saveRestoreRegSize(group), saveRestoreAlignment(group));
}
_pushPopSaveSize = uint16_t(saveRestoreSizes[0]);
_extraRegSaveSize = uint16_t(saveRestoreSizes[1]);
@@ -235,22 +247,25 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
// (basically the native register/pointer size). We don't adjust it now as `v` now contains the exact size
// that the function requires to adjust (call frame + stack frame, vec stack size). The stack (if we consider
// this size) is misaligned now, as it's always aligned before the function call - when `call()` is executed
// it pushes the current EIP|RIP onto the stack, and misaligns it by 12 or 8 bytes (depending on the
// it pushes the current EIP|RIP onto the stack, and unaligns it by 12 or 8 bytes (depending on the
// architecture). So count number of bytes needed to align it up to the function's CallFrame (the beginning).
if (v || hasFuncCalls() || !returnAddressSize)
if (v || hasFuncCalls() || !returnAddressSize) {
v += Support::alignUpDiff(v + pushPopSaveSize() + returnAddressSize, stackAlignment);
}
_pushPopSaveOffset = v; // Store 'pushPopSaveOffset' <- Function's push/pop save/restore starts here.
_stackAdjustment = v; // Store 'stackAdjustment' <- SA used by 'add SP, SA' and 'sub SP, SA'.
v += _pushPopSaveSize; // Count 'pushPopSaveSize' <- Function's push/pop save/restore ends here.
_finalStackSize = v; // Store 'finalStackSize' <- Final stack used by the function.
if (!archTraits.hasLinkReg())
if (!archTraits.hasLinkReg()) {
v += registerSize; // Count 'ReturnAddress' <- As CALL pushes onto stack.
}
// If the function performs dynamic stack alignment then the stack-adjustment must be aligned.
if (hasDA)
if (hasDA) {
_stackAdjustment = Support::alignUp(_stackAdjustment, stackAlignment);
}
// Calculate where the function arguments start relative to SP.
_saOffsetFromSP = hasDA ? FuncFrame::kTagInvalidOffset : v;
@@ -269,8 +284,9 @@ ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) co
Arch arch = frame.arch();
const FuncDetail* func = funcDetail();
if (!func)
if (!func) {
return DebugUtils::errored(kErrorInvalidState);
}
RAConstraints constraints;
ASMJIT_PROPAGATE(constraints.init(arch));

View File

@@ -152,7 +152,7 @@ struct CallConv {
//! \note This is not really AsmJit's limitation, it's just the number that makes sense considering all common
//! calling conventions. Usually even conventions that use registers to pass function arguments are limited to 8
//! and less arguments passed via registers per group.
static constexpr uint32_t kMaxRegArgsPerGroup = 16;
static inline constexpr uint32_t kMaxRegArgsPerGroup = 16;
//! \}
@@ -228,46 +228,66 @@ struct CallConv {
//! \{
//! Returns the target architecture of this calling convention.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
//! Sets the target architecture of this calling convention.
ASMJIT_INLINE_NODEBUG void setArch(Arch arch) noexcept { _arch = arch; }
//! Returns the calling convention id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CallConvId id() const noexcept { return _id; }
//! Sets the calling convention id.
ASMJIT_INLINE_NODEBUG void setId(CallConvId ccId) noexcept { _id = ccId; }
//! Returns the strategy used to assign registers to arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CallConvStrategy strategy() const noexcept { return _strategy; }
//! Sets the strategy used to assign registers to arguments.
ASMJIT_INLINE_NODEBUG void setStrategy(CallConvStrategy ccStrategy) noexcept { _strategy = ccStrategy; }
//! Tests whether the calling convention has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(CallConvFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Returns the calling convention flags, see `Flags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CallConvFlags flags() const noexcept { return _flags; }
//! Adds the calling convention flags, see `Flags`.
ASMJIT_INLINE_NODEBUG void setFlags(CallConvFlags flag) noexcept { _flags = flag; };
//! Adds the calling convention flags, see `Flags`.
ASMJIT_INLINE_NODEBUG void addFlags(CallConvFlags flags) noexcept { _flags |= flags; };
//! Tests whether this calling convention specifies 'RedZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRedZone() const noexcept { return _redZoneSize != 0; }
//! Tests whether this calling convention specifies 'SpillZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
//! Returns size of 'RedZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _redZoneSize; }
//! Returns size of 'SpillZone'.
ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
//! Sets size of 'RedZone'.
ASMJIT_INLINE_NODEBUG void setRedZoneSize(uint32_t size) noexcept { _redZoneSize = uint8_t(size); }
//! Returns size of 'SpillZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
//! Sets size of 'SpillZone'.
ASMJIT_INLINE_NODEBUG void setSpillZoneSize(uint32_t size) noexcept { _spillZoneSize = uint8_t(size); }
//! Returns a natural stack alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; }
//! Sets a natural stack alignment.
//!
//! This function can be used to override the default stack alignment in case that you know that it's alignment is
@@ -275,22 +295,28 @@ struct CallConv {
ASMJIT_INLINE_NODEBUG void setNaturalStackAlignment(uint32_t value) noexcept { _naturalStackAlignment = uint8_t(value); }
//! Returns the size of a register (or its part) to be saved and restored of the given `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saveRestoreRegSize(RegGroup group) const noexcept { return _saveRestoreRegSize[group]; }
//! Sets the size of a vector register (or its part) to be saved and restored.
ASMJIT_INLINE_NODEBUG void setSaveRestoreRegSize(RegGroup group, uint32_t size) noexcept { _saveRestoreRegSize[group] = uint8_t(size); }
//! Returns the alignment of a save-restore area of the given `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saveRestoreAlignment(RegGroup group) const noexcept { return _saveRestoreAlignment[group]; }
//! Sets the alignment of a save-restore area of the given `group`.
ASMJIT_INLINE_NODEBUG void setSaveRestoreAlignment(RegGroup group, uint32_t alignment) noexcept { _saveRestoreAlignment[group] = uint8_t(alignment); }
//! Returns the order of passed registers of the given `group`.
[[nodiscard]]
inline const uint8_t* passedOrder(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _passedOrder[size_t(group)].id;
}
//! Returns the mask of passed registers of the given `group`.
[[nodiscard]]
inline RegMask passedRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _passedRegs[size_t(group)];
@@ -335,6 +361,7 @@ struct CallConv {
}
//! Returns preserved register mask of the given `group`.
[[nodiscard]]
inline RegMask preservedRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _preservedRegs[group];
@@ -365,7 +392,7 @@ struct FuncSignature {
//! \{
//! Doesn't have variable number of arguments (`...`).
static constexpr uint8_t kNoVarArgs = 0xFFu;
static inline constexpr uint8_t kNoVarArgs = 0xFFu;
//! \}
@@ -391,19 +418,19 @@ struct FuncSignature {
//! \{
//! Default constructed function signature, initialized to \ref CallConvId::kCDecl, having no return value and no arguments.
ASMJIT_FORCE_INLINE constexpr FuncSignature() = default;
ASMJIT_INLINE_CONSTEXPR FuncSignature() = default;
//! Copy constructor, which is initialized to the same function signature as `other`.
ASMJIT_FORCE_INLINE constexpr FuncSignature(const FuncSignature& other) = default;
ASMJIT_INLINE_CONSTEXPR FuncSignature(const FuncSignature& other) = default;
//! Initializes the function signature with calling convention id `ccId` and variable argument's index `vaIndex`.
ASMJIT_FORCE_INLINE constexpr FuncSignature(CallConvId ccId, uint32_t vaIndex = kNoVarArgs) noexcept
ASMJIT_INLINE_CONSTEXPR FuncSignature(CallConvId ccId, uint32_t vaIndex = kNoVarArgs) noexcept
: _ccId(ccId),
_vaIndex(uint8_t(vaIndex)) {}
//! Initializes the function signature with calling convention id `ccId`, `vaIndex`, return value, and function arguments.
template<typename... Args>
ASMJIT_FORCE_INLINE constexpr FuncSignature(CallConvId ccId, uint32_t vaIndex, TypeId ret, Args&&...args) noexcept
ASMJIT_INLINE_CONSTEXPR FuncSignature(CallConvId ccId, uint32_t vaIndex, TypeId ret, Args&&...args) noexcept
: _ccId(ccId),
_argCount(uint8_t(sizeof...(args))),
_vaIndex(uint8_t(vaIndex)),
@@ -417,7 +444,8 @@ struct FuncSignature {
//! a convenience function that allows to build function signature statically based on types known at compile time,
//! which is common in JIT code generation.
template<typename... RetValueAndArgs>
static ASMJIT_INLINE_NODEBUG constexpr FuncSignature build(CallConvId ccId = CallConvId::kCDecl, uint32_t vaIndex = kNoVarArgs) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR FuncSignature build(CallConvId ccId = CallConvId::kCDecl, uint32_t vaIndex = kNoVarArgs) noexcept {
return FuncSignature(ccId, vaIndex, (TypeId(TypeUtils::TypeIdOfT<RetValueAndArgs>::kTypeId))... );
}
@@ -427,12 +455,15 @@ struct FuncSignature {
//! \{
//! Copy assignment - function signature can be copied by value.
ASMJIT_FORCE_INLINE FuncSignature& operator=(const FuncSignature& other) noexcept = default;
ASMJIT_INLINE FuncSignature& operator=(const FuncSignature& other) noexcept = default;
//! Compares this function signature with `other` for equality..
ASMJIT_FORCE_INLINE bool operator==(const FuncSignature& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE bool operator==(const FuncSignature& other) const noexcept { return equals(other); }
//! Compares this function signature with `other` for inequality..
ASMJIT_FORCE_INLINE bool operator!=(const FuncSignature& other) const noexcept { return !equals(other); }
[[nodiscard]]
ASMJIT_INLINE bool operator!=(const FuncSignature& other) const noexcept { return !equals(other); }
//! \}
@@ -448,6 +479,7 @@ struct FuncSignature {
//! \{
//! Compares this function signature with `other` for equality..
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool equals(const FuncSignature& other) const noexcept {
return _ccId == other._ccId &&
_argCount == other._argCount &&
@@ -462,27 +494,38 @@ struct FuncSignature {
//! \{
//! Returns the calling convention.
ASMJIT_INLINE_NODEBUG CallConvId callConvId() const noexcept { return _ccId; }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR CallConvId callConvId() const noexcept { return _ccId; }
//! Sets the calling convention to `ccId`;
ASMJIT_INLINE_NODEBUG void setCallConvId(CallConvId ccId) noexcept { _ccId = ccId; }
ASMJIT_INLINE_CONSTEXPR void setCallConvId(CallConvId ccId) noexcept { _ccId = ccId; }
//! Tests whether the function signature has a return value.
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _ret != TypeId::kVoid; }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool hasRet() const noexcept { return _ret != TypeId::kVoid; }
//! Returns the type of the return value.
ASMJIT_INLINE_NODEBUG TypeId ret() const noexcept { return _ret; }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR TypeId ret() const noexcept { return _ret; }
//! Sets the return type to `retType`.
ASMJIT_INLINE_NODEBUG void setRet(TypeId retType) noexcept { _ret = retType; }
ASMJIT_INLINE_CONSTEXPR void setRet(TypeId retType) noexcept { _ret = retType; }
//! Sets the return type based on `T`.
template<typename T>
ASMJIT_INLINE_NODEBUG void setRetT() noexcept { setRet(TypeId(TypeUtils::TypeIdOfT<T>::kTypeId)); }
ASMJIT_INLINE_CONSTEXPR void setRetT() noexcept { setRet(TypeId(TypeUtils::TypeIdOfT<T>::kTypeId)); }
//! Returns the array of function arguments' types.
ASMJIT_INLINE_NODEBUG const TypeId* args() const noexcept { return _args; }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR const TypeId* args() const noexcept { return _args; }
//! Returns the number of function arguments.
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _argCount; }
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR uint32_t argCount() const noexcept { return _argCount; }
//! Returns the type of the argument at index `i`.
[[nodiscard]]
inline TypeId arg(uint32_t i) const noexcept {
ASMJIT_ASSERT(i < _argCount);
return _args[i];
@@ -493,6 +536,7 @@ struct FuncSignature {
ASMJIT_ASSERT(index < _argCount);
_args[index] = argType;
}
//! Sets the argument at index `i` to the type based on `T`.
template<typename T>
inline void setArgT(uint32_t index) noexcept { setArg(index, TypeId(TypeUtils::TypeIdOfT<T>::kTypeId)); }
@@ -503,6 +547,7 @@ struct FuncSignature {
//! to use this function. However, if you are adding arguments based on user input, for example, then either check
//! the number of arguments before using function signature or use \ref canAddArg() before actually adding them to
//! the function signature.
[[nodiscard]]
inline bool canAddArg() const noexcept { return _argCount < Globals::kMaxFuncArgs; }
//! Appends an argument of `type` to the function prototype.
@@ -516,11 +561,16 @@ struct FuncSignature {
inline void addArgT() noexcept { addArg(TypeId(TypeUtils::TypeIdOfT<T>::kTypeId)); }
//! Tests whether the function has variable number of arguments (...).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
//! Returns the variable arguments (...) index, `kNoVarArgs` if none.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t vaIndex() const noexcept { return _vaIndex; }
//! Sets the variable arguments (...) index to `index`.
ASMJIT_INLINE_NODEBUG void setVaIndex(uint32_t index) noexcept { _vaIndex = uint8_t(index); }
//! Resets the variable arguments index (making it a non-va function).
ASMJIT_INLINE_NODEBUG void resetVaIndex() noexcept { _vaIndex = kNoVarArgs; }
@@ -620,45 +670,68 @@ struct FuncValue {
//! \endcond
//! Tests whether the `FuncValue` has a flag `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(uint32_t flag) const noexcept { return Support::test(_data, flag); }
//! Adds `flags` to `FuncValue`.
ASMJIT_INLINE_NODEBUG void addFlags(uint32_t flags) noexcept { _data |= flags; }
//! Clears `flags` of `FuncValue`.
ASMJIT_INLINE_NODEBUG void clearFlags(uint32_t flags) noexcept { _data &= ~flags; }
//! Tests whether the value is initialized (i.e. contains a valid data).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _data != 0; }
//! Tests whether the argument is passed by register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReg() const noexcept { return hasFlag(kFlagIsReg); }
//! Tests whether the argument is passed by stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return hasFlag(kFlagIsStack); }
//! Tests whether the argument is passed by register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAssigned() const noexcept { return hasFlag(kFlagIsReg | kFlagIsStack); }
//! Tests whether the argument is passed through a pointer (used by WIN64 to pass XMM|YMM|ZMM).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isIndirect() const noexcept { return hasFlag(kFlagIsIndirect); }
//! Tests whether the argument was already processed (used internally).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDone() const noexcept { return hasFlag(kFlagIsDone); }
//! Returns a register type of the register used to pass function argument or return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegType regType() const noexcept { return RegType((_data & kRegTypeMask) >> kRegTypeShift); }
//! Sets a register type of the register used to pass function argument or return value.
ASMJIT_INLINE_NODEBUG void setRegType(RegType regType) noexcept { _replaceValue(kRegTypeMask, uint32_t(regType) << kRegTypeShift); }
//! Returns a physical id of the register used to pass function argument or return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regId() const noexcept { return (_data & kRegIdMask) >> kRegIdShift; }
//! Sets a physical id of the register used to pass function argument or return value.
ASMJIT_INLINE_NODEBUG void setRegId(uint32_t regId) noexcept { _replaceValue(kRegIdMask, regId << kRegIdShift); }
//! Returns a stack offset of this argument.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return int32_t(_data & kStackOffsetMask) >> kStackOffsetShift; }
//! Sets a stack offset of this argument.
ASMJIT_INLINE_NODEBUG void setStackOffset(int32_t offset) noexcept { _replaceValue(kStackOffsetMask, uint32_t(offset) << kStackOffsetShift); }
//! Tests whether the argument or return value has associated `TypeId`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasTypeId() const noexcept { return Support::test(_data, kTypeIdMask); }
//! Returns a TypeId of this argument or return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return TypeId((_data & kTypeIdMask) >> kTypeIdShift); }
//! Sets a TypeId of this argument or return value.
ASMJIT_INLINE_NODEBUG void setTypeId(TypeId typeId) noexcept { _replaceValue(kTypeIdMask, uint32_t(typeId) << kTypeIdShift); }
@@ -692,6 +765,7 @@ public:
//! \{
//! Calculates how many values are in the pack, checking for non-values from the end.
[[nodiscard]]
inline uint32_t count() const noexcept {
uint32_t n = Globals::kMaxValuePack;
while (n && !_values[n - 1])
@@ -702,8 +776,11 @@ public:
//! Returns values in this value in the pack.
//!
//! \note The returned array has exactly \ref Globals::kMaxValuePack elements.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncValue* values() noexcept { return _values; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncValue* values() const noexcept { return _values; }
//! Resets a value at the given `index` in the pack, which makes it unassigned.
@@ -740,11 +817,14 @@ public:
//! Accesses the value in the pack at the given `index`.
//!
//! \note The maximum index value is `Globals::kMaxValuePack - 1`.
[[nodiscard]]
inline FuncValue& operator[](size_t index) {
ASMJIT_ASSERT(index < Globals::kMaxValuePack);
return _values[index];
}
//! \overload
[[nodiscard]]
inline const FuncValue& operator[](size_t index) const {
ASMJIT_ASSERT(index < Globals::kMaxValuePack);
return _values[index];
@@ -809,7 +889,7 @@ public:
//! \{
//! Function doesn't have a variable number of arguments (`...`).
static constexpr uint8_t kNoVarArgs = 0xFFu;
static inline constexpr uint8_t kNoVarArgs = 0xFFu;
//! \}
@@ -871,52 +951,72 @@ public:
//! \{
//! Returns the function's calling convention, see `CallConv`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const CallConv& callConv() const noexcept { return _callConv; }
//! Returns the associated calling convention flags, see `CallConv::Flags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CallConvFlags flags() const noexcept { return _callConv.flags(); }
//! Checks whether a CallConv `flag` is set, see `CallConv::Flags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(CallConvFlags ccFlag) const noexcept { return _callConv.hasFlag(ccFlag); }
//! Tests whether the function has a return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return bool(_rets[0]); }
//! Returns the number of function arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _argCount; }
//! Returns function return values.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncValuePack& retPack() noexcept { return _rets; }
//! Returns function return values.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncValuePack& retPack() const noexcept { return _rets; }
//! Returns a function return value associated with the given `valueIndex`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncValue& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
//! Returns a function return value associated with the given `valueIndex` (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncValue& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
//! Returns function argument packs array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncValuePack* argPacks() noexcept { return _args; }
//! Returns function argument packs array (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncValuePack* argPacks() const noexcept { return _args; }
//! Returns function argument pack at the given `argIndex`.
[[nodiscard]]
inline FuncValuePack& argPack(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
return _args[argIndex];
}
//! Returns function argument pack at the given `argIndex` (const).
[[nodiscard]]
inline const FuncValuePack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
return _args[argIndex];
}
//! Returns an argument at `valueIndex` from the argument pack at the given `argIndex`.
[[nodiscard]]
inline FuncValue& arg(size_t argIndex, size_t valueIndex = 0) noexcept {
ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
return _args[argIndex][valueIndex];
}
//! Returns an argument at `valueIndex` from the argument pack at the given `argIndex` (const).
[[nodiscard]]
inline const FuncValue& arg(size_t argIndex, size_t valueIndex = 0) const noexcept {
ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
return _args[argIndex][valueIndex];
@@ -931,28 +1031,43 @@ public:
}
//! Tests whether the function has variable arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
//! Returns an index of a first variable argument.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t vaIndex() const noexcept { return _vaIndex; }
//! Tests whether the function passes one or more argument by stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasStackArgs() const noexcept { return _argStackSize != 0; }
//! Returns stack size needed for function arguments passed on the stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argStackSize() const noexcept { return _argStackSize; }
//! Returns red zone size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _callConv.redZoneSize(); }
//! Returns spill zone size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _callConv.spillZoneSize(); }
//! Returns natural stack alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _callConv.naturalStackAlignment(); }
//! Returns a mask of all passed registers of the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask passedRegs(RegGroup group) const noexcept { return _callConv.passedRegs(group); }
//! Returns a mask of all preserved registers of the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask preservedRegs(RegGroup group) const noexcept { return _callConv.preservedRegs(group); }
//! Returns a mask of all used registers of the given register `group`.
[[nodiscard]]
inline RegMask usedRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _usedRegs[size_t(group)];
@@ -1012,10 +1127,8 @@ public:
//! \name Constants
//! \{
enum : uint32_t {
//! Tag used to inform that some offset is invalid.
kTagInvalidOffset = 0xFFFFFFFFu
};
//! Tag used to inform that some offset is invalid.
static inline constexpr uint32_t kTagInvalidOffset = 0xFFFFFFFFu;
//! \}
@@ -1129,90 +1242,133 @@ public:
//! \{
//! Returns the target architecture of the function frame.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
//! Returns function frame attributes, see `Attributes`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _attributes; }
//! Checks whether the FuncFame contains an attribute `attr`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAttribute(FuncAttributes attr) const noexcept { return Support::test(_attributes, attr); }
//! Adds attributes `attrs` to the FuncFrame.
ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _attributes |= attrs; }
//! Clears attributes `attrs` from the FrameFrame.
ASMJIT_INLINE_NODEBUG void clearAttributes(FuncAttributes attrs) noexcept { _attributes &= ~attrs; }
//! Tests whether the function has variable number of arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return hasAttribute(FuncAttributes::kHasVarArgs); }
//! Sets the variable arguments flag.
ASMJIT_INLINE_NODEBUG void setVarArgs() noexcept { addAttributes(FuncAttributes::kHasVarArgs); }
//! Resets variable arguments flag.
ASMJIT_INLINE_NODEBUG void resetVarArgs() noexcept { clearAttributes(FuncAttributes::kHasVarArgs); }
//! Tests whether the function preserves frame pointer (EBP|ESP on X86).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPreservedFP() const noexcept { return hasAttribute(FuncAttributes::kHasPreservedFP); }
//! Enables preserved frame pointer.
ASMJIT_INLINE_NODEBUG void setPreservedFP() noexcept { addAttributes(FuncAttributes::kHasPreservedFP); }
//! Disables preserved frame pointer.
ASMJIT_INLINE_NODEBUG void resetPreservedFP() noexcept { clearAttributes(FuncAttributes::kHasPreservedFP); }
//! Tests whether the function calls other functions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFuncCalls() const noexcept { return hasAttribute(FuncAttributes::kHasFuncCalls); }
//! Sets `FuncAttributes::kHasFuncCalls` to true.
ASMJIT_INLINE_NODEBUG void setFuncCalls() noexcept { addAttributes(FuncAttributes::kHasFuncCalls); }
//! Sets `FuncAttributes::kHasFuncCalls` to false.
ASMJIT_INLINE_NODEBUG void resetFuncCalls() noexcept { clearAttributes(FuncAttributes::kHasFuncCalls); }
//! Tests whether the function uses indirect branch protection, see \ref FuncAttributes::kIndirectBranchProtection.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasIndirectBranchProtection() const noexcept { return hasAttribute(FuncAttributes::kIndirectBranchProtection); }
//! Enabled indirect branch protection (sets `FuncAttributes::kIndirectBranchProtection` attribute to true).
ASMJIT_INLINE_NODEBUG void setIndirectBranchProtection() noexcept { addAttributes(FuncAttributes::kIndirectBranchProtection); }
//! Disables indirect branch protection (sets `FuncAttributes::kIndirectBranchProtection` attribute to false).
ASMJIT_INLINE_NODEBUG void resetIndirectBranchProtection() noexcept { clearAttributes(FuncAttributes::kIndirectBranchProtection); }
//! Tests whether the function has AVX enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAvxEnabled() const noexcept { return hasAttribute(FuncAttributes::kX86_AVXEnabled); }
//! Enables AVX use.
ASMJIT_INLINE_NODEBUG void setAvxEnabled() noexcept { addAttributes(FuncAttributes::kX86_AVXEnabled); }
//! Disables AVX use.
ASMJIT_INLINE_NODEBUG void resetAvxEnabled() noexcept { clearAttributes(FuncAttributes::kX86_AVXEnabled); }
//! Tests whether the function has AVX-512 enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAvx512Enabled() const noexcept { return hasAttribute(FuncAttributes::kX86_AVX512Enabled); }
//! Enables AVX-512 use.
ASMJIT_INLINE_NODEBUG void setAvx512Enabled() noexcept { addAttributes(FuncAttributes::kX86_AVX512Enabled); }
//! Disables AVX-512 use.
ASMJIT_INLINE_NODEBUG void resetAvx512Enabled() noexcept { clearAttributes(FuncAttributes::kX86_AVX512Enabled); }
//! Tests whether the function has MMX cleanup - 'emms' instruction in epilog.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasMmxCleanup() const noexcept { return hasAttribute(FuncAttributes::kX86_MMXCleanup); }
//! Enables MMX cleanup.
ASMJIT_INLINE_NODEBUG void setMmxCleanup() noexcept { addAttributes(FuncAttributes::kX86_MMXCleanup); }
//! Disables MMX cleanup.
ASMJIT_INLINE_NODEBUG void resetMmxCleanup() noexcept { clearAttributes(FuncAttributes::kX86_MMXCleanup); }
//! Tests whether the function has AVX cleanup - 'vzeroupper' instruction in epilog.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAvxCleanup() const noexcept { return hasAttribute(FuncAttributes::kX86_AVXCleanup); }
//! Enables AVX cleanup.
ASMJIT_INLINE_NODEBUG void setAvxCleanup() noexcept { addAttributes(FuncAttributes::kX86_AVXCleanup); }
//! Disables AVX cleanup.
ASMJIT_INLINE_NODEBUG void resetAvxCleanup() noexcept { clearAttributes(FuncAttributes::kX86_AVXCleanup); }
//! Tests whether the function uses call stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasCallStack() const noexcept { return _callStackSize != 0; }
//! Tests whether the function uses local stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLocalStack() const noexcept { return _localStackSize != 0; }
//! Tests whether vector registers can be saved and restored by using aligned reads and writes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAlignedVecSR() const noexcept { return hasAttribute(FuncAttributes::kAlignedVecSR); }
//! Tests whether the function has to align stack dynamically.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDynamicAlignment() const noexcept { return _finalStackAlignment >= _minDynamicAlignment; }
//! Tests whether the calling convention specifies 'RedZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRedZone() const noexcept { return _redZoneSize != 0; }
//! Tests whether the calling convention specifies 'SpillZone'.
ASMJIT_INLINE_NODEBUG bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
//! Returns the size of 'RedZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _redZoneSize; }
//! Tests whether the calling convention specifies 'SpillZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
//! Returns the size of 'SpillZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
//! Resets the size of red zone, which would disable it entirely.
@@ -1224,20 +1380,31 @@ public:
ASMJIT_INLINE_NODEBUG void resetRedZone() noexcept { _redZoneSize = 0; }
//! Returns natural stack alignment (guaranteed stack alignment upon entry).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; }
//! Returns natural stack alignment (guaranteed stack alignment upon entry).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t minDynamicAlignment() const noexcept { return _minDynamicAlignment; }
//! Tests whether the callee must adjust SP before returning (X86-STDCALL only)
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasCalleeStackCleanup() const noexcept { return _calleeStackCleanup != 0; }
//! Returns home many bytes of the stack the callee must adjust before returning (X86-STDCALL only)
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t calleeStackCleanup() const noexcept { return _calleeStackCleanup; }
//! Returns call stack alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t callStackAlignment() const noexcept { return _callStackAlignment; }
//! Returns local stack alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t localStackAlignment() const noexcept { return _localStackAlignment; }
//! Returns final stack alignment (the maximum value of call, local, and natural stack alignments).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t finalStackAlignment() const noexcept { return _finalStackAlignment; }
//! Sets call stack alignment.
@@ -1273,42 +1440,57 @@ public:
}
//! Returns call stack size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t callStackSize() const noexcept { return _callStackSize; }
//! Returns local stack size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t localStackSize() const noexcept { return _localStackSize; }
//! Sets call stack size.
ASMJIT_INLINE_NODEBUG void setCallStackSize(uint32_t size) noexcept { _callStackSize = size; }
//! Sets local stack size.
ASMJIT_INLINE_NODEBUG void setLocalStackSize(uint32_t size) noexcept { _localStackSize = size; }
//! Combines call stack size with `size`, updating it to the greater value.
ASMJIT_INLINE_NODEBUG void updateCallStackSize(uint32_t size) noexcept { _callStackSize = Support::max(_callStackSize, size); }
//! Combines local stack size with `size`, updating it to the greater value.
ASMJIT_INLINE_NODEBUG void updateLocalStackSize(uint32_t size) noexcept { _localStackSize = Support::max(_localStackSize, size); }
//! Returns final stack size (only valid after the FuncFrame is finalized).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t finalStackSize() const noexcept { return _finalStackSize; }
//! Returns an offset to access the local stack (non-zero only if call stack is used).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t localStackOffset() const noexcept { return _localStackOffset; }
//! Tests whether the function prolog/epilog requires a memory slot for storing unaligned SP.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDAOffset() const noexcept { return _daOffset != kTagInvalidOffset; }
//! Returns a memory offset used to store DA (dynamic alignment) slot (relative to SP).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t daOffset() const noexcept { return _daOffset; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saOffset(uint32_t regId) const noexcept {
return regId == _spRegId ? saOffsetFromSP()
: saOffsetFromSA();
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saOffsetFromSP() const noexcept { return _saOffsetFromSP; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saOffsetFromSA() const noexcept { return _saOffsetFromSA; }
//! Returns mask of registers of the given register `group` that are modified by the function. The engine would
//! then calculate which registers must be saved & restored by the function by using the data provided by the
//! calling convention.
[[nodiscard]]
inline RegMask dirtyRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _dirtyRegs[group];
@@ -1360,61 +1542,82 @@ public:
//! Returns a calculated mask of registers of the given `group` that will be saved and restored in the function's
//! prolog and epilog, respectively. The register mask is calculated from both `dirtyRegs` (provided by user) and
//! `preservedMask` (provided by the calling convention).
[[nodiscard]]
inline RegMask savedRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _dirtyRegs[group] & _preservedRegs[group];
}
//! Returns all dirty registers as a Support::Array<> type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegMasks& dirtyRegs() const noexcept { return _dirtyRegs; }
//! Returns all preserved registers as a Support::Array<> type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegMasks& preservedRegs() const noexcept { return _preservedRegs; }
//! Returns the mask of preserved registers of the given register `group`.
//!
//! Preserved registers are those that must survive the function call unmodified. The function can only modify
//! preserved registers it they are saved and restored in function's prolog and epilog, respectively.
[[nodiscard]]
inline RegMask preservedRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _preservedRegs[group];
}
//! Returns the size of a save-restore are for the required register `group`.
[[nodiscard]]
inline uint32_t saveRestoreRegSize(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _saveRestoreRegSize[group];
}
//! Returns the alignment that must be guaranteed to save/restore the required register `group`.
[[nodiscard]]
inline uint32_t saveRestoreAlignment(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _saveRestoreAlignment[group];
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saRegId() const noexcept { return _saRegId; }
ASMJIT_INLINE_NODEBUG void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); }
ASMJIT_INLINE_NODEBUG void resetSARegId() { setSARegId(BaseReg::kIdBad); }
//! Returns stack size required to save/restore registers via push/pop.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t pushPopSaveSize() const noexcept { return _pushPopSaveSize; }
//! Returns an offset to the stack where registers are saved via push/pop.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t pushPopSaveOffset() const noexcept { return _pushPopSaveOffset; }
//! Returns stack size required to save/restore extra registers that don't use push/pop/
//!
//! \note On X86 this covers all registers except GP registers, on other architectures it can be always
//! zero (for example AArch64 saves all registers via push/pop like instructions, so this would be zero).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t extraRegSaveSize() const noexcept { return _extraRegSaveSize; }
//! Returns an offset to the stack where extra registers are saved.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t extraRegSaveOffset() const noexcept { return _extraRegSaveOffset; }
//! Tests whether the functions contains stack adjustment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasStackAdjustment() const noexcept { return _stackAdjustment != 0; }
//! Returns function's stack adjustment used in function's prolog and epilog.
//!
//! If the returned value is zero it means that the stack is not adjusted. This can mean both that the stack
//! is not used and/or the stack is only adjusted by instructions that pust/pop registers into/from stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t stackAdjustment() const noexcept { return _stackAdjustment; }
//! \}
@@ -1477,30 +1680,41 @@ public:
//! \{
//! Returns the associated \ref FuncDetail of this `FuncArgsAssignment`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncDetail* funcDetail() const noexcept { return _funcDetail; }
//! Associates \ref FuncDetails with this `FuncArgsAssignment`.
ASMJIT_INLINE_NODEBUG void setFuncDetail(const FuncDetail* fd) noexcept { _funcDetail = fd; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saRegId() const noexcept { return _saRegId; }
ASMJIT_INLINE_NODEBUG void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); }
ASMJIT_INLINE_NODEBUG void resetSARegId() { _saRegId = uint8_t(BaseReg::kIdBad); }
//! Returns assigned argument at `argIndex` and `valueIndex`.
//!
//! \note `argIndex` refers to he function argument and `valueIndex` refers to a value pack (in case multiple
//! values are passed as a single argument).
[[nodiscard]]
inline FuncValue& arg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
return _argPacks[argIndex][valueIndex];
}
//! \overload
[[nodiscard]]
inline const FuncValue& arg(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
return _argPacks[argIndex][valueIndex];
}
//! Tests whether argument at `argIndex` and `valueIndex` has been assigned.
[[nodiscard]]
inline bool isAssigned(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
return _argPacks[argIndex][valueIndex].isAssigned();

View File

@@ -28,26 +28,29 @@ static inline OperandSignature getSuitableRegForMemToMemMove(Arch arch, TypeId d
uint32_t regSize = Environment::registerSizeFromArch(arch);
OperandSignature signature{0};
if (maxSize <= regSize || (TypeUtils::isInt(dstTypeId) && TypeUtils::isInt(srcTypeId)))
if (maxSize <= regSize || (TypeUtils::isInt(dstTypeId) && TypeUtils::isInt(srcTypeId))) {
signature = maxSize <= 4 ? archTraits.regTypeToSignature(RegType::kGp32)
: archTraits.regTypeToSignature(RegType::kGp64);
else if (maxSize <= 8 && archTraits.hasRegType(RegType::kVec64))
}
else if (maxSize <= 8 && archTraits.hasRegType(RegType::kVec64)) {
signature = archTraits.regTypeToSignature(RegType::kVec64);
else if (maxSize <= 16 && archTraits.hasRegType(RegType::kVec128))
}
else if (maxSize <= 16 && archTraits.hasRegType(RegType::kVec128)) {
signature = archTraits.regTypeToSignature(RegType::kVec128);
else if (maxSize <= 32 && archTraits.hasRegType(RegType::kVec256))
}
else if (maxSize <= 32 && archTraits.hasRegType(RegType::kVec256)) {
signature = archTraits.regTypeToSignature(RegType::kVec256);
else if (maxSize <= 64 && archTraits.hasRegType(RegType::kVec512))
}
else if (maxSize <= 64 && archTraits.hasRegType(RegType::kVec512)) {
signature = archTraits.regTypeToSignature(RegType::kVec512);
}
return signature;
}
class FuncArgsContext {
public:
enum VarId : uint32_t {
kVarIdNone = 0xFF
};
static inline constexpr uint32_t kVarIdNone = 0xFF;
//! Contains information about a single argument or SA register that may need shuffling.
struct Var {
@@ -107,6 +110,7 @@ public:
memset(_physToVarId, kVarIdNone, 32);
}
[[nodiscard]]
inline bool isAssigned(uint32_t regId) const noexcept {
ASMJIT_ASSERT(regId < 32);
return Support::bitTest(_assignedRegs, regId);
@@ -150,11 +154,22 @@ public:
_assignedRegs ^= Support::bitMask(regId);
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask archRegs() const noexcept { return _archRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask workRegs() const noexcept { return _workRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask usedRegs() const noexcept { return _usedRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask assignedRegs() const noexcept { return _assignedRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask dstRegs() const noexcept { return _dstRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask availableRegs() const noexcept { return _workRegs & ~_assignedRegs; }
};
@@ -179,13 +194,22 @@ public:
FuncArgsContext() noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ArchTraits& archTraits() const noexcept { return *_archTraits; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t varCount() const noexcept { return _varCount; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t indexOf(const Var* var) const noexcept { return (size_t)(var - _vars); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Var& var(size_t varId) noexcept { return _vars[varId]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Var& var(size_t varId) const noexcept { return _vars[varId]; }
Error initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept;

View File

@@ -26,19 +26,19 @@ struct PlacementNew { void* ptr; };
#if defined(ASMJIT_NO_STDCXX)
namespace Support {
ASMJIT_FORCE_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
ASMJIT_FORCE_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
ASMJIT_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
ASMJIT_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
} // {Support}
#define ASMJIT_BASE_CLASS(TYPE) \
ASMJIT_FORCE_INLINE void* operator new(size_t n) noexcept { return Support::operatorNew(n); } \
ASMJIT_FORCE_INLINE void operator delete(void* ptr) noexcept { Support::operatorDelete(ptr); } \
ASMJIT_INLINE void* operator new(size_t n) noexcept { return Support::operatorNew(n); } \
ASMJIT_INLINE void operator delete(void* ptr) noexcept { Support::operatorDelete(ptr); } \
\
ASMJIT_FORCE_INLINE void* operator new(size_t, void* ptr) noexcept { return ptr; } \
ASMJIT_FORCE_INLINE void operator delete(void*, void*) noexcept {} \
ASMJIT_INLINE void* operator new(size_t, void* ptr) noexcept { return ptr; } \
ASMJIT_INLINE void operator delete(void*, void*) noexcept {} \
\
ASMJIT_FORCE_INLINE void* operator new(size_t, Support::PlacementNew ptr) noexcept { return ptr.ptr; } \
ASMJIT_FORCE_INLINE void operator delete(void*, Support::PlacementNew) noexcept {}
ASMJIT_INLINE void* operator new(size_t, Support::PlacementNew ptr) noexcept { return ptr.ptr; } \
ASMJIT_INLINE void operator delete(void*, Support::PlacementNew) noexcept {}
#else
#define ASMJIT_BASE_CLASS(TYPE)
#endif
@@ -69,7 +69,7 @@ enum class ResetPolicy : uint32_t {
kHard = 1
};
//! Contains typedefs, constants, and variables used globally by AsmJit.
//! Contains constants and variables used globally across AsmJit.
namespace Globals {
//! Host memory allocator overhead.
@@ -152,7 +152,7 @@ static ASMJIT_INLINE_NODEBUG void* func_as_ptr(Func func) noexcept { return Supp
//! \{
//! AsmJit error type (uint32_t).
typedef uint32_t Error;
using Error = uint32_t;
//! AsmJit error codes.
enum ErrorCode : uint32_t {
@@ -357,9 +357,11 @@ static ASMJIT_INLINE_NODEBUG void unused(Args&&...) noexcept {}
//!
//! Provided for debugging purposes. Putting a breakpoint inside `errored` can help with tracing the origin of any
//! error reported / returned by AsmJit.
[[nodiscard]]
static constexpr Error errored(Error err) noexcept { return err; }
//! Returns a printable version of `asmjit::Error` code.
[[nodiscard]]
ASMJIT_API const char* errorAsString(Error err) noexcept;
//! Called to output debugging message(s).
@@ -375,7 +377,8 @@ ASMJIT_API void debugOutput(const char* str) noexcept;
//! (asmjit/core/globals.cpp). A call stack will be available when such assertion failure is triggered. AsmJit
//! always returns errors on failures, assertions are a last resort and usually mean unrecoverable state due to out
//! of range array access or totally invalid arguments like nullptr where a valid pointer should be provided, etc...
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
[[noreturn]]
ASMJIT_API void assertionFailed(const char* file, int line, const char* msg) noexcept;
} // {DebugUtils}
@@ -385,9 +388,9 @@ ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, cons
#if defined(ASMJIT_BUILD_DEBUG)
#define ASMJIT_ASSERT(...) \
do { \
if (ASMJIT_LIKELY(__VA_ARGS__)) \
break; \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \
if (ASMJIT_UNLIKELY(!(__VA_ARGS__))) { \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \
} \
} while (0)
#else
#define ASMJIT_ASSERT(...) ((void)0)
@@ -399,9 +402,10 @@ ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, cons
//! internally, but kept public for users that want to use the same technique to propagate errors to the caller.
#define ASMJIT_PROPAGATE(...) \
do { \
::asmjit::Error _err = __VA_ARGS__; \
if (ASMJIT_UNLIKELY(_err)) \
return _err; \
::asmjit::Error _err_ = __VA_ARGS__; \
if (ASMJIT_UNLIKELY(_err_)) { \
return _err_; \
} \
} while (0)
//! \}

View File

@@ -26,7 +26,7 @@ ASMJIT_BEGIN_NAMESPACE
//!
//! - \ref x86::Inst (X86 and X86_64)
//! - \ref a64::Inst (AArch64)
typedef uint32_t InstId;
using InstId = uint32_t;
//! Instruction id parts.
//!
@@ -267,16 +267,21 @@ public:
//! \{
//! Returns the instruction id with modifiers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _id; }
//! Sets the instruction id and modiiers from `id`.
ASMJIT_INLINE_NODEBUG void setId(InstId id) noexcept { _id = id; }
//! Resets the instruction id and modifiers to zero, see \ref kIdNone.
ASMJIT_INLINE_NODEBUG void resetId() noexcept { _id = 0; }
//! Returns a real instruction id that doesn't contain any modifiers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _id & uint32_t(InstIdParts::kRealId); }
template<InstIdParts kPart>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t getInstIdPart() const noexcept {
return (uint32_t(_id) & uint32_t(kPart)) >> Support::ConstCTZ<uint32_t(kPart)>::value;
}
@@ -291,11 +296,24 @@ public:
//! \name Instruction Options
//! \{
//! Returns instruction options associated with this instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions options() const noexcept { return _options; }
//! Tests whether the given instruction `option` is enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return Support::test(_options, option); }
//! Replaces all instruction options by the given `options`.
ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _options = options; }
//! Adds instruction options provided by `options`.
ASMJIT_INLINE_NODEBUG void addOptions(InstOptions options) noexcept { _options |= options; }
//! Clears instruction options provided by `options`.
ASMJIT_INLINE_NODEBUG void clearOptions(InstOptions options) noexcept { _options &= ~options; }
//! Resets all instruction options to `InstOptions::kNone` (there will be no instruction options active after reset).
ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options = InstOptions::kNone; }
//! \}
@@ -303,11 +321,23 @@ public:
//! \name Extra Register
//! \{
//! Tests whether the instruction has associated an extra register.
//!
//! \note Extra registers are currently only used on X86 by AVX-512 masking such as `{k}` and `{k}{z}` and by repeated
//! instructions to explicitly assign a virtual register that would be ECX/RCX.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _extraReg; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; }
ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); }
//! \}
@@ -315,10 +345,15 @@ public:
//! \name ARM Specific
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG arm::CondCode armCondCode() const noexcept { return (arm::CondCode)getInstIdPart<InstIdParts::kARM_Cond>(); }
ASMJIT_INLINE_NODEBUG void setArmCondCode(arm::CondCode cc) noexcept { setInstIdPart<InstIdParts::kARM_Cond>(uint32_t(cc)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG a32::DataType armDt() const noexcept { return (a32::DataType)getInstIdPart<InstIdParts::kA32_DT>(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG a32::DataType armDt2() const noexcept { return (a32::DataType)getInstIdPart<InstIdParts::kA32_DT2>(); }
//! \}
@@ -326,26 +361,31 @@ public:
//! \name Statics
//! \{
static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, arm::CondCode cc) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, arm::CondCode cc) noexcept {
return id | (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, a32::DataType dt, arm::CondCode cc = arm::CondCode::kAL) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, a32::DataType dt, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value)
| (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, a32::DataType dt, a32::DataType dt2, arm::CondCode cc = arm::CondCode::kAL) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, a32::DataType dt, a32::DataType dt2, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value)
| (uint32_t(dt2) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT2)>::value)
| (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
static ASMJIT_INLINE_NODEBUG constexpr InstId extractRealId(uint32_t id) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId extractRealId(uint32_t id) noexcept {
return id & uint32_t(InstIdParts::kRealId);
}
static ASMJIT_INLINE_NODEBUG constexpr arm::CondCode extractARMCondCode(uint32_t id) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR arm::CondCode extractARMCondCode(uint32_t id) noexcept {
return (arm::CondCode)((uint32_t(id) & uint32_t(InstIdParts::kARM_Cond)) >> Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
@@ -543,39 +583,56 @@ struct OpRWInfo {
//! \{
//! Returns operand flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OpRWFlags opFlags() const noexcept { return _opFlags; }
//! Tests whether operand flags contain the given `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOpFlag(OpRWFlags flag) const noexcept { return Support::test(_opFlags, flag); }
//! Adds the given `flags` to operand flags.
ASMJIT_INLINE_NODEBUG void addOpFlags(OpRWFlags flags) noexcept { _opFlags |= flags; }
//! Removes the given `flags` from operand flags.
ASMJIT_INLINE_NODEBUG void clearOpFlags(OpRWFlags flags) noexcept { _opFlags &= ~flags; }
//! Tests whether this operand is read from.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasOpFlag(OpRWFlags::kRead); }
//! Tests whether this operand is written to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasOpFlag(OpRWFlags::kWrite); }
//! Tests whether this operand is both read and write.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRW; }
//! Tests whether this operand is read only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRead; }
//! Tests whether this operand is write only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kWrite; }
//! Returns the type of a lead register, which is followed by consecutive registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t consecutiveLeadCount() const noexcept { return _consecutiveLeadCount; }
//! Tests whether this operand is Reg/Mem
//!
//! Reg/Mem operands can use either register or memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRm() const noexcept { return hasOpFlag(OpRWFlags::kRegMem); }
//! Tests whether the operand will be zero extended.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isZExt() const noexcept { return hasOpFlag(OpRWFlags::kZExt); }
//! Tests whether the operand must have allocated a unique physical id that cannot be shared with other register
//! operands.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasOpFlag(OpRWFlags::kUnique); }
//! \}
@@ -585,37 +642,63 @@ struct OpRWInfo {
//! Tests whether this is a fake memory operand, which is only used, because of encoding. Fake memory operands do
//! not access any memory, they are only used to encode registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemFake() const noexcept { return hasOpFlag(OpRWFlags::kMemFake); }
//! Tests whether the instruction's memory BASE register is used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRW); }
//! Tests whether the instruction reads from its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseRead() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRead); }
//! Tests whether the instruction writes to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseWrite); }
//! Tests whether the instruction reads and writes from/to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRW; }
//! Tests whether the instruction only reads from its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRead; }
//! Tests whether the instruction only writes to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseWrite; }
//! Tests whether the instruction modifies the BASE register before it uses it to calculate the target address.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBasePreModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePreModify); }
//! Tests whether the instruction modifies the BASE register after it uses it to calculate the target address.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBasePostModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePostModify); }
//! Tests whether the instruction's memory INDEX register is used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRW); }
//! Tests whether the instruction reads the INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexRead() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRead); }
//! Tests whether the instruction writes to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexWrite); }
//! Tests whether the instruction reads and writes from/to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRW; }
//! Tests whether the instruction only reads from its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRead; }
//! Tests whether the instruction only writes to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexWrite; }
//! \}
@@ -626,9 +709,13 @@ struct OpRWInfo {
//! Returns a physical id of the register that is fixed for this operand.
//!
//! Returns \ref BaseReg::kIdBad if any register can be used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t physId() const noexcept { return _physId; }
//! Tests whether \ref physId() would return a valid physical register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPhysId() const noexcept { return _physId != BaseReg::kIdBad; }
//! Sets physical register id, which would be fixed for this operand.
ASMJIT_INLINE_NODEBUG void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); }
@@ -638,7 +725,9 @@ struct OpRWInfo {
//! \{
//! Returns Reg/Mem size of the operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; }
//! Sets Reg/Mem size of the operand.
ASMJIT_INLINE_NODEBUG void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); }
@@ -648,16 +737,23 @@ struct OpRWInfo {
//! \{
//! Returns read mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t readByteMask() const noexcept { return _readByteMask; }
//! Returns write mask.
ASMJIT_INLINE_NODEBUG uint64_t writeByteMask() const noexcept { return _writeByteMask; }
//! Returns extend mask.
ASMJIT_INLINE_NODEBUG uint64_t extendByteMask() const noexcept { return _extendByteMask; }
//! Sets read mask.
ASMJIT_INLINE_NODEBUG void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; }
//! Returns write mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t writeByteMask() const noexcept { return _writeByteMask; }
//! Sets write mask.
ASMJIT_INLINE_NODEBUG void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; }
//! Returns extend mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t extendByteMask() const noexcept { return _extendByteMask; }
//! Sets extend mask.
ASMJIT_INLINE_NODEBUG void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; }
@@ -712,12 +808,15 @@ struct InstRWInfo {
//! \{
//! Returns flags associated with the instruction, see \ref InstRWFlags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstRWFlags instFlags() const noexcept { return _instFlags; }
//! Tests whether the instruction flags contain `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstFlag(InstRWFlags flag) const noexcept { return Support::test(_instFlags, flag); }
//! Tests whether the instruction flags contain \ref InstRWFlags::kMovOp.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMovOp() const noexcept { return hasInstFlag(InstRWFlags::kMovOp); }
//! \}
@@ -726,8 +825,11 @@ struct InstRWInfo {
//! \{
//! Returns a mask of CPU flags read.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CpuRWFlags readFlags() const noexcept { return _readFlags; }
//! Returns a mask of CPU flags written.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CpuRWFlags writeFlags() const noexcept { return _writeFlags; }
//! \}
@@ -745,6 +847,7 @@ struct InstRWInfo {
//! Some AVX+ instructions may require extra features for replacing registers with memory operands, for example
//! VPSLLDQ instruction only supports `vpslldq reg, reg, imm` combination on AVX/AVX2 capable CPUs and requires
//! AVX-512 for `vpslldq reg, mem, imm` combination.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rmFeature() const noexcept { return _rmFeature; }
//! \}
@@ -753,18 +856,22 @@ struct InstRWInfo {
//! \{
//! Returns RW information of extra register operand (extraReg).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OpRWInfo& extraReg() const noexcept { return _extraReg; }
//! Returns RW information of all instruction's operands.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OpRWInfo* operands() const noexcept { return _operands; }
//! Returns RW information of the operand at the given `index`.
[[nodiscard]]
inline const OpRWInfo& operand(size_t index) const noexcept {
ASMJIT_ASSERT(index < Globals::kMaxOpCount);
return _operands[index];
}
//! Returns the number of operands this instruction has.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _opCount; }
//! \}
@@ -789,7 +896,7 @@ namespace InstAPI {
//! If `instOptions` is zero then only raw instruction name (without any additional text) will be appended.
ASMJIT_API Error instIdToString(Arch arch, InstId instId, InstStringifyOptions options, String& output) noexcept;
ASMJIT_DEPRECATED("Use `instIdToString()` with `InstStringifyOptions` parameter")
[[deprecated("Use `instIdToString()` with `InstStringifyOptions` parameter")]]
static inline Error instIdToString(Arch arch, InstId instId, String& output) noexcept {
return instIdToString(arch, instId, InstStringifyOptions::kNone, output);
}
@@ -798,11 +905,13 @@ static inline Error instIdToString(Arch arch, InstId instId, String& output) noe
//! `SIZE_MAX` if `s` is known to be null terminated.
//!
//! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such instruction exists.
[[nodiscard]]
ASMJIT_API InstId stringToInstId(Arch arch, const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT
#ifndef ASMJIT_NO_VALIDATION
//! Validates the given instruction considering the given `validationFlags`.
[[nodiscard]]
ASMJIT_API Error validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags = ValidationFlags::kNone) noexcept;
#endif // !ASMJIT_NO_VALIDATION

View File

@@ -12,12 +12,12 @@ namespace InstNameUtils {
static constexpr uint32_t kBufferSize = 32;
static ASMJIT_FORCE_INLINE char decode5BitChar(uint32_t c) noexcept {
static ASMJIT_INLINE_CONSTEXPR char decode5BitChar(uint32_t c) noexcept {
uint32_t base = c <= 26 ? uint32_t('a') - 1u : uint32_t('0') - 27u;
return char(base + c);
}
static ASMJIT_FORCE_INLINE size_t decodeToBuffer(char nameOut[kBufferSize], uint32_t nameValue, InstStringifyOptions options, const char* stringTable) noexcept {
static ASMJIT_INLINE size_t decodeToBuffer(char nameOut[kBufferSize], uint32_t nameValue, InstStringifyOptions options, const char* stringTable) noexcept {
size_t i;
if (nameValue & 0x80000000u) {

View File

@@ -57,22 +57,22 @@ public:
size_t _end;
T _bitWord;
enum : uint32_t { kBitWordSize = Support::bitSizeOf<T>() };
enum : T { kXorMask = B == 0 ? Support::allOnes<T>() : T(0) };
static inline constexpr uint32_t kBitWordSize = Support::bitSizeOf<T>();
static inline constexpr T kXorMask = B == 0 ? Support::allOnes<T>() : T(0);
ASMJIT_FORCE_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords) noexcept {
ASMJIT_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords) noexcept {
init(data, numBitWords);
}
ASMJIT_FORCE_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
ASMJIT_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
init(data, numBitWords, start, end);
}
ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords) noexcept {
ASMJIT_INLINE void init(const T* data, size_t numBitWords) noexcept {
init(data, numBitWords, 0, numBitWords * kBitWordSize);
}
ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
ASMJIT_INLINE void init(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
ASMJIT_ASSERT(numBitWords >= (end + kBitWordSize - 1) / kBitWordSize);
DebugUtils::unused(numBitWords);
@@ -80,8 +80,9 @@ public:
const T* ptr = data + (idx / kBitWordSize);
T bitWord = 0;
if (idx < end)
if (idx < end) {
bitWord = (*ptr ^ kXorMask) & (Support::allOnes<T>() << (start % kBitWordSize));
}
_ptr = ptr;
_idx = idx;
@@ -89,12 +90,13 @@ public:
_bitWord = bitWord;
}
ASMJIT_FORCE_INLINE bool nextRange(size_t* rangeStart, size_t* rangeEnd, size_t rangeHint = std::numeric_limits<size_t>::max()) noexcept {
ASMJIT_INLINE bool nextRange(size_t* rangeStart, size_t* rangeEnd, size_t rangeHint = std::numeric_limits<size_t>::max()) noexcept {
// Skip all empty BitWords.
while (_bitWord == 0) {
_idx += kBitWordSize;
if (_idx >= _end)
if (_idx >= _end) {
return false;
}
_bitWord = (*++_ptr) ^ kXorMask;
}
@@ -107,8 +109,9 @@ public:
*rangeEnd = Support::min(_idx + kBitWordSize, _end);
while (*rangeEnd - *rangeStart < rangeHint) {
_idx += kBitWordSize;
if (_idx >= _end)
if (_idx >= _end) {
break;
}
_bitWord = (*++_ptr) ^ kXorMask;
if (_bitWord != Support::allOnes<T>()) {
@@ -248,7 +251,8 @@ public:
uint32_t blockFlags,
Support::BitWord* usedBitVector,
Support::BitWord* stopBitVector,
uint32_t areaSize) noexcept
uint32_t areaSize
) noexcept
: ZoneTreeNodeT(),
_pool(pool),
_mapping(mapping),
@@ -265,29 +269,51 @@ public:
clearBlock();
}
[[nodiscard]]
inline JitAllocatorPool* pool() const noexcept { return _pool; }
[[nodiscard]]
inline uint8_t* rxPtr() const noexcept { return static_cast<uint8_t*>(_mapping.rx); }
[[nodiscard]]
inline uint8_t* rwPtr() const noexcept { return static_cast<uint8_t*>(_mapping.rw); }
[[nodiscard]]
inline bool hasFlag(uint32_t f) const noexcept { return (_flags & f) != 0; }
inline void addFlags(uint32_t f) noexcept { _flags |= f; }
inline void clearFlags(uint32_t f) noexcept { _flags &= ~f; }
[[nodiscard]]
inline bool empty() const noexcept { return hasFlag(kFlagEmpty); }
[[nodiscard]]
inline bool isDirty() const noexcept { return hasFlag(kFlagDirty); }
inline void makeDirty() noexcept { addFlags(kFlagDirty); }
[[nodiscard]]
inline bool hasLargePages() const noexcept { return hasFlag(kFlagLargePages); }
[[nodiscard]]
inline bool hasInitialPadding() const noexcept { return hasFlag(kFlagInitialPadding); }
[[nodiscard]]
inline uint32_t initialAreaStart() const noexcept { return initialAreaStartByFlags(_flags); }
[[nodiscard]]
inline size_t blockSize() const noexcept { return _blockSize; }
[[nodiscard]]
inline uint32_t areaSize() const noexcept { return _areaSize; }
[[nodiscard]]
inline uint32_t areaUsed() const noexcept { return _areaUsed; }
[[nodiscard]]
inline uint32_t areaAvailable() const noexcept { return _areaSize - _areaUsed; }
[[nodiscard]]
inline uint32_t largestUnusedArea() const noexcept { return _largestUnusedArea; }
inline void decreaseUsedArea(uint32_t value) noexcept {
@@ -334,10 +360,13 @@ public:
clearFlags(kFlagDirty | kFlagEmpty);
}
else {
if (_searchStart == allocatedAreaStart)
if (_searchStart == allocatedAreaStart) {
_searchStart = allocatedAreaEnd;
if (_searchEnd == allocatedAreaEnd)
}
if (_searchEnd == allocatedAreaEnd) {
_searchEnd = allocatedAreaStart;
}
addFlags(kFlagDirty);
clearFlags(kFlagEmpty);
@@ -447,33 +476,40 @@ static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::
// Setup pool count to [1..3].
size_t poolCount = 1;
if (Support::test(options, JitAllocatorOptions::kUseMultiplePools))
if (Support::test(options, JitAllocatorOptions::kUseMultiplePools)) {
poolCount = kJitAllocatorMultiPoolCount;
}
// Setup block size [64kB..256MB].
if (blockSize < 64 * 1024 || blockSize > 256 * 1024 * 1024 || !Support::isPowerOf2(blockSize))
if (blockSize < 64 * 1024 || blockSize > 256 * 1024 * 1024 || !Support::isPowerOf2(blockSize)) {
blockSize = vmInfo.pageGranularity;
}
// Setup granularity [64..256].
if (granularity < 64 || granularity > 256 || !Support::isPowerOf2(granularity))
if (granularity < 64 || granularity > 256 || !Support::isPowerOf2(granularity)) {
granularity = kJitAllocatorBaseGranularity;
}
// Setup fill-pattern.
if (uint32_t(options & JitAllocatorOptions::kCustomFillPattern) == 0)
if (uint32_t(options & JitAllocatorOptions::kCustomFillPattern) == 0) {
fillPattern = JitAllocator_defaultFillPattern();
}
size_t size = sizeof(JitAllocatorPrivateImpl) + sizeof(JitAllocatorPool) * poolCount;
void* p = ::malloc(size);
if (ASMJIT_UNLIKELY(!p))
if (ASMJIT_UNLIKELY(!p)) {
return nullptr;
}
VirtMem::HardenedRuntimeInfo hardenedRtInfo = VirtMem::hardenedRuntimeInfo();
if (Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kEnabled)) {
// If we are running within a hardened environment (mapping RWX is not allowed) then we have to use dual mapping
// or other runtime capabilities like Apple specific MAP_JIT. There is no point in not enabling these as otherwise
// the allocation would fail and JitAllocator would not be able to allocate memory.
if (!Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kMapJit))
if (!Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kMapJit)) {
options |= JitAllocatorOptions::kUseDualMapping;
}
}
JitAllocatorPool* pools = reinterpret_cast<JitAllocatorPool*>((uint8_t*)p + sizeof(JitAllocatorPrivateImpl));
@@ -485,8 +521,9 @@ static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::
impl->fillPattern = fillPattern;
impl->pageSize = vmInfo.pageSize;
for (size_t poolId = 0; poolId < poolCount; poolId++)
for (size_t poolId = 0; poolId < poolCount; poolId++) {
new(Support::PlacementNew{&pools[poolId]}) JitAllocatorPool(granularity << poolId);
}
return impl;
}
@@ -501,8 +538,9 @@ static inline size_t JitAllocatorImpl_sizeToPoolId(const JitAllocatorPrivateImpl
size_t granularity = size_t(impl->granularity) << poolId;
while (poolId) {
if (Support::alignUp(size, granularity) == size)
if (Support::alignUp(size, granularity) == size) {
break;
}
poolId--;
granularity >>= 1;
}
@@ -522,18 +560,21 @@ static inline size_t JitAllocatorImpl_calculateIdealBlockSize(JitAllocatorPrivat
// We have to increase the allocationSize if we know that the block must provide padding.
if (!Support::test(impl->options, JitAllocatorOptions::kDisableInitialPadding)) {
size_t granularity = pool->granularity;
if (SIZE_MAX - allocationSize < granularity)
if (SIZE_MAX - allocationSize < granularity) {
return 0; // Overflown
}
allocationSize += granularity;
}
if (blockSize < kJitAllocatorMaxBlockSize)
if (blockSize < kJitAllocatorMaxBlockSize) {
blockSize *= 2u;
}
if (allocationSize > blockSize) {
blockSize = Support::alignUp(allocationSize, impl->blockSize);
if (ASMJIT_UNLIKELY(blockSize < allocationSize))
if (ASMJIT_UNLIKELY(blockSize < allocationSize)) {
return 0; // Overflown.
}
}
return blockSize;
@@ -562,8 +603,9 @@ ASMJIT_FAVOR_SPEED static void JitAllocatorImpl_fillPattern(void* mem, uint32_t
uint32_t* mem32 = static_cast<uint32_t*>(mem);
size_t n = byteSize / 4u;
for (size_t i = 0; i < n; i++)
for (size_t i = 0; i < n; i++) {
mem32[i] = pattern;
}
}
// Allocate a new `JitAllocatorBlock` for the given `blockSize`.
@@ -619,10 +661,12 @@ static Error JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocat
// Out of memory...
if (ASMJIT_UNLIKELY(blockPtr == nullptr)) {
if (Support::test(impl->options, JitAllocatorOptions::kUseDualMapping))
VirtMem::releaseDualMapping(&virtMem, blockSize);
else
VirtMem::release(virtMem.rx, blockSize);
if (Support::test(impl->options, JitAllocatorOptions::kUseDualMapping)) {
(void)VirtMem::releaseDualMapping(&virtMem, blockSize);
}
else {
(void)VirtMem::release(virtMem.rx, blockSize);
}
return DebugUtils::errored(kErrorOutOfMemory);
}
@@ -640,10 +684,12 @@ static Error JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocat
static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
DebugUtils::unused(impl);
if (block->hasFlag(JitAllocatorBlock::kFlagDualMapped))
VirtMem::releaseDualMapping(&block->_mapping, block->blockSize());
else
VirtMem::release(block->rxPtr(), block->blockSize());
if (block->hasFlag(JitAllocatorBlock::kFlagDualMapped)) {
(void)VirtMem::releaseDualMapping(&block->_mapping, block->blockSize());
}
else {
(void)VirtMem::release(block->rxPtr(), block->blockSize());
}
::free(block);
}
@@ -651,8 +697,9 @@ static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAlloc
static void JitAllocatorImpl_insertBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
JitAllocatorPool* pool = block->pool();
if (!pool->cursor)
if (!pool->cursor) {
pool->cursor = block;
}
// Add to RBTree and List.
impl->tree.insert(block);
@@ -670,8 +717,9 @@ static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAlloc
JitAllocatorPool* pool = block->pool();
// Remove from RBTree and List.
if (pool->cursor == block)
if (pool->cursor == block) {
pool->cursor = block->hasPrev() ? block->prev() : block->next();
}
impl->tree.remove(block);
pool->blocks.unlink(block);
@@ -685,8 +733,9 @@ static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAlloc
}
static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
if (block->hasFlag(JitAllocatorBlock::kFlagEmpty))
if (block->hasFlag(JitAllocatorBlock::kFlagEmpty)) {
return;
}
JitAllocatorPool* pool = block->pool();
if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) {
@@ -717,13 +766,15 @@ static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllo
JitAllocator::JitAllocator(const CreateParams* params) noexcept {
_impl = JitAllocatorImpl_new(params);
if (ASMJIT_UNLIKELY(!_impl))
if (ASMJIT_UNLIKELY(!_impl)) {
_impl = const_cast<JitAllocator::Impl*>(&JitAllocatorImpl_none);
}
}
JitAllocator::~JitAllocator() noexcept {
if (_impl == &JitAllocatorImpl_none)
if (_impl == &JitAllocatorImpl_none) {
return;
}
reset(ResetPolicy::kHard);
JitAllocatorImpl_destroy(static_cast<JitAllocatorPrivateImpl*>(_impl));
@@ -733,8 +784,9 @@ JitAllocator::~JitAllocator() noexcept {
// ====================
void JitAllocator::reset(ResetPolicy resetPolicy) noexcept {
if (_impl == &JitAllocatorImpl_none)
if (_impl == &JitAllocatorImpl_none) {
return;
}
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
impl->tree.reset();
@@ -802,19 +854,22 @@ JitAllocator::Statistics JitAllocator::statistics() const noexcept {
Error JitAllocator::alloc(Span& out, size_t size) noexcept {
out = Span{};
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) {
return DebugUtils::errored(kErrorNotInitialized);
}
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
constexpr uint32_t kNoIndex = std::numeric_limits<uint32_t>::max();
// Align to the minimum granularity by default.
size = Support::alignUp<size_t>(size, impl->granularity);
if (ASMJIT_UNLIKELY(size == 0))
if (ASMJIT_UNLIKELY(size == 0)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
if (ASMJIT_UNLIKELY(size > std::numeric_limits<uint32_t>::max() / 2))
if (ASMJIT_UNLIKELY(size > std::numeric_limits<uint32_t>::max() / 2)) {
return DebugUtils::errored(kErrorTooLarge);
}
LockGuard guard(impl->lock);
JitAllocatorPool* pool = &impl->pools[JitAllocatorImpl_sizeToPoolId(impl, size)];
@@ -849,8 +904,9 @@ Error JitAllocator::alloc(Span& out, size_t size) noexcept {
largestArea = Support::max(largestArea, rangeSize);
}
if (areaIndex != kNoIndex)
if (areaIndex != kNoIndex) {
break;
}
if (searchStart != SIZE_MAX) {
// Because we have iterated over the entire block, we can now mark the
@@ -872,8 +928,9 @@ Error JitAllocator::alloc(Span& out, size_t size) noexcept {
// Allocate a new block if there is no region of a required size.
if (areaIndex == kNoIndex) {
size_t blockSize = JitAllocatorImpl_calculateIdealBlockSize(impl, pool, size);
if (ASMJIT_UNLIKELY(!blockSize))
if (ASMJIT_UNLIKELY(!blockSize)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
ASMJIT_PROPAGATE(JitAllocatorImpl_newBlock(impl, &block, pool, blockSize));
areaIndex = block->initialAreaStart();
@@ -904,18 +961,21 @@ Error JitAllocator::alloc(Span& out, size_t size) noexcept {
}
Error JitAllocator::release(void* rx) noexcept {
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(!rx))
if (ASMJIT_UNLIKELY(!rx)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
LockGuard guard(impl->lock);
JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rx));
if (ASMJIT_UNLIKELY(!block))
if (ASMJIT_UNLIKELY(!block)) {
return DebugUtils::errored(kErrorInvalidState);
}
// Offset relative to the start of the block.
JitAllocatorPool* pool = block->pool();
@@ -954,8 +1014,9 @@ Error JitAllocator::release(void* rx) noexcept {
static Error JitAllocatorImpl_shrink(JitAllocatorPrivateImpl* impl, JitAllocator::Span& span, size_t newSize, bool alreadyUnderWriteScope) noexcept {
JitAllocatorBlock* block = static_cast<JitAllocatorBlock*>(span._block);
if (ASMJIT_UNLIKELY(!block))
if (ASMJIT_UNLIKELY(!block)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
LockGuard guard(impl->lock);
@@ -968,16 +1029,18 @@ static Error JitAllocatorImpl_shrink(JitAllocatorPrivateImpl* impl, JitAllocator
// Don't trust `span.size()` - if it has been already truncated we would be off...
bool isUsed = Support::bitVectorGetBit(block->_usedBitVector, areaStart);
if (ASMJIT_UNLIKELY(!isUsed))
if (ASMJIT_UNLIKELY(!isUsed)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1;
uint32_t areaPrevSize = areaEnd - areaStart;
uint32_t spanPrevSize = areaPrevSize * pool->granularity;
uint32_t areaShrunkSize = pool->areaSizeFromByteSize(newSize);
if (ASMJIT_UNLIKELY(areaShrunkSize > areaPrevSize))
if (ASMJIT_UNLIKELY(areaShrunkSize > areaPrevSize)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
uint32_t areaDiff = areaPrevSize - areaShrunkSize;
if (areaDiff) {
@@ -1003,11 +1066,13 @@ static Error JitAllocatorImpl_shrink(JitAllocatorPrivateImpl* impl, JitAllocator
}
Error JitAllocator::shrink(Span& span, size_t newSize) noexcept {
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(!span.rx()))
if (ASMJIT_UNLIKELY(!span.rx())) {
return DebugUtils::errored(kErrorInvalidArgument);
}
if (ASMJIT_UNLIKELY(newSize == 0)) {
Error err = release(span.rx());
@@ -1021,15 +1086,17 @@ Error JitAllocator::shrink(Span& span, size_t newSize) noexcept {
Error JitAllocator::query(Span& out, void* rx) const noexcept {
out = Span{};
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) {
return DebugUtils::errored(kErrorNotInitialized);
}
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
LockGuard guard(impl->lock);
JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rx));
if (ASMJIT_UNLIKELY(!block))
if (ASMJIT_UNLIKELY(!block)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
// Offset relative to the start of the block.
JitAllocatorPool* pool = block->pool();
@@ -1039,8 +1106,9 @@ Error JitAllocator::query(Span& out, void* rx) const noexcept {
uint32_t areaStart = uint32_t(offset >> pool->granularityLog2);
bool isUsed = Support::bitVectorGetBit(block->_usedBitVector, areaStart);
if (ASMJIT_UNLIKELY(!isUsed))
if (ASMJIT_UNLIKELY(!isUsed)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1;
size_t byteOffset = pool->byteSizeFromAreaSize(areaStart);
@@ -1057,22 +1125,27 @@ Error JitAllocator::query(Span& out, void* rx) const noexcept {
// JitAllocator - Write
// ====================
static ASMJIT_FORCE_INLINE VirtMem::CachePolicy JitAllocator_defaultPolicyForSpan(const JitAllocator::Span& span) noexcept {
if (Support::test(span.flags(), JitAllocator::Span::Flags::kInstructionCacheClean))
static ASMJIT_INLINE VirtMem::CachePolicy JitAllocator_defaultPolicyForSpan(const JitAllocator::Span& span) noexcept {
if (Support::test(span.flags(), JitAllocator::Span::Flags::kInstructionCacheClean)) {
return VirtMem::CachePolicy::kNeverFlush;
else
}
else {
return VirtMem::CachePolicy::kFlushAfterWrite;
}
}
Error JitAllocator::write(Span& span, size_t offset, const void* src, size_t size, VirtMem::CachePolicy policy) noexcept {
if (ASMJIT_UNLIKELY(span._block == nullptr || offset > span.size() || span.size() - offset < size))
if (ASMJIT_UNLIKELY(span._block == nullptr || offset > span.size() || span.size() - offset < size)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
if (ASMJIT_UNLIKELY(size == 0))
if (ASMJIT_UNLIKELY(size == 0)) {
return kErrorOk;
}
if (policy == VirtMem::CachePolicy::kDefault)
if (policy == VirtMem::CachePolicy::kDefault) {
policy = JitAllocator_defaultPolicyForSpan(span);
}
VirtMem::ProtectJitReadWriteScope writeScope(span.rx(), span.size(), policy);
memcpy(static_cast<uint8_t*>(span.rw()) + offset, src, size);
@@ -1080,15 +1153,18 @@ Error JitAllocator::write(Span& span, size_t offset, const void* src, size_t siz
}
Error JitAllocator::write(Span& span, WriteFunc writeFunc, void* userData, VirtMem::CachePolicy policy) noexcept {
if (ASMJIT_UNLIKELY(span._block == nullptr) || span.size() == 0)
if (ASMJIT_UNLIKELY(span._block == nullptr) || span.size() == 0) {
return DebugUtils::errored(kErrorInvalidArgument);
}
size_t size = span.size();
if (ASMJIT_UNLIKELY(size == 0))
if (ASMJIT_UNLIKELY(size == 0)) {
return kErrorOk;
}
if (policy == VirtMem::CachePolicy::kDefault)
if (policy == VirtMem::CachePolicy::kDefault) {
policy = JitAllocator_defaultPolicyForSpan(span);
}
VirtMem::ProtectJitReadWriteScope writeScope(span.rx(), span.size(), policy);
ASMJIT_PROPAGATE(writeFunc(span, userData));
@@ -1113,30 +1189,34 @@ Error JitAllocator::beginWriteScope(WriteScopeData& scope, VirtMem::CachePolicy
}
Error JitAllocator::endWriteScope(WriteScopeData& scope) noexcept {
if (ASMJIT_UNLIKELY(!scope._allocator))
if (ASMJIT_UNLIKELY(!scope._allocator)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
return kErrorOk;
}
Error JitAllocator::flushWriteScope(WriteScopeData& scope) noexcept {
if (ASMJIT_UNLIKELY(!scope._allocator))
if (ASMJIT_UNLIKELY(!scope._allocator)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
return kErrorOk;
}
Error JitAllocator::scopedWrite(WriteScopeData& scope, Span& span, size_t offset, const void* src, size_t size) noexcept {
if (ASMJIT_UNLIKELY(!scope._allocator))
if (ASMJIT_UNLIKELY(!scope._allocator)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
VirtMem::CachePolicy policy = VirtMem::CachePolicy(scope._data[0]);
return scope._allocator->write(span, offset, src, size, policy);
}
Error JitAllocator::scopedWrite(WriteScopeData& scope, Span& span, WriteFunc writeFunc, void* userData) noexcept {
if (ASMJIT_UNLIKELY(!scope._allocator))
if (ASMJIT_UNLIKELY(!scope._allocator)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
VirtMem::CachePolicy policy = VirtMem::CachePolicy(scope._data[0]);
return scope._allocator->write(span, writeFunc, userData, policy);
@@ -1334,8 +1414,9 @@ public:
Record* record = _records.get(static_cast<uint8_t*>(p));
EXPECT_NOT_NULL(record);
if (!newSize)
if (!newSize) {
return release(p);
}
JitAllocator::Span span;
EXPECT_EQ(_allocator.query(span, p), kErrorOk);
@@ -1375,10 +1456,12 @@ static void BitVectorRangeIterator_testRandom(Random& rnd, size_t count) noexcep
BitVectorRangeIterator<T, Bit> it(in, kPatternSize);
size_t rangeStart, rangeEnd;
while (it.nextRange(&rangeStart, &rangeEnd)) {
if (Bit)
if (Bit) {
Support::bitVectorFill(out, rangeStart, rangeEnd - rangeStart);
else
}
else {
Support::bitVectorClear(out, rangeStart, rangeEnd - rangeStart);
}
}
}
@@ -1463,80 +1546,94 @@ static void test_jit_allocator_alloc_release() noexcept {
// Random blocks tests...
INFO(" Allocating random blocks...");
for (i = 0; i < kCount; i++)
for (i = 0; i < kCount; i++) {
ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Releasing all allocated blocks from the beginning...");
for (i = 0; i < kCount; i++)
for (i = 0; i < kCount; i++) {
wrapper.release(ptrArray[i]);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Allocating random blocks again...", kCount);
for (i = 0; i < kCount; i++)
for (i = 0; i < kCount; i++) {
ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Shuffling allocated blocks...");
JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng);
INFO(" Releasing 50%% of allocated blocks...");
for (i = 0; i < kCount / 2; i++)
for (i = 0; i < kCount / 2; i++) {
wrapper.release(ptrArray[i]);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Allocating 50%% more blocks again...");
for (i = 0; i < kCount / 2; i++)
for (i = 0; i < kCount / 2; i++) {
ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Releasing all allocated blocks from the end...");
for (i = 0; i < kCount; i++)
for (i = 0; i < kCount; i++) {
wrapper.release(ptrArray[kCount - i - 1]);
}
JitAllocatorTest_usage(wrapper._allocator);
// Fixed blocks tests...
INFO(" Allocating %zuB blocks...", fixedBlockSize);
for (i = 0; i < kCount / 2; i++)
for (i = 0; i < kCount / 2; i++) {
ptrArray[i] = wrapper.alloc(fixedBlockSize);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Shrinking each %zuB block to 1 byte", fixedBlockSize);
for (i = 0; i < kCount / 2; i++)
for (i = 0; i < kCount / 2; i++) {
wrapper.shrink(ptrArray[i], 1);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Allocating more 64B blocks...", 64);
for (i = kCount / 2; i < kCount; i++)
for (i = kCount / 2; i < kCount; i++) {
ptrArray[i] = wrapper.alloc(64);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Releasing all blocks from the beginning...");
for (i = 0; i < kCount; i++)
for (i = 0; i < kCount; i++) {
wrapper.release(ptrArray[i]);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Allocating %zuB blocks...", fixedBlockSize);
for (i = 0; i < kCount; i++)
for (i = 0; i < kCount; i++) {
ptrArray[i] = wrapper.alloc(fixedBlockSize);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Shuffling allocated blocks...");
JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng);
INFO(" Releasing 50%% of allocated blocks...");
for (i = 0; i < kCount / 2; i++)
for (i = 0; i < kCount / 2; i++) {
wrapper.release(ptrArray[i]);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Allocating 50%% more %zuB blocks again...", fixedBlockSize);
for (i = 0; i < kCount / 2; i++)
for (i = 0; i < kCount / 2; i++) {
ptrArray[i] = wrapper.alloc(fixedBlockSize);
}
JitAllocatorTest_usage(wrapper._allocator);
INFO(" Releasing all allocated blocks from the end...");
for (i = 0; i < kCount; i++)
for (i = 0; i < kCount; i++) {
wrapper.release(ptrArray[kCount - i - 1]);
}
JitAllocatorTest_usage(wrapper._allocator);
::free(ptrArray);

View File

@@ -182,6 +182,7 @@ public:
//! Destroys the `JitAllocator` instance and release all blocks held.
ASMJIT_API ~JitAllocator() noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _impl->blockSize == 0; }
//! Free all allocated memory - makes all pointers returned by `alloc()` invalid.
@@ -196,15 +197,23 @@ public:
//! \{
//! Returns allocator options, see `Flags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JitAllocatorOptions options() const noexcept { return _impl->options; }
//! Tests whether the allocator has the given `option` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOption(JitAllocatorOptions option) const noexcept { return uint32_t(_impl->options & option) != 0; }
//! Returns a base block size (a minimum size of block that the allocator would allocate).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t blockSize() const noexcept { return _impl->blockSize; }
//! Returns granularity of the allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t granularity() const noexcept { return _impl->granularity; }
//! Returns pattern that is used to fill unused memory if `kFlagUseFillPattern` is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t fillPattern() const noexcept { return _impl->fillPattern; }
//! \}
@@ -265,6 +274,7 @@ public:
//! Returns a pointer having Read & Execute permissions (references executable memory).
//!
//! This pointer is never NULL if the allocation succeeded, it points to an executable memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG void* rx() const noexcept { return _rx; }
//! Returns a pointer having Read & Write permissions (references writable memory).
@@ -284,12 +294,15 @@ public:
//!
//! If \ref VirtMem::ProtectJitReadWriteScope is not used it's important to clear the instruction cache via
//! \ref VirtMem::flushInstructionCache() after the write is done.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG void* rw() const noexcept { return _rw; }
//! Returns size of this span, aligned to the allocator granularity.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns span flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Flags flags() const noexcept { return _flags; }
//! Shrinks this span to `newSize`.
@@ -300,12 +313,14 @@ public:
ASMJIT_INLINE_NODEBUG void shrink(size_t newSize) noexcept { _size = Support::min(_size, newSize); }
//! Returns whether \ref rw() returns a non-null pointer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDirectlyWritable() const noexcept { return _rw != nullptr; }
//! \}
};
//! Allocates a new memory span of the requested `size`.
[[nodiscard]]
ASMJIT_API Error alloc(Span& out, size_t size) noexcept;
//! Releases a memory block returned by `alloc()`.
@@ -321,6 +336,7 @@ public:
//! Queries information about an allocated memory block that contains the given `rx`, and writes it to `out`.
//!
//! If the pointer is matched, the function returns `kErrorOk` and fills `out` with the corresponding span.
[[nodiscard]]
ASMJIT_API Error query(Span& out, void* rx) const noexcept;
//! \}
@@ -328,7 +344,7 @@ public:
//! \name Write Operations
//! \{
typedef Error (ASMJIT_CDECL* WriteFunc)(Span& span, void* userData) ASMJIT_NOEXCEPT_TYPE;
using WriteFunc = Error (ASMJIT_CDECL*)(Span& span, void* userData) noexcept;
ASMJIT_API Error write(
Span& span,
@@ -344,7 +360,7 @@ public:
VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
template<class Lambda>
ASMJIT_FORCE_INLINE Error write(
ASMJIT_INLINE Error write(
Span& span,
Lambda&& lambdaFunc,
VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept {
@@ -445,7 +461,12 @@ public:
//! \name Accessors
//! \{
//! Returns \ref JitAllocator associated with this write scope.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return _allocator; }
//! Returns cache policy this write scope is using.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtMem::CachePolicy policy() const noexcept { return _policy; }
//! \}
@@ -499,27 +520,40 @@ public:
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Statistics{}; }
//! Returns count of blocks managed by `JitAllocator` at the moment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t blockCount() const noexcept { return _blockCount; }
//! Returns the number of active allocations.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t allocationCount() const noexcept { return _allocationCount; }
//! Returns how many bytes are currently used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t usedSize() const noexcept { return _usedSize; }
//! Returns the number of bytes unused by the allocator at the moment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t unusedSize() const noexcept { return _reservedSize - _usedSize; }
//! Returns the total number of bytes reserved by the allocator (sum of sizes of all blocks).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t reservedSize() const noexcept { return _reservedSize; }
//! Returns the number of bytes the allocator needs to manage the allocated memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t overheadSize() const noexcept { return _overheadSize; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG double usedSizeAsPercent() const noexcept {
return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG double unusedSizeAsPercent() const noexcept {
return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG double overheadSizeAsPercent() const noexcept {
return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
@@ -528,6 +562,7 @@ public:
//! Returns JIT allocator statistics.
//!
//! \remarks This function is thread-safe.
[[nodiscard]]
ASMJIT_API Statistics statistics() const noexcept;
//! \}

View File

@@ -27,8 +27,9 @@ Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(code->resolveUnresolvedLinks());
size_t estimatedCodeSize = code->codeSize();
if (ASMJIT_UNLIKELY(estimatedCodeSize == 0))
if (ASMJIT_UNLIKELY(estimatedCodeSize == 0)) {
return DebugUtils::errored(kErrorNoCodeGenerated);
}
JitAllocator::Span span;
ASMJIT_PROPAGATE(_allocator.alloc(span, estimatedCodeSize));

View File

@@ -59,6 +59,7 @@ public:
//! \{
//! Returns the associated `JitAllocator`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return const_cast<JitAllocator*>(&_allocator); }
//! \}

View File

@@ -52,11 +52,13 @@ FileLogger::FileLogger(FILE* file) noexcept
FileLogger::~FileLogger() noexcept {}
Error FileLogger::_log(const char* data, size_t size) noexcept {
if (!_file)
if (!_file) {
return kErrorOk;
}
if (size == SIZE_MAX)
if (size == SIZE_MAX) {
size = strlen(data);
}
fwrite(data, 1, size, _file);
return kErrorOk;

View File

@@ -47,36 +47,53 @@ public:
//! \{
//! Returns \ref FormatOptions of this logger.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FormatOptions& options() noexcept { return _options; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FormatOptions& options() const noexcept { return _options; }
//! Sets formatting options of this Logger to `options`.
ASMJIT_INLINE_NODEBUG void setOptions(const FormatOptions& options) noexcept { _options = options; }
//! Resets formatting options of this Logger to defaults.
ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options.reset(); }
//! Returns formatting flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _options.flags(); }
//! Tests whether the logger has the given `flag` enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return _options.hasFlag(flag); }
//! Sets formatting flags to `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _options.setFlags(flags); }
//! Enables the given formatting `flags`.
ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _options.addFlags(flags); }
//! Disables the given formatting `flags`.
ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _options.clearFlags(flags); }
//! Returns indentation of a given indentation `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t indentation(FormatIndentationGroup type) const noexcept { return _options.indentation(type); }
//! Sets indentation of the given indentation `group` to `n` spaces.
ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup type, uint32_t n) noexcept { _options.setIndentation(type, n); }
//! Resets indentation of the given indentation `group` to 0 spaces.
ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup type) noexcept { _options.resetIndentation(type); }
//! Returns padding of a given padding `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup type) const noexcept { return _options.padding(type); }
//! Sets padding of a given padding `group` to `n`.
ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup type, uint32_t n) noexcept { _options.setPadding(type, n); }
//! Resets padding of a given padding `group` to 0, which means that a default will be used.
ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup type) noexcept { _options.resetPadding(type); }
@@ -127,6 +144,7 @@ public:
//! \{
//! Returns the logging output stream or null if the logger has no output stream.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FILE* file() const noexcept { return _file; }
//! Sets the logging output stream to `stream` or null.
@@ -165,15 +183,21 @@ public:
//! Returns the content of the logger as \ref String.
//!
//! It can be moved, if desired.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG String& content() noexcept { return _content; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const String& content() const noexcept { return _content; }
//! Returns aggregated logger data as `char*` pointer.
//!
//! The pointer is owned by `StringLogger`, it can't be modified or freed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return _content.data(); }
//! Returns size of the data returned by `data()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t dataSize() const noexcept { return _content.size(); }
//! \}

File diff suppressed because it is too large Load Diff

View File

@@ -17,8 +17,9 @@ ASMJIT_BEGIN_NAMESPACE
#if !defined(_WIN32)
Error OSUtils::readFile(const char* name, String& dst, size_t maxSize) noexcept {
char* buffer = dst.prepare(String::ModifyOp::kAssign, maxSize);
if (ASMJIT_UNLIKELY(!buffer))
if (ASMJIT_UNLIKELY(!buffer)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
int fd = ASMJIT_FILE64_API(::open)(name, O_RDONLY);
if (fd < 0) {

View File

@@ -35,7 +35,7 @@ public:
Handle _handle;
#pragma pack(pop)
#elif !defined(__EMSCRIPTEN__)
typedef pthread_mutex_t Handle;
using Handle = pthread_mutex_t;
Handle _handle;
#endif

View File

@@ -66,6 +66,7 @@ public:
//! PhysReg to WorkReg mapping.
uint32_t workIds[1 /* ... */];
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG size_t sizeOf(size_t count) noexcept {
return sizeof(PhysToWorkMap) - sizeof(uint32_t) + count * sizeof(uint32_t);
}
@@ -74,8 +75,9 @@ public:
assigned.reset();
dirty.reset();
for (size_t i = 0; i < count; i++)
for (size_t i = 0; i < count; i++) {
workIds[i] = kWorkNone;
}
}
inline void copyFrom(const PhysToWorkMap* other, size_t count) noexcept {
@@ -94,19 +96,22 @@ public:
//! WorkReg to PhysReg mapping
uint8_t physIds[1 /* ... */];
[[nodiscard]]
static inline size_t sizeOf(size_t count) noexcept {
return size_t(count) * sizeof(uint8_t);
}
inline void reset(size_t count) noexcept {
for (size_t i = 0; i < count; i++)
for (size_t i = 0; i < count; i++) {
physIds[i] = kPhysNone;
}
}
inline void copyFrom(const WorkToPhysMap* other, size_t count) noexcept {
size_t size = sizeOf(count);
if (ASMJIT_LIKELY(size))
if (ASMJIT_LIKELY(size)) {
memcpy(this, other, size);
}
}
};
@@ -132,7 +137,7 @@ public:
resetMaps();
}
ASMJIT_FORCE_INLINE void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept {
ASMJIT_INLINE void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept {
// Layout must be initialized before data.
ASMJIT_ASSERT(_physToWorkMap == nullptr);
ASMJIT_ASSERT(_workToPhysMap == nullptr);
@@ -145,14 +150,15 @@ public:
_layout.workRegs = &workRegs;
}
ASMJIT_FORCE_INLINE void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
ASMJIT_INLINE void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
_physToWorkMap = physToWorkMap;
_workToPhysMap = workToPhysMap;
for (RegGroup group : RegGroupVirtValues{})
for (RegGroup group : RegGroupVirtValues{}) {
_physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group);
}
}
ASMJIT_FORCE_INLINE void resetMaps() noexcept {
ASMJIT_INLINE void resetMaps() noexcept {
_physToWorkMap = nullptr;
_workToPhysMap = nullptr;
_physToWorkIds.fill(nullptr);
@@ -163,17 +169,31 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RARegMask& assigned() noexcept { return _physToWorkMap->assigned; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t assigned(RegGroup group) const noexcept { return _physToWorkMap->assigned[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RARegMask& dirty() noexcept { return _physToWorkMap->dirty; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask dirty(RegGroup group) const noexcept { return _physToWorkMap->dirty[group]; }
[[nodiscard]]
inline uint32_t workToPhysId(RegGroup group, uint32_t workId) const noexcept {
DebugUtils::unused(group);
ASMJIT_ASSERT(workId != kWorkNone);
@@ -181,16 +201,19 @@ public:
return _workToPhysMap->physIds[workId];
}
[[nodiscard]]
inline uint32_t physToWorkId(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return _physToWorkIds[group][physId];
}
[[nodiscard]]
inline bool isPhysAssigned(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->assigned[group], physId);
}
[[nodiscard]]
inline bool isPhysDirty(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->dirty[group], physId);
@@ -304,7 +327,7 @@ public:
//! \name Utilities
//! \{
ASMJIT_FORCE_INLINE void swap(RAAssignment& other) noexcept {
ASMJIT_INLINE void swap(RAAssignment& other) noexcept {
std::swap(_workToPhysMap, other._workToPhysMap);
std::swap(_physToWorkMap, other._physToWorkMap);
_physToWorkIds.swap(other._physToWorkIds);
@@ -342,6 +365,7 @@ public:
}
// Not really useful outside of debugging.
[[nodiscard]]
bool equals(const RAAssignment& other) const noexcept {
// Layout should always match.
if (_layout.physIndex != other._layout.physIndex ||
@@ -357,15 +381,17 @@ public:
for (uint32_t physId = 0; physId < physTotal; physId++) {
uint32_t thisWorkId = _physToWorkMap->workIds[physId];
uint32_t otherWorkId = other._physToWorkMap->workIds[physId];
if (thisWorkId != otherWorkId)
if (thisWorkId != otherWorkId) {
return false;
}
}
for (uint32_t workId = 0; workId < workCount; workId++) {
uint32_t thisPhysId = _workToPhysMap->physIds[workId];
uint32_t otherPhysId = other._workToPhysMap->physIds[workId];
if (thisPhysId != otherPhysId)
if (thisPhysId != otherPhysId) {
return false;
}
}
if (_physToWorkMap->assigned != other._physToWorkMap->assigned ||

View File

@@ -21,15 +21,18 @@ ASMJIT_BEGIN_NAMESPACE
template<typename This>
class RACFGBuilderT {
public:
enum : uint32_t {
kRootIndentation = 2,
kCodeIndentation = 4,
//! \name Constants
//! \{
// NOTE: This is a bit hacky. There are some nodes which are processed twice (see `onBeforeInvoke()` and
// `onBeforeRet()`) as they can insert some nodes around them. Since we don't have any flags to mark these
// we just use their position that is [at that time] unassigned.
kNodePositionDidOnBefore = 0xFFFFFFFFu
};
static inline constexpr uint32_t kRootIndentation = 2;
static inline constexpr uint32_t kCodeIndentation = 4;
// NOTE: This is a bit hacky. There are some nodes which are processed twice (see `onBeforeInvoke()` and
// `onBeforeRet()`) as they can insert some nodes around them. Since we don't have any flags to mark these
// we just use their position that is [at that time] unassigned.
static inline constexpr uint32_t kNodePositionDidOnBefore = 0xFFFFFFFFu;
//! \}
//! \name Members
//! \{
@@ -60,17 +63,20 @@ public:
_cc(pass->cc()) {
#ifndef ASMJIT_NO_LOGGING
_logger = _pass->hasDiagnosticOption(DiagnosticOptions::kRADebugCFG) ? _pass->logger() : nullptr;
if (_logger)
if (_logger) {
_formatOptions = _logger->options();
}
#endif
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return _cc; }
//! \name Run
//! \{
//! Called per function by an architecture-specific CFG builder.
[[nodiscard]]
Error run() noexcept {
log("[BuildCFG]\n");
ASMJIT_PROPAGATE(prepare());
@@ -289,8 +295,9 @@ public:
_hasCode = false;
_blockRegStats.reset();
if (_curBlock->isConstructed())
if (_curBlock->isConstructed()) {
break;
}
ASMJIT_PROPAGATE(_pass->addBlock(consecutiveBlock));
logBlock(_curBlock, kRootIndentation);
@@ -417,16 +424,18 @@ public:
if (node->type() == NodeType::kSentinel) {
if (node == _funcNode->endNode()) {
// Make sure we didn't flow here if this is the end of the function sentinel.
if (ASMJIT_UNLIKELY(_curBlock && _hasCode))
if (ASMJIT_UNLIKELY(_curBlock && _hasCode)) {
return DebugUtils::errored(kErrorInvalidState);
}
break;
}
}
else if (node->type() == NodeType::kFunc) {
// RAPass can only compile a single function at a time. If we
// encountered a function it must be the current one, bail if not.
if (ASMJIT_UNLIKELY(node != _funcNode))
if (ASMJIT_UNLIKELY(node != _funcNode)) {
return DebugUtils::errored(kErrorInvalidState);
}
// PASS if this is the first node.
}
else {
@@ -440,15 +449,18 @@ public:
// NOTE: We cannot encounter a NULL node, because every function must be terminated by a sentinel (`stop`)
// node. If we encountered a NULL node it means that something went wrong and this node list is corrupted;
// bail in such case.
if (ASMJIT_UNLIKELY(!node))
if (ASMJIT_UNLIKELY(!node)) {
return DebugUtils::errored(kErrorInvalidState);
}
}
if (_pass->hasDanglingBlocks())
if (_pass->hasDanglingBlocks()) {
return DebugUtils::errored(kErrorInvalidState);
}
for (RABlock* block : blocksWithUnknownJumps)
handleBlockWithUnknownJump(block);
for (RABlock* block : blocksWithUnknownJumps) {
ASMJIT_PROPAGATE(handleBlockWithUnknownJump(block));
}
return _pass->initSharedAssignments(_sharedAssignmentsMap);
}
@@ -459,6 +471,7 @@ public:
//! \{
//! Prepares the CFG builder of the current function.
[[nodiscard]]
Error prepare() noexcept {
FuncNode* func = _pass->func();
BaseNode* node = nullptr;
@@ -508,6 +521,7 @@ public:
//!
//! If we encounter such block we basically insert all existing blocks as successors except the function entry
//! block and a natural successor, if such block exists.
[[nodiscard]]
Error handleBlockWithUnknownJump(RABlock* block) noexcept {
RABlocks& blocks = _pass->blocks();
size_t blockCount = blocks.size();
@@ -517,40 +531,48 @@ public:
RABlock* consecutive = block->consecutive();
for (size_t i = 1; i < blockCount; i++) {
RABlock* candidate = blocks[i];
if (candidate == consecutive || !candidate->isTargetable())
if (candidate == consecutive || !candidate->isTargetable()) {
continue;
block->appendSuccessor(candidate);
}
ASMJIT_PROPAGATE(block->appendSuccessor(candidate));
}
return shareAssignmentAcrossSuccessors(block);
}
[[nodiscard]]
Error shareAssignmentAcrossSuccessors(RABlock* block) noexcept {
if (block->successors().size() <= 1)
if (block->successors().size() <= 1) {
return kErrorOk;
}
RABlock* consecutive = block->consecutive();
uint32_t sharedAssignmentId = Globals::kInvalidId;
for (RABlock* successor : block->successors()) {
if (successor == consecutive)
if (successor == consecutive) {
continue;
}
if (successor->hasSharedAssignmentId()) {
if (sharedAssignmentId == Globals::kInvalidId)
if (sharedAssignmentId == Globals::kInvalidId) {
sharedAssignmentId = successor->sharedAssignmentId();
else
}
else {
_sharedAssignmentsMap[successor->sharedAssignmentId()] = sharedAssignmentId;
}
}
else {
if (sharedAssignmentId == Globals::kInvalidId)
if (sharedAssignmentId == Globals::kInvalidId) {
ASMJIT_PROPAGATE(newSharedAssignmentId(&sharedAssignmentId));
}
successor->setSharedAssignmentId(sharedAssignmentId);
}
}
return kErrorOk;
}
[[nodiscard]]
Error newSharedAssignmentId(uint32_t* out) noexcept {
uint32_t id = _sharedAssignmentsMap.size();
ASMJIT_PROPAGATE(_sharedAssignmentsMap.append(_pass->allocator(), id));
@@ -567,18 +589,21 @@ public:
#ifndef ASMJIT_NO_LOGGING
template<typename... Args>
inline void log(const char* fmt, Args&&... args) noexcept {
if (_logger)
if (_logger) {
_logger->logf(fmt, std::forward<Args>(args)...);
}
}
inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
if (_logger)
if (_logger) {
_logBlock(block, indentation);
}
}
inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
if (_logger)
if (_logger) {
_logNode(node, indentation, action);
}
}
void _logBlock(RABlock* block, uint32_t indentation) noexcept {

View File

@@ -44,8 +44,8 @@ class RABlock;
class BaseNode;
struct RAStackSlot;
typedef ZoneVector<RABlock*> RABlocks;
typedef ZoneVector<RAWorkReg*> RAWorkRegs;
using RABlocks = ZoneVector<RABlock*>;
using RAWorkRegs = ZoneVector<RAWorkReg*>;
//! Maximum number of consecutive registers aggregated from all supported backends.
static constexpr uint32_t kMaxConsecutiveRegs = 4;
@@ -60,6 +60,7 @@ public:
//! \}
[[nodiscard]]
ASMJIT_NOINLINE Error init(Arch arch) noexcept {
switch (arch) {
case Arch::kX86:
@@ -85,6 +86,7 @@ public:
}
}
[[nodiscard]]
inline RegMask availableRegs(RegGroup group) const noexcept { return _availableRegs[group]; }
};
@@ -120,14 +122,23 @@ struct RAStrategy {
_flags = RAStrategyFlags::kNone;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAStrategyType type() const noexcept { return _type; }
ASMJIT_INLINE_NODEBUG void setType(RAStrategyType type) noexcept { _type = type; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isSimple() const noexcept { return _type == RAStrategyType::kSimple; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isComplex() const noexcept { return _type >= RAStrategyType::kComplex; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAStrategyFlags flags() const noexcept { return _flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(RAStrategyFlags flag) const noexcept { return Support::test(_flags, flag); }
ASMJIT_INLINE_NODEBUG void addFlags(RAStrategyFlags flags) noexcept { _flags |= flags; }
//! \}
@@ -160,17 +171,22 @@ struct RARegCount {
//! \name Overloaded Operators
//! \{
[[nodiscard]]
inline uint8_t& operator[](RegGroup group) noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _regs[size_t(group)];
}
[[nodiscard]]
inline const uint8_t& operator[](RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _regs[size_t(group)];
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const RARegCount& other) const noexcept { return _packed == other._packed; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const RARegCount& other) const noexcept { return _packed != other._packed; }
//! \}
@@ -179,6 +195,7 @@ struct RARegCount {
//! \{
//! Returns the count of registers by the given register `group`.
[[nodiscard]]
inline uint32_t get(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
@@ -210,7 +227,7 @@ struct RARegCount {
//! Provides mapping that can be used to fast index architecture register groups.
struct RARegIndex : public RARegCount {
//! Build register indexes based on the given `count` of registers.
ASMJIT_FORCE_INLINE void buildIndexes(const RARegCount& count) noexcept {
ASMJIT_INLINE void buildIndexes(const RARegCount& count) noexcept {
uint32_t x = uint32_t(count._regs[0]);
uint32_t y = uint32_t(count._regs[1]) + x;
uint32_t z = uint32_t(count._regs[2]) + y;
@@ -253,13 +270,18 @@ struct RARegMask {
//! \name Overloaded Operators
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const RARegMask& other) const noexcept { return _masks == other._masks; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const RARegMask& other) const noexcept { return _masks != other._masks; }
template<typename Index>
[[nodiscard]]
inline uint32_t& operator[](const Index& index) noexcept { return _masks[index]; }
template<typename Index>
[[nodiscard]]
inline const uint32_t& operator[](const Index& index) const noexcept { return _masks[index]; }
//! \}
@@ -268,10 +290,12 @@ struct RARegMask {
//! \{
//! Tests whether all register masks are zero (empty).
[[nodiscard]]
inline bool empty() const noexcept {
return _masks.aggregate<Support::Or>() == 0;
}
[[nodiscard]]
inline bool has(RegGroup group, RegMask mask = 0xFFFFFFFFu) const noexcept {
return (_masks[group] & mask) != 0;
}
@@ -329,16 +353,28 @@ public:
ASMJIT_INLINE_NODEBUG void reset() noexcept { _packed = 0; }
ASMJIT_INLINE_NODEBUG void combineWith(const RARegsStats& other) noexcept { _packed |= other._packed; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUsed() const noexcept { return (_packed & kMaskUsed) != 0u; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUsed(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexUsed + uint32_t(group))) != 0u; }
ASMJIT_INLINE_NODEBUG void makeUsed(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexUsed + uint32_t(group)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFixed() const noexcept { return (_packed & kMaskFixed) != 0u; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFixed(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexFixed + uint32_t(group))) != 0u; }
ASMJIT_INLINE_NODEBUG void makeFixed(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexFixed + uint32_t(group)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasClobbered() const noexcept { return (_packed & kMaskClobbered) != 0u; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasClobbered(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexClobbered + uint32_t(group))) != 0u; }
ASMJIT_INLINE_NODEBUG void makeClobbered(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexClobbered + uint32_t(group)); }
//! \}
@@ -370,8 +406,11 @@ public:
ASMJIT_INLINE_NODEBUG RALiveCount& operator=(const RALiveCount& other) noexcept = default;
inline uint32_t& operator[](RegGroup group) noexcept { return n[group]; }
inline const uint32_t& operator[](RegGroup group) const noexcept { return n[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t& operator[](RegGroup group) noexcept { return n[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint32_t& operator[](RegGroup group) const noexcept { return n[group]; }
//! \}
@@ -388,10 +427,8 @@ struct RALiveInterval {
//! \name Constants
//! \{
enum : uint32_t {
kNaN = 0,
kInf = 0xFFFFFFFFu
};
static inline constexpr uint32_t kNaN = 0;
static inline constexpr uint32_t kInf = 0xFFFFFFFFu;
//! \}
@@ -428,7 +465,10 @@ struct RALiveInterval {
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isValid() const noexcept { return a < b; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t width() const noexcept { return b - a; }
//! \}
@@ -441,7 +481,7 @@ public:
//! \name Types
//! \{
typedef T DataType;
using DataType = T;
//! \}
@@ -488,9 +528,20 @@ class RALiveSpans {
public:
ASMJIT_NONCOPYABLE(RALiveSpans)
typedef typename T::DataType DataType;
//! \name Types
//! \{
using DataType = typename T::DataType;
//! \}
//! \name Members
//! \{
ZoneVector<T> _data;
//! \}
//! \name Construction & Destruction
//! \{
@@ -504,12 +555,19 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _data.empty(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _data.size(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* data() noexcept { return _data.data(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return _data.data(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isOpen() const noexcept {
uint32_t size = _data.size();
return size > 0 && _data[size - 1].b == RALiveInterval::kInf;
@@ -523,12 +581,12 @@ public:
ASMJIT_INLINE_NODEBUG void swap(RALiveSpans<T>& other) noexcept { _data.swap(other._data); }
//! Open the current live span.
ASMJIT_FORCE_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end) noexcept {
ASMJIT_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end) noexcept {
bool wasOpen;
return openAt(allocator, start, end, wasOpen);
}
ASMJIT_FORCE_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end, bool& wasOpen) noexcept {
ASMJIT_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end, bool& wasOpen) noexcept {
uint32_t size = _data.size();
wasOpen = false;
@@ -544,7 +602,7 @@ public:
return _data.append(allocator, T(start, end));
}
ASMJIT_FORCE_INLINE void closeAt(uint32_t end) noexcept {
ASMJIT_INLINE void closeAt(uint32_t end) noexcept {
ASMJIT_ASSERT(!empty());
uint32_t size = _data.size();
@@ -561,14 +619,19 @@ public:
return width;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T& operator[](uint32_t index) noexcept { return _data[index]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T& operator[](uint32_t index) const noexcept { return _data[index]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool intersects(const RALiveSpans<T>& other) const noexcept {
return intersects(*this, other);
}
ASMJIT_FORCE_INLINE Error nonOverlappingUnionOf(ZoneAllocator* allocator, const RALiveSpans<T>& x, const RALiveSpans<T>& y, const DataType& yData) noexcept {
[[nodiscard]]
ASMJIT_INLINE Error nonOverlappingUnionOf(ZoneAllocator* allocator, const RALiveSpans<T>& x, const RALiveSpans<T>& y, const DataType& yData) noexcept {
uint32_t finalSize = x.size() + y.size();
ASMJIT_PROPAGATE(_data.growingReserve(allocator, finalSize));
@@ -588,21 +651,24 @@ public:
while (ySpan->b <= xa) {
dstPtr->init(*ySpan, yData);
dstPtr++;
if (++ySpan == yEnd)
if (++ySpan == yEnd) {
goto Done;
}
}
ya = ySpan->a;
while (xSpan->b <= ya) {
*dstPtr++ = *xSpan;
if (++xSpan == xEnd)
if (++xSpan == xEnd) {
goto Done;
}
}
// We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`.
xa = xSpan->a;
if (ySpan->b > xa)
if (ySpan->b > xa) {
return 0xFFFFFFFFu;
}
}
}
@@ -621,7 +687,8 @@ public:
return kErrorOk;
}
static ASMJIT_FORCE_INLINE bool intersects(const RALiveSpans<T>& x, const RALiveSpans<T>& y) noexcept {
[[nodiscard]]
static ASMJIT_INLINE bool intersects(const RALiveSpans<T>& x, const RALiveSpans<T>& y) noexcept {
const T* xSpan = x.data();
const T* ySpan = y.data();
@@ -630,26 +697,32 @@ public:
// Loop until we have intersection or either `xSpan == xEnd` or `ySpan == yEnd`, which means that there is no
// intersection. We advance either `xSpan` or `ySpan` depending on their end positions.
if (xSpan == xEnd || ySpan == yEnd)
if (xSpan == xEnd || ySpan == yEnd) {
return false;
}
uint32_t xa, ya;
xa = xSpan->a;
for (;;) {
while (ySpan->b <= xa)
if (++ySpan == yEnd)
while (ySpan->b <= xa) {
if (++ySpan == yEnd) {
return false;
}
}
ya = ySpan->a;
while (xSpan->b <= ya)
if (++xSpan == xEnd)
while (xSpan->b <= ya) {
if (++xSpan == xEnd) {
return false;
}
}
// We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`.
xa = xSpan->a;
if (ySpan->b > xa)
if (ySpan->b > xa) {
return true;
}
}
}
@@ -666,8 +739,13 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t width() const noexcept { return _width; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG float freq() const noexcept { return _freq; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG float priority() const noexcept { return _priority; }
//! \}
@@ -681,12 +759,15 @@ struct LiveRegData {
ASMJIT_INLINE_NODEBUG void init(const LiveRegData& other) noexcept { id = other.id; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const LiveRegData& other) const noexcept { return id == other.id; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const LiveRegData& other) const noexcept { return id != other.id; }
};
typedef RALiveSpan<LiveRegData> LiveRegSpan;
typedef RALiveSpans<LiveRegSpan> LiveRegSpans;
using LiveRegSpan = RALiveSpan<LiveRegData>;
using LiveRegSpans = RALiveSpans<LiveRegSpan>;
//! Flags used by \ref RATiedReg.
//!
@@ -875,53 +956,86 @@ struct RATiedReg {
//! \{
//! Returns the associated WorkReg id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t workId() const noexcept { return _workId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasConsecutiveParent() const noexcept { return _consecutiveParent != Globals::kInvalidId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t consecutiveParent() const noexcept { return _consecutiveParent; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t consecutiveData() const noexcept { return consecutiveDataFromFlags(_flags); }
//! Returns TiedReg flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedFlags flags() const noexcept { return _flags; }
//! Checks if the given `flag` is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(RATiedFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Adds tied register flags.
ASMJIT_INLINE_NODEBUG void addFlags(RATiedFlags flags) noexcept { _flags |= flags; }
//! Tests whether the register is read (writes `true` also if it's Read/Write).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasFlag(RATiedFlags::kRead); }
//! Tests whether the register is written (writes `true` also if it's Read/Write).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasFlag(RATiedFlags::kWrite); }
//! Tests whether the register is read only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kRead; }
//! Tests whether the register is write only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kWrite; }
//! Tests whether the register is read and written.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kRW; }
//! Tests whether the tied register has use operand (Read/ReadWrite).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUse() const noexcept { return hasFlag(RATiedFlags::kUse); }
//! Tests whether the tied register has out operand (Write).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isOut() const noexcept { return hasFlag(RATiedFlags::kOut); }
//! Tests whether the tied register has \ref RATiedFlags::kLeadConsecutive flag set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLeadConsecutive() const noexcept { return hasFlag(RATiedFlags::kLeadConsecutive); }
//! Tests whether the tied register has \ref RATiedFlags::kUseConsecutive flag set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUseConsecutive() const noexcept { return hasFlag(RATiedFlags::kUseConsecutive); }
//! Tests whether the tied register has \ref RATiedFlags::kOutConsecutive flag set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isOutConsecutive() const noexcept { return hasFlag(RATiedFlags::kOutConsecutive); }
//! Tests whether the tied register must be unique (cannot be allocated to any other allocated register).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasFlag(RATiedFlags::kUnique); }
//! Tests whether the tied register has any consecutive flag.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAnyConsecutiveFlag() const noexcept { return hasFlag(RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive); }
//! Tests whether the USE slot can be patched to memory operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUseRM() const noexcept { return hasFlag(RATiedFlags::kUseRM); }
//! Tests whether the OUT slot can be patched to memory operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOutRM() const noexcept { return hasFlag(RATiedFlags::kOutRM); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; }
inline void makeReadOnly() noexcept {
@@ -937,46 +1051,70 @@ struct RATiedReg {
}
//! Tests whether the register would duplicate.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDuplicate() const noexcept { return hasFlag(RATiedFlags::kDuplicate); }
//! Tests whether the register (and the instruction it's part of) appears last in the basic block.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLast() const noexcept { return hasFlag(RATiedFlags::kLast); }
//! Tests whether the register should be killed after USEd and/or OUTed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isKill() const noexcept { return hasFlag(RATiedFlags::kKill); }
//! Tests whether the register is OUT or KILL (used internally by local register allocator).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isOutOrKill() const noexcept { return hasFlag(RATiedFlags::kOut | RATiedFlags::kKill); }
//! Returns a register mask that describes allocable USE registers (Read/ReadWrite access).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask useRegMask() const noexcept { return _useRegMask; }
//! Returns a register mask that describes allocable OUT registers (WriteOnly access).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask outRegMask() const noexcept { return _outRegMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t refCount() const noexcept { return _refCount; }
ASMJIT_INLINE_NODEBUG void addRefCount(uint32_t n = 1) noexcept { _refCount = uint8_t(_refCount + n); }
//! Tests whether the register must be allocated to a fixed physical register before it's used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUseId() const noexcept { return _useId != BaseReg::kIdBad; }
//! Tests whether the register must be allocated to a fixed physical register before it's written.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOutId() const noexcept { return _outId != BaseReg::kIdBad; }
//! Returns a physical register id used for 'use' operation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t useId() const noexcept { return _useId; }
//! Returns a physical register id used for 'out' operation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t outId() const noexcept { return _outId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t useRewriteMask() const noexcept { return _useRewriteMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t outRewriteMask() const noexcept { return _outRewriteMask; }
//! Sets a physical register used for 'use' operation.
ASMJIT_INLINE_NODEBUG void setUseId(uint32_t index) noexcept { _useId = uint8_t(index); }
//! Sets a physical register used for 'out' operation.
ASMJIT_INLINE_NODEBUG void setOutId(uint32_t index) noexcept { _outId = uint8_t(index); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUseDone() const noexcept { return hasFlag(RATiedFlags::kUseDone); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isOutDone() const noexcept { return hasFlag(RATiedFlags::kOutDone); }
ASMJIT_INLINE_NODEBUG void markUseDone() noexcept { addFlags(RATiedFlags::kUseDone); }
ASMJIT_INLINE_NODEBUG void markOutDone() noexcept { addFlags(RATiedFlags::kOutDone); }
//! \}
@@ -1024,13 +1162,8 @@ public:
//! \name Constants
//! \{
enum : uint32_t {
kIdNone = 0xFFFFFFFFu
};
enum : uint32_t {
kNoArgIndex = 0xFFu
};
static inline constexpr uint32_t kIdNone = 0xFFFFFFFFu;
static inline constexpr uint32_t kNoArgIndex = 0xFFu;
//! \}
@@ -1128,22 +1261,38 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t workId() const noexcept { return _workId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t virtId() const noexcept { return _virtId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _virtReg->name(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _virtReg->nameSize(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _virtReg->typeId(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkRegFlags flags() const noexcept { return _flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(RAWorkRegFlags flag) const noexcept { return Support::test(_flags, flag); }
ASMJIT_INLINE_NODEBUG void addFlags(RAWorkRegFlags flags) noexcept { _flags |= flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return hasFlag(RAWorkRegFlags::kAllocated); }
ASMJIT_INLINE_NODEBUG void markAllocated() noexcept { addFlags(RAWorkRegFlags::kAllocated); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWithinSingleBasicBlock() const noexcept { return !hasFlag(RAWorkRegFlags::kMultipleBasicBlocks); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t singleBasicBlockId() const noexcept { return _singleBasicBlockId; }
//! Called when this register appeared in a basic block having `blockId`.
@@ -1160,43 +1309,77 @@ public:
addFlags(RAWorkRegFlags::kMultipleBasicBlocks);
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLeadConsecutive() const noexcept { return hasFlag(RAWorkRegFlags::kLeadConsecutive); }
ASMJIT_INLINE_NODEBUG void markLeadConsecutive() noexcept { addFlags(RAWorkRegFlags::kLeadConsecutive); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isProcessedConsecutive() const noexcept { return hasFlag(RAWorkRegFlags::kProcessedConsecutive); }
ASMJIT_INLINE_NODEBUG void markProcessedConsecutive() noexcept { addFlags(RAWorkRegFlags::kProcessedConsecutive); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isStackUsed() const noexcept { return hasFlag(RAWorkRegFlags::kStackUsed); }
ASMJIT_INLINE_NODEBUG void markStackUsed() noexcept { addFlags(RAWorkRegFlags::kStackUsed); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isStackPreferred() const noexcept { return hasFlag(RAWorkRegFlags::kStackPreferred); }
ASMJIT_INLINE_NODEBUG void markStackPreferred() noexcept { addFlags(RAWorkRegFlags::kStackPreferred); }
//! Tests whether this RAWorkReg has been coalesced with another one (cannot be used anymore).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isCoalesced() const noexcept { return hasFlag(RAWorkRegFlags::kCoalesced); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtReg* virtReg() const noexcept { return _virtReg; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasTiedReg() const noexcept { return _tiedReg != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedReg() const noexcept { return _tiedReg; }
ASMJIT_INLINE_NODEBUG void setTiedReg(RATiedReg* tiedReg) noexcept { _tiedReg = tiedReg; }
ASMJIT_INLINE_NODEBUG void resetTiedReg() noexcept { _tiedReg = nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return _stackSlot != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAStackSlot* stackSlot() const noexcept { return _stackSlot; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LiveRegSpans& liveSpans() noexcept { return _liveSpans; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const LiveRegSpans& liveSpans() const noexcept { return _liveSpans; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RALiveStats& liveStats() noexcept { return _liveStats; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RALiveStats& liveStats() const noexcept { return _liveStats; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasArgIndex() const noexcept { return _argIndex != kNoArgIndex; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argIndex() const noexcept { return _argIndex; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argValueIndex() const noexcept { return _argValueIndex; }
inline void setArgIndex(uint32_t argIndex, uint32_t valueIndex) noexcept {
@@ -1204,39 +1387,71 @@ public:
_argValueIndex = uint8_t(valueIndex);
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasHomeRegId() const noexcept { return _homeRegId != BaseReg::kIdBad; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t homeRegId() const noexcept { return _homeRegId; }
ASMJIT_INLINE_NODEBUG void setHomeRegId(uint32_t physId) noexcept { _homeRegId = uint8_t(physId); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasHintRegId() const noexcept { return _hintRegId != BaseReg::kIdBad; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t hintRegId() const noexcept { return _hintRegId; }
ASMJIT_INLINE_NODEBUG void setHintRegId(uint32_t physId) noexcept { _hintRegId = uint8_t(physId); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask useIdMask() const noexcept { return _useIdMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUseIdMask() const noexcept { return _useIdMask != 0u; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasMultipleUseIds() const noexcept { return _useIdMask != 0u && !Support::isPowerOf2(_useIdMask); }
ASMJIT_INLINE_NODEBUG void addUseIdMask(RegMask mask) noexcept { _useIdMask |= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask preferredMask() const noexcept { return _preferredMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPreferredMask() const noexcept { return _preferredMask != 0xFFFFFFFFu; }
ASMJIT_INLINE_NODEBUG void restrictPreferredMask(RegMask mask) noexcept { _preferredMask &= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask consecutiveMask() const noexcept { return _consecutiveMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasConsecutiveMask() const noexcept { return _consecutiveMask != 0xFFFFFFFFu; }
ASMJIT_INLINE_NODEBUG void restrictConsecutiveMask(RegMask mask) noexcept { _consecutiveMask &= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask clobberSurvivalMask() const noexcept { return _clobberSurvivalMask; }
ASMJIT_INLINE_NODEBUG void addClobberSurvivalMask(RegMask mask) noexcept { _clobberSurvivalMask |= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask allocatedMask() const noexcept { return _allocatedMask; }
ASMJIT_INLINE_NODEBUG void addAllocatedMask(RegMask mask) noexcept { _allocatedMask |= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t regByteMask() const noexcept { return _regByteMask; }
ASMJIT_INLINE_NODEBUG void setRegByteMask(uint64_t mask) noexcept { _regByteMask = mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasImmediateConsecutives() const noexcept { return !_immediateConsecutives.empty(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& immediateConsecutives() const noexcept { return _immediateConsecutives; }
[[nodiscard]]
inline Error addImmediateConsecutive(ZoneAllocator* allocator, uint32_t workId) noexcept {
if (_immediateConsecutives.size() <= workId)
ASMJIT_PROPAGATE(_immediateConsecutives.resize(allocator, workId + 1));

View File

@@ -14,7 +14,7 @@ ASMJIT_BEGIN_NAMESPACE
// RALocalAllocator - Utilities
// ============================
static ASMJIT_FORCE_INLINE RATiedReg* RALocal_findTiedRegByWorkId(RATiedReg* tiedRegs, size_t count, uint32_t workId) noexcept {
static ASMJIT_INLINE RATiedReg* RALocal_findTiedRegByWorkId(RATiedReg* tiedRegs, size_t count, uint32_t workId) noexcept {
for (size_t i = 0; i < count; i++)
if (tiedRegs[i].workId() == workId)
return &tiedRegs[i];
@@ -65,24 +65,28 @@ Error RALocalAllocator::makeInitialAssignment() noexcept {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
// Unassigned argument.
const RegOnly& regArg = func->argPack(argIndex)[valueIndex];
if (!regArg.isReg() || !_cc->isVirtIdValid(regArg.id()))
if (!regArg.isReg() || !_cc->isVirtIdValid(regArg.id())) {
continue;
}
VirtReg* virtReg = _cc->virtRegById(regArg.id());
// Unreferenced argument.
RAWorkReg* workReg = virtReg->workReg();
if (!workReg)
if (!workReg) {
continue;
}
// Overwritten argument.
uint32_t workId = workReg->workId();
if (!liveIn.bitAt(workId))
if (!liveIn.bitAt(workId)) {
continue;
}
RegGroup group = workReg->group();
if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone)
if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone) {
continue;
}
RegMask allocableRegs = _availableRegs[group] & ~_curAssignment.assigned(group);
if (iter == 0) {
@@ -109,8 +113,9 @@ Error RALocalAllocator::makeInitialAssignment() noexcept {
// This register will definitely need stack, create the slot now and assign also `argIndex`
// to it. We will patch `_argsAssignment` later after RAStackAllocator finishes.
RAStackSlot* slot = _pass->getOrCreateStackSlot(workReg);
if (ASMJIT_UNLIKELY(!slot))
if (ASMJIT_UNLIKELY(!slot)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
// This means STACK_ARG may be moved to STACK.
workReg->addFlags(RAWorkRegFlags::kStackArgToStack);
@@ -182,8 +187,9 @@ Error RALocalAllocator::switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, cons
while (affectedRegs) {
if (++runId == 2) {
if (!tryMode)
if (!tryMode) {
return DebugUtils::errored(kErrorInvalidState);
}
// Stop in `tryMode` if we haven't done anything in past two rounds.
break;
@@ -204,12 +210,14 @@ Error RALocalAllocator::switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, cons
// Both assigned.
if (curWorkId != dstWorkId) {
// Wait a bit if this is the first run, we may avoid this if `curWorkId` moves out.
if (runId <= 0)
if (runId <= 0) {
continue;
}
uint32_t altPhysId = cur.workToPhysId(group, dstWorkId);
if (altPhysId == RAAssignment::kPhysNone)
if (altPhysId == RAAssignment::kPhysNone) {
continue;
}
// Reset as we will do some changes to the current assignment.
runId = -1;
@@ -220,14 +228,15 @@ Error RALocalAllocator::switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, cons
else {
// SPILL the reg if it's not dirty in DST, otherwise try to MOVE.
if (!cur.isPhysDirty(group, physId)) {
ASMJIT_PROPAGATE(onKillReg(group, curWorkId, physId));
onKillReg(group, curWorkId, physId);
}
else {
RegMask allocableRegs = _pass->_availableRegs[group] & ~cur.assigned(group);
// If possible don't conflict with assigned regs at DST.
if (allocableRegs & ~dst.assigned(group))
if (allocableRegs & ~dst.assigned(group)) {
allocableRegs &= ~dst.assigned(group);
}
if (allocableRegs) {
// MOVE is possible, thus preferred.
@@ -251,8 +260,9 @@ Cleared:
// DST assigned, CUR unassigned.
uint32_t altPhysId = cur.workToPhysId(group, dstWorkId);
if (altPhysId == RAAssignment::kPhysNone) {
if (liveIn.bitAt(dstWorkId))
if (liveIn.bitAt(dstWorkId)) {
willLoadRegs |= physMask; // Scheduled for `onLoadReg()`.
}
affectedRegs &= ~physMask; // Unaffected from now.
continue;
}
@@ -267,10 +277,12 @@ Cleared:
// If `dstReadOnly` is true it means that that block was already processed and we cannot change from
// CLEAN to DIRTY. In that case the register has to be saved as it cannot enter the block DIRTY.
if (dstReadOnly)
if (dstReadOnly) {
ASMJIT_PROPAGATE(onSaveReg(group, dstWorkId, physId));
else
}
else {
dst.makeDirty(group, dstWorkId, physId);
}
}
else {
// DST dirty, CUR not dirty (the assert is just to visualize the condition).
@@ -306,8 +318,9 @@ Cleared:
ASMJIT_ASSERT(liveIn.bitAt(workId) == true);
ASMJIT_PROPAGATE(onLoadReg(group, workId, physId));
if (dst.isPhysDirty(group, physId))
if (dst.isPhysDirty(group, physId)) {
cur.makeDirty(group, workId, physId);
}
ASMJIT_ASSERT(dst.isPhysDirty(group, physId) == cur.isPhysDirty(group, physId));
}
else {
@@ -413,19 +426,22 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (tiedReg->hasAnyConsecutiveFlag()) {
uint32_t consecutiveOffset = tiedReg->isLeadConsecutive() ? uint32_t(0) : tiedReg->consecutiveData();
if (ASMJIT_UNLIKELY(Support::bitTest(consecutiveMask, consecutiveOffset)))
if (ASMJIT_UNLIKELY(Support::bitTest(consecutiveMask, consecutiveOffset))) {
return DebugUtils::errored(kErrorInvalidState);
}
consecutiveMask |= Support::bitMask(consecutiveOffset);
consecutiveRegs[consecutiveOffset] = tiedReg;
}
// Add OUT and KILL to `outPending` for CLOBBERing and/or OUT assignment.
if (tiedReg->isOutOrKill())
if (tiedReg->isOutOrKill()) {
outTiedRegs[outTiedCount++] = tiedReg;
}
if (tiedReg->isDuplicate())
if (tiedReg->isDuplicate()) {
dupTiedRegs[dupTiedCount++] = tiedReg;
}
if (!tiedReg->isUse()) {
tiedReg->markUseDone();
@@ -434,8 +450,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
}
// Don't assign anything here if this is a consecutive USE - we will handle this in STEP 2 instead.
if (tiedReg->isUseConsecutive())
if (tiedReg->isUseConsecutive()) {
continue;
}
uint32_t workId = tiedReg->workId();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
@@ -450,8 +467,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (assignedId == tiedReg->useId()) {
// If the register is already allocated in this one, mark it done and continue.
tiedReg->markUseDone();
if (tiedReg->isWrite())
if (tiedReg->isWrite()) {
_curAssignment.makeDirty(group, workId, assignedId);
}
usePending--;
willUse |= useMask;
}
@@ -467,8 +485,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if ((allocableRegs & ~willUse) & assignedMask) {
tiedReg->setUseId(assignedId);
tiedReg->markUseDone();
if (tiedReg->isWrite())
if (tiedReg->isWrite()) {
_curAssignment.makeDirty(group, workId, assignedId);
}
usePending--;
willUse |= assignedMask;
}
@@ -488,8 +507,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
uint32_t consecutiveCount = 0;
if (consecutiveMask) {
if ((consecutiveMask & (consecutiveMask + 1u)) != 0)
if ((consecutiveMask & (consecutiveMask + 1u)) != 0) {
return DebugUtils::errored(kErrorInvalidState);
}
// Count of trailing ones is the count of consecutive registers. There cannot be gap.
consecutiveCount = Support::ctz(~consecutiveMask);
@@ -505,8 +525,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
uint32_t assignments[kMaxConsecutiveRegs];
for (i = 0; i < consecutiveCount; i++)
for (i = 0; i < consecutiveCount; i++) {
assignments[i] = _curAssignment.workToPhysId(group, consecutiveRegs[i]->workId());
}
Support::BitWordIterator<uint32_t> it(lead->useRegMask());
while (it.hasNext()) {
@@ -533,8 +554,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
}
}
if (bestLeadReg == 0xFFFFFFFF)
if (bestLeadReg == 0xFFFFFFFF) {
return DebugUtils::errored(kErrorConsecutiveRegsAllocation);
}
for (i = 0; i < consecutiveCount; i++) {
uint32_t consecutiveIndex = bestLeadReg + i;
@@ -550,8 +572,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (assignedId == consecutiveIndex) {
// If the register is already allocated in this one, mark it done and continue.
tiedReg->markUseDone();
if (tiedReg->isWrite())
if (tiedReg->isWrite()) {
_curAssignment.makeDirty(group, workId, assignedId);
}
usePending--;
willUse |= useMask;
}
@@ -578,8 +601,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
for (i = 0; i < count; i++) {
RATiedReg* tiedReg = &tiedRegs[i];
if (tiedReg->isUseDone())
if (tiedReg->isUseDone()) {
continue;
}
uint32_t workId = tiedReg->workId();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
@@ -630,8 +654,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (!(liveRegs & useMask)) {
ASMJIT_PROPAGATE(onMoveReg(group, workId, useId, assignedId));
tiedReg->markUseDone();
if (tiedReg->isWrite())
if (tiedReg->isWrite()) {
_curAssignment.makeDirty(group, workId, useId);
}
usePending--;
}
}
@@ -640,8 +665,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (!(liveRegs & useMask)) {
ASMJIT_PROPAGATE(onLoadReg(group, workId, useId));
tiedReg->markUseDone();
if (tiedReg->isWrite())
if (tiedReg->isWrite()) {
_curAssignment.makeDirty(group, workId, useId);
}
usePending--;
}
}
@@ -743,8 +769,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
continue;
}
if (!mustSwap)
if (!mustSwap) {
continue;
}
// Only branched here if the previous iteration did nothing. This is essentially a SWAP operation without
// having a dedicated instruction for that purpose (vector registers, etc...). The simplest way to handle
@@ -805,7 +832,7 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
// Must check if it's allocated as KILL can be related to OUT (like KILL immediately after OUT, which could
// mean the register is not assigned).
if (physId != RAAssignment::kPhysNone) {
ASMJIT_PROPAGATE(onKillReg(group, workId, physId));
onKillReg(group, workId, physId);
willOut &= ~Support::bitMask(physId);
}
@@ -851,7 +878,7 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (dstId == srcId) {
continue;
}
_pass->emitMove(workId, dstId, srcId);
ASMJIT_PROPAGATE(_pass->emitMove(workId, dstId, srcId));
}
}
@@ -947,18 +974,20 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
// Allocate OUT registers.
for (i = 0; i < outTiedCount; i++) {
RATiedReg* tiedReg = outTiedRegs[i];
if (!tiedReg->isOut())
if (!tiedReg->isOut()) {
continue;
}
RegMask avoidOut = avoidRegs;
if (tiedReg->isUnique())
if (tiedReg->isUnique()) {
avoidOut |= willUse;
}
uint32_t workId = tiedReg->workId();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
if (assignedId != RAAssignment::kPhysNone) {
ASMJIT_PROPAGATE(onKillReg(group, workId, assignedId));
onKillReg(group, workId, assignedId);
}
uint32_t physId = tiedReg->outId();
@@ -1087,16 +1116,18 @@ Error RALocalAllocator::allocJumpTable(InstNode* node, const RABlocks& targets,
// TODO: Do we really need to use `cont`?
DebugUtils::unused(cont);
if (targets.empty())
if (targets.empty()) {
return DebugUtils::errored(kErrorInvalidState);
}
// The cursor must point to the previous instruction for a possible instruction insertion.
_cc->_setCursor(node->prev());
// All `targets` should have the same sharedAssignmentId, we just read the first.
RABlock* anyTarget = targets[0];
if (!anyTarget->hasSharedAssignmentId())
if (!anyTarget->hasSharedAssignmentId()) {
return DebugUtils::errored(kErrorInvalidState);
}
RASharedAssignment& sharedAssignment = _pass->_sharedAssignments[anyTarget->sharedAssignmentId()];

View File

@@ -80,30 +80,47 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkReg* workRegById(uint32_t workId) const noexcept { return _pass->workRegById(workId); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _curAssignment.physToWorkMap(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _curAssignment.workToPhysMap(); }
//! Returns the currently processed block.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* block() const noexcept { return _block; }
//! Sets the currently processed block.
ASMJIT_INLINE_NODEBUG void setBlock(RABlock* block) noexcept { _block = block; }
//! Returns the currently processed `InstNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstNode* node() const noexcept { return _node; }
//! Returns the currently processed `RAInst`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAInst* raInst() const noexcept { return _raInst; }
//! Returns all tied regs as `RATiedReg` array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs() const noexcept { return _raInst->tiedRegs(); }
//! Returns tied registers grouped by the given `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs(RegGroup group) const noexcept { return _raInst->tiedRegs(group); }
//! Returns count of all TiedRegs used by the instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedCount() const noexcept { return _tiedTotal; }
//! Returns count of TiedRegs used by the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount.get(group); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isGroupUsed(RegGroup group) const noexcept { return _tiedCount[group] != 0; }
//! \}
@@ -111,8 +128,10 @@ public:
//! \name Assignment
//! \{
[[nodiscard]]
Error makeInitialAssignment() noexcept;
[[nodiscard]]
Error replaceAssignment(const PhysToWorkMap* physToWorkMap) noexcept;
//! Switch to the given assignment by reassigning all register and emitting code that reassigns them.
@@ -121,12 +140,15 @@ public:
//! If `tryMode` is true then the final assignment doesn't have to be exactly same as specified by `dstPhysToWorkMap`
//! and `dstWorkToPhysMap`. This mode is only used before conditional jumps that already have assignment to generate
//! a code sequence that is always executed regardless of the flow.
[[nodiscard]]
Error switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, const ZoneBitVector& liveIn, bool dstReadOnly, bool tryMode) noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Error spillRegsBeforeEntry(RABlock* block) noexcept {
return spillScratchGpRegsBeforeEntry(block->entryScratchGpRegs());
}
[[nodiscard]]
Error spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept;
//! \}
@@ -134,10 +156,16 @@ public:
//! \name Allocation
//! \{
[[nodiscard]]
Error allocInst(InstNode* node) noexcept;
[[nodiscard]]
Error spillAfterAllocation(InstNode* node) noexcept;
[[nodiscard]]
Error allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept;
[[nodiscard]]
Error allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept;
//! \}
@@ -150,11 +178,13 @@ public:
kCostOfDirtyFlag = kCostOfFrequency / 4
};
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t costByFrequency(float freq) const noexcept {
return uint32_t(int32_t(freq * float(kCostOfFrequency)));
}
ASMJIT_FORCE_INLINE uint32_t calculateSpillCost(RegGroup group, uint32_t workId, uint32_t assignedId) const noexcept {
[[nodiscard]]
ASMJIT_INLINE uint32_t calculateSpillCost(RegGroup group, uint32_t workId, uint32_t assignedId) const noexcept {
RAWorkReg* workReg = workRegById(workId);
uint32_t cost = costByFrequency(workReg->liveStats().freq());
@@ -164,7 +194,8 @@ public:
return cost;
}
ASMJIT_FORCE_INLINE uint32_t pickBestSuitableRegister(RegGroup group, RegMask allocableRegs) const noexcept {
[[nodiscard]]
ASMJIT_INLINE uint32_t pickBestSuitableRegister(RegGroup group, RegMask allocableRegs) const noexcept {
// These are registers must be preserved by the function itself.
RegMask preservedRegs = _funcPreservedRegs[group];
@@ -177,6 +208,7 @@ public:
}
//! Decides on register assignment.
[[nodiscard]]
uint32_t decideOnAssignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs) const noexcept;
//! Decides on whether to MOVE or SPILL the given WorkReg, because it's allocated in a physical register that have
@@ -185,9 +217,11 @@ public:
//! The function must return either `RAAssignment::kPhysNone`, which means that the WorkReg of `workId` should be
//! spilled, or a valid physical register ID, which means that the register should be moved to that physical register
//! instead.
[[nodiscard]]
uint32_t decideOnReassignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs, RAInst* raInst) const noexcept;
//! Decides on best spill given a register mask `spillableRegs`
[[nodiscard]]
uint32_t decideOnSpillFor(RegGroup group, uint32_t workId, RegMask spillableRegs, uint32_t* spillWorkId) const noexcept;
//! \}
@@ -197,6 +231,7 @@ public:
//! Emits a move between a destination and source register, and fixes the
//! register assignment.
[[nodiscard]]
inline Error onMoveReg(RegGroup group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
if (dstPhysId == srcPhysId) {
return kErrorOk;
@@ -209,6 +244,7 @@ public:
//! Emits a swap between two physical registers and fixes their assignment.
//!
//! \note Target must support this operation otherwise this would ASSERT.
[[nodiscard]]
inline Error onSwapReg(RegGroup group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
_curAssignment.swap(group, aWorkId, aPhysId, bWorkId, bPhysId);
return _pass->emitSwap(aWorkId, aPhysId, bWorkId, bPhysId);
@@ -216,6 +252,7 @@ public:
//! Emits a load from [VirtReg/WorkReg]'s spill slot to a physical register
//! and makes it assigned and clean.
[[nodiscard]]
inline Error onLoadReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.assign(group, workId, physId, RAAssignment::kClean);
return _pass->emitLoad(workId, physId);
@@ -223,6 +260,7 @@ public:
//! Emits a save a physical register to a [VirtReg/WorkReg]'s spill slot,
//! keeps it assigned, and makes it clean.
[[nodiscard]]
inline Error onSaveReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
ASMJIT_ASSERT(_curAssignment.workToPhysId(group, workId) == physId);
ASMJIT_ASSERT(_curAssignment.physToWorkId(group, physId) == workId);
@@ -232,26 +270,29 @@ public:
}
//! Assigns a register, the content of it is undefined at this point.
[[nodiscard]]
inline Error onAssignReg(RegGroup group, uint32_t workId, uint32_t physId, bool dirty) noexcept {
_curAssignment.assign(group, workId, physId, dirty);
return kErrorOk;
}
//! Spills a variable/register, saves the content to the memory-home if modified.
[[nodiscard]]
inline Error onSpillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
if (_curAssignment.isPhysDirty(group, physId))
ASMJIT_PROPAGATE(onSaveReg(group, workId, physId));
return onKillReg(group, workId, physId);
onKillReg(group, workId, physId);
return kErrorOk;
}
[[nodiscard]]
inline Error onDirtyReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.makeDirty(group, workId, physId);
return kErrorOk;
}
inline Error onKillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
inline void onKillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.unassign(group, workId, physId);
return kErrorOk;
}
//! \}

View File

@@ -251,8 +251,9 @@ RABlock* BaseRAPass::newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stopped
if (block) {
// Exit node has always a block associated with it. If we went here it means that `cbLabel` passed here
// is after the end of the function and cannot be merged with the function exit block.
if (node == func->exitNode())
if (node == func->exitNode()) {
block = nullptr;
}
break;
}
@@ -273,8 +274,9 @@ RABlock* BaseRAPass::newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stopped
if (!block) {
block = newBlock();
if (ASMJIT_UNLIKELY(!block))
if (ASMJIT_UNLIKELY(!block)) {
return nullptr;
}
}
cbLabel->setPassData<RABlock>(block);
@@ -402,13 +404,15 @@ Error BaseRAPass::buildCFGViews() noexcept {
for (;;) {
for (;;) {
if (i >= current->successors().size())
if (i >= current->successors().size()) {
break;
}
// Skip if already visited.
RABlock* child = current->successors()[i++];
if (visited.bitAt(child->blockId()))
if (visited.bitAt(child->blockId())) {
continue;
}
// Mark as visited to prevent visiting the same block multiple times.
visited.setBit(child->blockId(), true);
@@ -423,8 +427,9 @@ Error BaseRAPass::buildCFGViews() noexcept {
current->_povOrder = _pov.size();
_pov.appendUnsafe(current);
if (stack.empty())
if (stack.empty()) {
break;
}
RABlockVisitItem top = stack.pop();
current = top.block();
@@ -454,7 +459,7 @@ Error BaseRAPass::buildCFGViews() noexcept {
// BaseRAPass - CFG - Dominators
// =============================
static ASMJIT_FORCE_INLINE RABlock* intersectBlocks(RABlock* b1, RABlock* b2) noexcept {
static ASMJIT_INLINE RABlock* intersectBlocks(RABlock* b1, RABlock* b2) noexcept {
while (b1 != b2) {
while (b2->povOrder() > b1->povOrder()) b1 = b1->iDom();
while (b1->povOrder() > b2->povOrder()) b2 = b2->iDom();
@@ -469,8 +474,9 @@ Error BaseRAPass::buildCFGDominators() noexcept {
ASMJIT_RA_LOG_FORMAT("[BuildCFGDominators]\n");
#endif
if (_blocks.empty())
if (_blocks.empty()) {
return kErrorOk;
}
RABlock* entryBlock = this->entryBlock();
entryBlock->setIDom(entryBlock);
@@ -491,8 +497,9 @@ Error BaseRAPass::buildCFGDominators() noexcept {
uint32_t i = _pov.size();
while (i) {
RABlock* block = _pov[--i];
if (block == entryBlock)
if (block == entryBlock) {
continue;
}
RABlock* iDom = nullptr;
const RABlocks& preds = block->predecessors();
@@ -500,8 +507,9 @@ Error BaseRAPass::buildCFGDominators() noexcept {
uint32_t j = preds.size();
while (j) {
RABlock* p = preds[--j];
if (!p->iDom())
if (!p->iDom()) {
continue;
}
iDom = !iDom ? p : intersectBlocks(iDom, p);
}
@@ -525,12 +533,14 @@ bool BaseRAPass::_strictlyDominates(const RABlock* a, const RABlock* b) const no
// Nothing strictly dominates the entry block.
const RABlock* entryBlock = this->entryBlock();
if (a == entryBlock)
if (a == entryBlock) {
return false;
}
const RABlock* iDom = b->iDom();
while (iDom != a && iDom != entryBlock)
while (iDom != a && iDom != entryBlock) {
iDom = iDom->iDom();
}
return iDom != entryBlock;
}
@@ -540,16 +550,19 @@ const RABlock* BaseRAPass::_nearestCommonDominator(const RABlock* a, const RABlo
ASMJIT_ASSERT(b != nullptr); // called, as both `a` and `b` must be valid blocks.
ASMJIT_ASSERT(a != b); // Checked by `dominates()` and `properlyDominates()`.
if (a == b)
if (a == b) {
return a;
}
// If `a` strictly dominates `b` then `a` is the nearest common dominator.
if (_strictlyDominates(a, b))
if (_strictlyDominates(a, b)) {
return a;
}
// If `b` strictly dominates `a` then `b` is the nearest common dominator.
if (_strictlyDominates(b, a))
if (_strictlyDominates(b, a)) {
return b;
}
const RABlock* entryBlock = this->entryBlock();
uint64_t timestamp = nextTimestamp();
@@ -564,8 +577,9 @@ const RABlock* BaseRAPass::_nearestCommonDominator(const RABlock* a, const RABlo
// Check all B's dominators against marked dominators of A.
block = b->iDom();
while (block != entryBlock) {
if (block->hasTimestamp(timestamp))
if (block->hasTimestamp(timestamp)) {
return block;
}
block = block->iDom();
}
@@ -580,8 +594,9 @@ Error BaseRAPass::removeUnreachableCode() noexcept {
uint32_t numReachableBlocks = reachableBlockCount();
// All reachable -> nothing to do.
if (numAllBlocks == numReachableBlocks)
if (numAllBlocks == numReachableBlocks) {
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
StringTmp<256> sb;
@@ -591,8 +606,9 @@ Error BaseRAPass::removeUnreachableCode() noexcept {
for (uint32_t i = 0; i < numAllBlocks; i++) {
RABlock* block = _blocks[i];
if (block->isReachable())
if (block->isReachable()) {
continue;
}
ASMJIT_RA_LOG_FORMAT(" Removing code from unreachable block {%u}\n", i);
BaseNode* first = block->first();
@@ -632,22 +648,26 @@ Error BaseRAPass::removeUnreachableCode() noexcept {
}
BaseNode* BaseRAPass::findSuccessorStartingAt(BaseNode* node) noexcept {
while (node && (node->isInformative() || node->hasNoEffect()))
while (node && (node->isInformative() || node->hasNoEffect())) {
node = node->next();
}
return node;
}
bool BaseRAPass::isNextTo(BaseNode* node, BaseNode* target) noexcept {
for (;;) {
node = node->next();
if (node == target)
if (node == target) {
return true;
}
if (!node)
if (!node) {
return false;
}
if (node->isCode() || node->isData())
if (node->isCode() || node->isData()) {
return false;
}
}
}
@@ -668,12 +688,14 @@ Error BaseRAPass::_asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept {
ASMJIT_PROPAGATE(wRegsByGroup.willGrow(allocator()));
RAWorkReg* wReg = zone()->newT<RAWorkReg>(vReg, wRegs.size());
if (ASMJIT_UNLIKELY(!wReg))
if (ASMJIT_UNLIKELY(!wReg)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
vReg->setWorkReg(wReg);
if (!vReg->isStack())
if (!vReg->isStack()) {
wReg->setRegByteMask(Support::lsbMask<uint64_t>(vReg->virtSize()));
}
wRegs.appendUnsafe(wReg);
wRegsByGroup.appendUnsafe(wReg);
@@ -696,8 +718,9 @@ RAAssignment::WorkToPhysMap* BaseRAPass::newWorkToPhysMap() noexcept {
}
WorkToPhysMap* map = zone()->allocT<WorkToPhysMap>(size);
if (ASMJIT_UNLIKELY(!map))
if (ASMJIT_UNLIKELY(!map)) {
return nullptr;
}
map->reset(count);
return map;
@@ -708,8 +731,9 @@ RAAssignment::PhysToWorkMap* BaseRAPass::newPhysToWorkMap() noexcept {
size_t size = PhysToWorkMap::sizeOf(count);
PhysToWorkMap* map = zone()->allocT<PhysToWorkMap>(size);
if (ASMJIT_UNLIKELY(!map))
if (ASMJIT_UNLIKELY(!map)) {
return nullptr;
}
map->reset(count);
return map;
@@ -719,17 +743,17 @@ RAAssignment::PhysToWorkMap* BaseRAPass::newPhysToWorkMap() noexcept {
// =========================================================
namespace LiveOps {
typedef ZoneBitVector::BitWord BitWord;
using BitWord = ZoneBitVector::BitWord;
struct In {
static ASMJIT_FORCE_INLINE BitWord op(BitWord dst, BitWord out, BitWord gen, BitWord kill) noexcept {
static ASMJIT_INLINE BitWord op(BitWord dst, BitWord out, BitWord gen, BitWord kill) noexcept {
DebugUtils::unused(dst);
return (out | gen) & ~kill;
}
};
template<typename Operator>
static ASMJIT_FORCE_INLINE bool op(BitWord* dst, const BitWord* a, uint32_t n) noexcept {
static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, uint32_t n) noexcept {
BitWord changed = 0;
for (uint32_t i = 0; i < n; i++) {
@@ -744,7 +768,7 @@ namespace LiveOps {
}
template<typename Operator>
static ASMJIT_FORCE_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, uint32_t n) noexcept {
static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, uint32_t n) noexcept {
BitWord changed = 0;
for (uint32_t i = 0; i < n; i++) {
@@ -759,7 +783,7 @@ namespace LiveOps {
}
template<typename Operator>
static ASMJIT_FORCE_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, const BitWord* c, uint32_t n) noexcept {
static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, const BitWord* c, uint32_t n) noexcept {
BitWord changed = 0;
#if defined(_MSC_VER) && _MSC_VER <= 1938
@@ -787,12 +811,14 @@ namespace LiveOps {
uint32_t numSuccessors = successors.size();
// Calculate `OUT` based on `IN` of all successors.
for (uint32_t i = 0; i < numSuccessors; i++)
for (uint32_t i = 0; i < numSuccessors; i++) {
changed |= op<Support::Or>(block->liveOut().data(), successors[i]->liveIn().data(), numBitWords);
}
// Calculate `IN` based on `OUT`, `GEN`, and `KILL` bits.
if (changed)
if (changed) {
changed = op<In>(block->liveIn().data(), block->liveOut().data(), block->gen().data(), block->kill().data(), numBitWords);
}
return changed;
}
@@ -858,10 +884,12 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept {
// Mark as:
// KILL - if this VirtReg is killed afterwards.
// LAST - if this VirtReg is last in this basic block.
if (block->kill().bitAt(workId))
if (block->kill().bitAt(workId)) {
tiedReg->addFlags(RATiedFlags::kKill);
else if (!block->gen().bitAt(workId))
}
else if (!block->gen().bitAt(workId)) {
tiedReg->addFlags(RATiedFlags::kLast);
}
if (tiedReg->isWriteOnly()) {
// KILL.
@@ -887,8 +915,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept {
nInsts++;
}
if (node == stop)
if (node == stop) {
break;
}
node = node->prev();
ASMJIT_ASSERT(node != nullptr);
@@ -976,8 +1005,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept {
for (i = 0; i < numAllBlocks; i++) {
RABlock* block = _blocks[i];
if (!block->isReachable())
if (!block->isReachable()) {
continue;
}
uint32_t blockId = block->blockId();
@@ -1087,8 +1117,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept {
maxLiveCount.op<Support::Max>(raInst->_liveCount);
}
if (node == stop)
if (node == stop) {
break;
}
node = node->next();
ASMJIT_ASSERT(node != nullptr);
@@ -1134,22 +1165,26 @@ Error BaseRAPass::assignArgIndexToWorkRegs() noexcept {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
// Unassigned argument.
const RegOnly& regArg = func()->argPack(argIndex)[valueIndex];
if (!regArg.isReg() || !cc()->isVirtIdValid(regArg.id()))
if (!regArg.isReg() || !cc()->isVirtIdValid(regArg.id())) {
continue;
}
VirtReg* virtReg = cc()->virtRegById(regArg.id());
if (!virtReg)
if (!virtReg) {
continue;
}
// Unreferenced argument.
RAWorkReg* workReg = virtReg->workReg();
if (!workReg)
if (!workReg) {
continue;
}
// Overwritten argument.
uint32_t workId = workReg->workId();
if (!liveIn.bitAt(workId))
if (!liveIn.bitAt(workId)) {
continue;
}
workReg->setArgIndex(argIndex, valueIndex);
const FuncValue& arg = func()->detail().arg(argIndex, valueIndex);
@@ -1172,7 +1207,9 @@ static void RAPass_dumpSpans(String& sb, uint32_t index, const LiveRegSpans& liv
for (uint32_t i = 0; i < liveSpans.size(); i++) {
const LiveRegSpan& liveSpan = liveSpans[i];
if (i) sb.append(", ");
if (i) {
sb.append(", ");
}
sb.appendFormat("[%u:%u@%u]", liveSpan.a, liveSpan.b, liveSpan.id);
}
@@ -1197,11 +1234,13 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::initGlobalLiveSpans() noexcept {
if (physCount) {
liveSpans = allocator()->allocT<LiveRegSpans>(physCount * sizeof(LiveRegSpans));
if (ASMJIT_UNLIKELY(!liveSpans))
if (ASMJIT_UNLIKELY(!liveSpans)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
for (size_t physId = 0; physId < physCount; physId++)
for (size_t physId = 0; physId < physCount; physId++) {
new(Support::PlacementNew{&liveSpans[physId]}) LiveRegSpans();
}
}
_globalLiveSpans[group] = liveSpans;
@@ -1272,8 +1311,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
continue;
}
if (err != 0xFFFFFFFFu)
if (err != 0xFFFFFFFFu) {
return err;
}
}
}
@@ -1294,8 +1334,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
i = 0;
for (;;) {
uint32_t stop = consecutiveRegs.size();
if (i == stop)
if (i == stop) {
break;
}
while (i < stop) {
RAWorkReg* workReg = consecutiveRegs[i].workReg;
@@ -1317,8 +1358,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
uint32_t numConsecutiveRegs = consecutiveRegs.size();
for (i = 0; i < numConsecutiveRegs; i++) {
RAWorkReg* workReg = consecutiveRegs[i].workReg;
if (workReg->isAllocated())
if (workReg->isAllocated()) {
continue;
}
RAWorkReg* parentReg = consecutiveRegs[i].parentReg;
RegMask physRegs = 0;
@@ -1330,16 +1372,18 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
// NOTE: This should never be true as it would mean we would never allocate this virtual register
// (not here, and not later when local register allocator processes RATiedReg sets).
if (ASMJIT_UNLIKELY(!physRegs))
if (ASMJIT_UNLIKELY(!physRegs)) {
return DebugUtils::errored(kErrorConsecutiveRegsAllocation);
}
}
}
else if (parentReg->hasHomeRegId()) {
uint32_t consecutiveId = parentReg->homeRegId() + 1;
// NOTE: We don't support wrapping. If this goes beyond all allocable registers there is something wrong.
if (consecutiveId > 31 || !Support::bitTest(availableRegs, consecutiveId))
if (consecutiveId > 31 || !Support::bitTest(availableRegs, consecutiveId)) {
return DebugUtils::errored(kErrorConsecutiveRegsAllocation);
}
workReg->setHintRegId(consecutiveId);
physRegs = Support::bitMask(consecutiveId);
@@ -1358,8 +1402,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
break;
}
if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu))
if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu)) {
return err;
}
physRegs ^= Support::bitMask(physId);
}
@@ -1373,20 +1418,23 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
for (i = 0; i < numWorkRegs; i++) {
RAWorkReg* workReg = workRegs[i];
if (workReg->isAllocated())
if (workReg->isAllocated()) {
continue;
}
RegMask remainingPhysRegs = availableRegs;
if (remainingPhysRegs & workReg->preferredMask())
if (remainingPhysRegs & workReg->preferredMask()) {
remainingPhysRegs &= workReg->preferredMask();
}
RegMask physRegs = remainingPhysRegs & ~preservedRegs;
remainingPhysRegs &= preservedRegs;
for (;;) {
if (!physRegs) {
if (!remainingPhysRegs)
if (!remainingPhysRegs) {
break;
}
physRegs = remainingPhysRegs;
remainingPhysRegs = 0;
}
@@ -1396,8 +1444,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
if (workReg->clobberSurvivalMask()) {
RegMask preferredMask = (physRegs | remainingPhysRegs) & workReg->clobberSurvivalMask();
if (preferredMask) {
if (preferredMask & ~remainingPhysRegs)
if (preferredMask & ~remainingPhysRegs) {
preferredMask &= ~remainingPhysRegs;
}
physId = Support::ctz(preferredMask);
}
}
@@ -1412,16 +1461,18 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
break;
}
if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu))
if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu)) {
return err;
}
physRegs &= ~Support::bitMask(physId);
remainingPhysRegs &= ~Support::bitMask(physId);
}
// Keep it in `workRegs` if it was not allocated.
if (!physRegs)
if (!physRegs) {
workRegs[dstIndex++] = workReg;
}
}
workRegs._setSize(dstIndex);
@@ -1431,8 +1482,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
ASMJIT_RA_LOG_COMPLEX({
for (uint32_t physId = 0; physId < physCount; physId++) {
LiveRegSpans& live = _globalLiveSpans[group][physId];
if (live.empty())
if (live.empty()) {
continue;
}
sb.clear();
RAPass_dumpSpans(sb, physId, live);
@@ -1448,8 +1500,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
}
else {
_strategy[group].setType(RAStrategyType::kComplex);
for (RAWorkReg* workReg : workRegs)
for (RAWorkReg* workReg : workRegs) {
workReg->markStackPreferred();
}
ASMJIT_RA_LOG_COMPLEX({
uint32_t count = workRegs.size();
@@ -1457,7 +1510,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
sb.appendFormat(" Unassigned (%u): ", count);
for (i = 0; i < numWorkRegs; i++) {
RAWorkReg* workReg = workRegs[i];
if (i) sb.append(", ");
if (i) {
sb.append(", ");
}
sb.append(workReg->name());
}
sb.append('\n');
@@ -1475,8 +1530,9 @@ Error BaseRAPass::runLocalAllocator() noexcept {
RALocalAllocator lra(this);
ASMJIT_PROPAGATE(lra.init());
if (!blockCount())
if (!blockCount()) {
return kErrorOk;
}
// The allocation is done when this reaches zero.
uint32_t blocksRemaining = reachableBlockCount();
@@ -1489,7 +1545,7 @@ Error BaseRAPass::runLocalAllocator() noexcept {
ASMJIT_ASSERT(block->isReachable());
// Assign function arguments for the initial block. The `lra` is valid now.
lra.makeInitialAssignment();
ASMJIT_PROPAGATE(lra.makeInitialAssignment());
ASMJIT_PROPAGATE(setBlockEntryAssignment(block, block, lra._curAssignment));
// The loop starts from the first block and iterates blocks in order, however, the algorithm also allows to jump to
@@ -1503,10 +1559,7 @@ Error BaseRAPass::runLocalAllocator() noexcept {
BaseNode* afterLast = last->next();
bool unconditionalJump = false;
RABlock* consecutive = nullptr;
if (block->hasSuccessors())
consecutive = block->successors()[0];
RABlock* consecutive = block->hasSuccessors() ? block->successors()[0] : nullptr;
lra.setBlock(block);
block->makeAllocated();
@@ -1539,10 +1592,12 @@ Error BaseRAPass::runLocalAllocator() noexcept {
}
ASMJIT_PROPAGATE(lra.allocInst(inst));
if (inst->type() == NodeType::kInvoke)
if (inst->type() == NodeType::kInvoke) {
ASMJIT_PROPAGATE(emitPreCall(inst->as<InvokeNode>()));
else
}
else {
ASMJIT_PROPAGATE(lra.spillAfterAllocation(inst));
}
}
node = next;
}
@@ -1566,30 +1621,34 @@ Error BaseRAPass::runLocalAllocator() noexcept {
block->setFirst(beforeFirst->next());
block->setLast(afterLast ? afterLast->prev() : cc()->lastNode());
if (--blocksRemaining == 0)
if (--blocksRemaining == 0) {
break;
}
// Switch to the next consecutive block, if any.
if (consecutive) {
block = consecutive;
if (!block->isAllocated())
if (!block->isAllocated()) {
continue;
}
}
// Get the next block.
for (;;) {
if (++blockId >= blockCount())
if (++blockId >= blockCount()) {
blockId = 0;
}
block = _blocks[blockId];
if (!block->isReachable() || block->isAllocated() || !block->hasEntryAssignment())
if (!block->isReachable() || block->isAllocated() || !block->hasEntryAssignment()) {
continue;
}
break;
}
// If we switched to some block we have to update the local allocator.
lra.replaceAssignment(block->entryPhysToWorkMap());
ASMJIT_PROPAGATE(lra.replaceAssignment(block->entryPhysToWorkMap()));
}
_clobberedRegs.op<Support::Or>(lra._clobberedRegs);
@@ -1602,23 +1661,26 @@ Error BaseRAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlo
// Shouldn't happen. Entry assignment of a block that has a shared-state will assign to all blocks
// with the same sharedAssignmentId. It's a bug if the shared state has been already assigned.
if (!_sharedAssignments[sharedAssignmentId].empty())
if (!_sharedAssignments[sharedAssignmentId].empty()) {
return DebugUtils::errored(kErrorInvalidState);
}
return setSharedAssignment(sharedAssignmentId, fromAssignment);
}
PhysToWorkMap* physToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
if (ASMJIT_UNLIKELY(!physToWorkMap))
if (ASMJIT_UNLIKELY(!physToWorkMap)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
block->setEntryAssignment(physToWorkMap);
// True if this is the first (entry) block, nothing to do in this case.
if (block == fromBlock) {
// Entry block should never have a shared state.
if (block->hasSharedAssignmentId())
if (block->hasSharedAssignmentId()) {
return DebugUtils::errored(kErrorInvalidState);
}
return kErrorOk;
}
@@ -1637,8 +1699,9 @@ Error BaseRAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlo
RegGroup group = workReg->group();
uint32_t physId = fromAssignment.workToPhysId(group, workId);
if (physId != RAAssignment::kPhysNone)
if (physId != RAAssignment::kPhysNone) {
physToWorkMap->unassign(group, physId, _physRegIndex.get(group) + physId);
}
}
}
@@ -1663,8 +1726,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig
ASMJIT_ASSERT(!block->hasEntryAssignment());
PhysToWorkMap* entryPhysToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
if (ASMJIT_UNLIKELY(!entryPhysToWorkMap))
if (ASMJIT_UNLIKELY(!entryPhysToWorkMap)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
block->setEntryAssignment(entryPhysToWorkMap);
@@ -1681,8 +1745,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig
uint32_t physId = it.next();
uint32_t workId = entryPhysToWorkMap->workIds[physBaseIndex + physId];
if (!liveIn.bitAt(workId))
if (!liveIn.bitAt(workId)) {
entryPhysToWorkMap->unassign(group, physId, physBaseIndex + physId);
}
}
}
}
@@ -1694,8 +1759,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig
while (it.hasNext()) {
uint32_t physId = it.next();
if (Support::bitTest(physToWorkMap->assigned[group], physId))
if (Support::bitTest(physToWorkMap->assigned[group], physId)) {
physToWorkMap->unassign(group, physId, physBaseIndex + physId);
}
}
}
@@ -1705,8 +1771,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig
Error BaseRAPass::blockEntryAssigned(const PhysToWorkMap* physToWorkMap) noexcept {
// Complex allocation strategy requires to record register assignments upon block entry (or per shared state).
for (RegGroup group : RegGroupVirtValues{}) {
if (!_strategy[group].isComplex())
if (!_strategy[group].isComplex()) {
continue;
}
uint32_t physBaseIndex = _physRegIndex[group];
Support::BitWordIterator<RegMask> it(physToWorkMap->assigned[group]);
@@ -1753,15 +1820,17 @@ Error BaseRAPass::updateStackFrame() noexcept {
// Update some StackFrame information that we updated during allocation. The only information we don't have at the
// moment is final local stack size, which is calculated last.
FuncFrame& frame = func()->frame();
for (RegGroup group : RegGroupVirtValues{})
for (RegGroup group : RegGroupVirtValues{}) {
frame.addDirtyRegs(group, _clobberedRegs[group]);
}
frame.setLocalStackAlignment(_stackAllocator.alignment());
// If there are stack arguments that are not assigned to registers upon entry and the function doesn't require
// dynamic stack alignment we keep these arguments where they are. This will also mark all stack slots that match
// these arguments as allocated.
if (_numStackArgsToStackSlots)
if (_numStackArgsToStackSlots) {
ASMJIT_PROPAGATE(_markStackArgsToKeep());
}
// Calculate offsets of all stack slots and update StackSize to reflect the calculated local stack size.
ASMJIT_PROPAGATE(_stackAllocator.calculateStackFrame());
@@ -1773,13 +1842,15 @@ Error BaseRAPass::updateStackFrame() noexcept {
ASMJIT_PROPAGATE(frame.finalize());
// StackAllocator allocates all stots starting from [0], adjust them when necessary.
if (frame.localStackOffset() != 0)
if (frame.localStackOffset() != 0) {
ASMJIT_PROPAGATE(_stackAllocator.adjustSlotOffsets(int32_t(frame.localStackOffset())));
}
// Again, if there are stack arguments allocated in function's stack we have to handle them. This handles all cases
// (either regular or dynamic stack alignment).
if (_numStackArgsToStackSlots)
if (_numStackArgsToStackSlots) {
ASMJIT_PROPAGATE(_updateStackArgs());
}
return kErrorOk;
}
@@ -1800,8 +1871,9 @@ Error BaseRAPass::_markStackArgsToKeep() noexcept {
// If the register doesn't have stack slot then we failed. It doesn't make much sense as it was marked as
// `kFlagStackArgToStack`, which requires the WorkReg was live-in upon function entry.
RAStackSlot* slot = workReg->stackSlot();
if (ASMJIT_UNLIKELY(!slot))
if (ASMJIT_UNLIKELY(!slot)) {
return DebugUtils::errored(kErrorInvalidState);
}
if (hasSAReg && srcArg.isStack() && !srcArg.isIndirect()) {
uint32_t typeSize = TypeUtils::sizeOf(srcArg.typeId());
@@ -1832,8 +1904,9 @@ Error BaseRAPass::_updateStackArgs() noexcept {
ASMJIT_ASSERT(workReg->hasArgIndex());
RAStackSlot* slot = workReg->stackSlot();
if (ASMJIT_UNLIKELY(!slot))
if (ASMJIT_UNLIKELY(!slot)) {
return DebugUtils::errored(kErrorInvalidState);
}
if (slot->isStackArg()) {
const FuncValue& srcArg = _func->detail().arg(workReg->argIndex());
@@ -1930,38 +2003,48 @@ static void RAPass_formatLiveness(BaseRAPass* pass, String& sb, const RAInst* ra
for (uint32_t i = 0; i < tiedCount; i++) {
const RATiedReg& tiedReg = tiedRegs[i];
if (i != 0)
if (i != 0) {
sb.append(' ');
}
sb.appendFormat("%s{", pass->workRegById(tiedReg.workId())->name());
sb.append(tiedReg.isReadWrite() ? 'X' :
tiedReg.isRead() ? 'R' :
tiedReg.isWrite() ? 'W' : '?');
if (tiedReg.isLeadConsecutive())
if (tiedReg.isLeadConsecutive()) {
sb.appendFormat("|Lead[%u]", tiedReg.consecutiveData() + 1u);
}
if (tiedReg.hasUseId())
if (tiedReg.hasUseId()) {
sb.appendFormat("|Use=%u", tiedReg.useId());
else if (tiedReg.isUse())
}
else if (tiedReg.isUse()) {
sb.append("|Use");
}
if (tiedReg.isUseConsecutive() && !tiedReg.isLeadConsecutive())
if (tiedReg.isUseConsecutive() && !tiedReg.isLeadConsecutive()) {
sb.appendFormat("+%u", tiedReg.consecutiveData());
}
if (tiedReg.hasOutId())
if (tiedReg.hasOutId()) {
sb.appendFormat("|Out=%u", tiedReg.outId());
else if (tiedReg.isOut())
}
else if (tiedReg.isOut()) {
sb.append("|Out");
}
if (tiedReg.isOutConsecutive() && !tiedReg.isLeadConsecutive())
if (tiedReg.isOutConsecutive() && !tiedReg.isLeadConsecutive()) {
sb.appendFormat("+%u", tiedReg.consecutiveData());
}
if (tiedReg.isLast())
if (tiedReg.isLast()) {
sb.append("|Last");
}
if (tiedReg.isKill())
if (tiedReg.isKill()) {
sb.append("|Kill");
}
sb.append("}");
}
@@ -1972,7 +2055,9 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::annotateCode() noexcept {
for (const RABlock* block : _blocks) {
BaseNode* node = block->first();
if (!node) continue;
if (!node) {
continue;
}
BaseNode* last = block->last();
for (;;) {
@@ -1989,8 +2074,9 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::annotateCode() noexcept {
}
node->setInlineComment(static_cast<char*>(cc()->_dataZone.dup(sb.data(), sb.size(), true)));
if (node == last)
if (node == last) {
break;
}
node = node->next();
}
}
@@ -2001,10 +2087,12 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::annotateCode() noexcept {
ASMJIT_FAVOR_SIZE Error BaseRAPass::_dumpBlockIds(String& sb, const RABlocks& blocks) noexcept {
for (uint32_t i = 0, size = blocks.size(); i < size; i++) {
const RABlock* block = blocks[i];
if (i != 0)
if (i != 0) {
ASMJIT_PROPAGATE(sb.appendFormat(", #%u", block->blockId()));
else
}
else {
ASMJIT_PROPAGATE(sb.appendFormat("#%u", block->blockId()));
}
}
return kErrorOk;
}
@@ -2024,18 +2112,21 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::_dumpBlockLiveness(String& sb, const RABlock
if (bits.bitAt(workId)) {
RAWorkReg* wReg = workRegById(workId);
if (!n)
if (!n) {
sb.appendFormat(" %s [", bitsName);
else
}
else {
sb.append(", ");
}
sb.append(wReg->name());
n++;
}
}
if (n)
if (n) {
sb.append("]\n");
}
}
return kErrorOk;
@@ -2065,8 +2156,9 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::_dumpLiveSpans(String& sb) noexcept {
LiveRegSpans& liveSpans = workReg->liveSpans();
for (uint32_t x = 0; x < liveSpans.size(); x++) {
const LiveRegSpan& liveSpan = liveSpans[x];
if (x)
if (x) {
sb.append(", ");
}
sb.appendFormat("[%u:%u]", liveSpan.a, liveSpan.b);
}

View File

@@ -56,24 +56,25 @@ class RABlock {
public:
ASMJIT_NONCOPYABLE(RABlock)
typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
//! \name Types
//! \{
using PhysToWorkMap = RAAssignment::PhysToWorkMap;
using WorkToPhysMap = RAAssignment::WorkToPhysMap;
//! \}
//! \name Constants
//! \{
enum : uint32_t {
//! Unassigned block id.
kUnassignedId = 0xFFFFFFFFu
};
//! Unassigned block id.
static inline constexpr uint32_t kUnassignedId = 0xFFFFFFFFu;
enum LiveType : uint32_t {
kLiveIn = 0,
kLiveOut = 1,
kLiveGen = 2,
kLiveKill = 3,
kLiveCount = 4
};
static inline constexpr uint32_t kLiveIn = 0;
static inline constexpr uint32_t kLiveOut = 1;
static inline constexpr uint32_t kLiveGen = 2;
static inline constexpr uint32_t kLiveKill = 3;
static inline constexpr uint32_t kLiveCount = 4;
//! \}
@@ -145,24 +146,48 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseRAPass* pass() const noexcept { return _ra; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t blockId() const noexcept { return _blockId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlockFlags flags() const noexcept { return _flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(RABlockFlags flag) const noexcept { return Support::test(_flags, flag); }
ASMJIT_INLINE_NODEBUG void addFlags(RABlockFlags flags) noexcept { _flags |= flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAssigned() const noexcept { return _blockId != kUnassignedId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isConstructed() const noexcept { return hasFlag(RABlockFlags::kIsConstructed); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReachable() const noexcept { return hasFlag(RABlockFlags::kIsReachable); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isTargetable() const noexcept { return hasFlag(RABlockFlags::kIsTargetable); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return hasFlag(RABlockFlags::kIsAllocated); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFuncExit() const noexcept { return hasFlag(RABlockFlags::kIsFuncExit); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasTerminator() const noexcept { return hasFlag(RABlockFlags::kHasTerminator); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasConsecutive() const noexcept { return hasFlag(RABlockFlags::kHasConsecutive); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasJumpTable() const noexcept { return hasFlag(RABlockFlags::kHasJumpTable); }
ASMJIT_INLINE_NODEBUG void makeConstructed(const RARegsStats& regStats) noexcept {
@@ -174,11 +199,16 @@ public:
ASMJIT_INLINE_NODEBUG void makeTargetable() noexcept { _flags |= RABlockFlags::kIsTargetable; }
ASMJIT_INLINE_NODEBUG void makeAllocated() noexcept { _flags |= RABlockFlags::kIsAllocated; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegsStats& regsStats() const noexcept { return _regsStats; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPredecessors() const noexcept { return !_predecessors.empty(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSuccessors() const noexcept { return !_successors.empty(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSuccessor(RABlock* block) noexcept {
if (block->_predecessors.size() < _successors.size())
return block->_predecessors.contains(this);
@@ -186,56 +216,97 @@ public:
return _successors.contains(block);
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RABlocks& predecessors() const noexcept { return _predecessors; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RABlocks& successors() const noexcept { return _successors; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* first() const noexcept { return _first; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* last() const noexcept { return _last; }
ASMJIT_INLINE_NODEBUG void setFirst(BaseNode* node) noexcept { _first = node; }
ASMJIT_INLINE_NODEBUG void setLast(BaseNode* node) noexcept { _last = node; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t firstPosition() const noexcept { return _firstPosition; }
ASMJIT_INLINE_NODEBUG void setFirstPosition(uint32_t position) noexcept { _firstPosition = position; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t endPosition() const noexcept { return _endPosition; }
ASMJIT_INLINE_NODEBUG void setEndPosition(uint32_t position) noexcept { _endPosition = position; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t povOrder() const noexcept { return _povOrder; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask entryScratchGpRegs() const noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask exitScratchGpRegs() const noexcept { return _exitScratchGpRegs; }
ASMJIT_INLINE_NODEBUG void addEntryScratchGpRegs(RegMask regMask) noexcept { _entryScratchGpRegs |= regMask; }
ASMJIT_INLINE_NODEBUG void addExitScratchGpRegs(RegMask regMask) noexcept { _exitScratchGpRegs |= regMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSharedAssignmentId() const noexcept { return _sharedAssignmentId != Globals::kInvalidId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t sharedAssignmentId() const noexcept { return _sharedAssignmentId; }
ASMJIT_INLINE_NODEBUG void setSharedAssignmentId(uint32_t id) noexcept { _sharedAssignmentId = id; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t timestamp() const noexcept { return _timestamp; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasTimestamp(uint64_t ts) const noexcept { return _timestamp == ts; }
ASMJIT_INLINE_NODEBUG void setTimestamp(uint64_t ts) const noexcept { _timestamp = ts; }
ASMJIT_INLINE_NODEBUG void resetTimestamp() const noexcept { _timestamp = 0; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* consecutive() const noexcept { return hasConsecutive() ? _successors[0] : nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* iDom() noexcept { return _idom; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RABlock* iDom() const noexcept { return _idom; }
ASMJIT_INLINE_NODEBUG void setIDom(RABlock* block) noexcept { _idom = block; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneBitVector& liveIn() noexcept { return _liveBits[kLiveIn]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveIn() const noexcept { return _liveBits[kLiveIn]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneBitVector& liveOut() noexcept { return _liveBits[kLiveOut]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveOut() const noexcept { return _liveBits[kLiveOut]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneBitVector& gen() noexcept { return _liveBits[kLiveGen]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& gen() const noexcept { return _liveBits[kLiveGen]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneBitVector& kill() noexcept { return _liveBits[kLiveKill]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& kill() const noexcept { return _liveBits[kLiveKill]; }
[[nodiscard]]
inline Error resizeLiveBits(uint32_t size) noexcept {
ASMJIT_PROPAGATE(_liveBits[kLiveIn ].resize(allocator(), size));
ASMJIT_PROPAGATE(_liveBits[kLiveOut ].resize(allocator(), size));
@@ -244,8 +315,12 @@ public:
return kErrorOk;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasEntryAssignment() const noexcept { return _entryPhysToWorkMap != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PhysToWorkMap* entryPhysToWorkMap() const noexcept { return _entryPhysToWorkMap; }
ASMJIT_INLINE_NODEBUG void setEntryAssignment(PhysToWorkMap* physToWorkMap) noexcept { _entryPhysToWorkMap = physToWorkMap; }
//! \}
@@ -256,11 +331,13 @@ public:
//! Adds a successor to this block, and predecessor to `successor`, making connection on both sides.
//!
//! This API must be used to manage successors and predecessors, never manage it manually.
[[nodiscard]]
Error appendSuccessor(RABlock* successor) noexcept;
//! Similar to `appendSuccessor()`, but does prepend instead append.
//!
//! This function is used to add a natural flow (always first) to the block.
[[nodiscard]]
Error prependSuccessor(RABlock* successor) noexcept;
//! \}
@@ -318,53 +395,76 @@ public:
//! \{
//! Returns instruction RW flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstRWFlags instRWFlags() const noexcept { return _instRWFlags; };
//! Tests whether the given `flag` is present in instruction RW flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstRWFlag(InstRWFlags flag) const noexcept { return Support::test(_instRWFlags, flag); }
//! Adds `flags` to instruction RW flags.
ASMJIT_INLINE_NODEBUG void addInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags |= flags; }
//! Returns the instruction flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedFlags flags() const noexcept { return _flags; }
//! Tests whether the instruction has flag `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(RATiedFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Replaces the existing instruction flags with `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(RATiedFlags flags) noexcept { _flags = flags; }
//! Adds instruction `flags` to this RAInst.
ASMJIT_INLINE_NODEBUG void addFlags(RATiedFlags flags) noexcept { _flags |= flags; }
//! Clears instruction `flags` from this RAInst.
ASMJIT_INLINE_NODEBUG void clearFlags(RATiedFlags flags) noexcept { _flags &= ~flags; }
//! Tests whether one operand of this instruction has been patched from Reg to Mem.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRegToMemPatched() const noexcept { return hasFlag(RATiedFlags::kInst_RegToMemPatched); }
//! Tests whether this instruction can be transformed to another instruction if necessary.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isTransformable() const noexcept { return hasFlag(RATiedFlags::kInst_IsTransformable); }
//! Returns the associated block with this RAInst.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* block() const noexcept { return _block; }
//! Returns tied registers (all).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs() const noexcept { return const_cast<RATiedReg*>(_tiedRegs); }
//! Returns tied registers for a given `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs(RegGroup group) const noexcept { return const_cast<RATiedReg*>(_tiedRegs) + _tiedIndex.get(group); }
//! Returns count of all tied registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedCount() const noexcept { return _tiedTotal; }
//! Returns count of tied registers of a given `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount[group]; }
//! Returns `RATiedReg` at the given `index`.
[[nodiscard]]
inline RATiedReg* tiedAt(uint32_t index) const noexcept {
ASMJIT_ASSERT(index < _tiedTotal);
return tiedRegs() + index;
}
//! Returns `RATiedReg` at the given `index` of the given register `group`.
[[nodiscard]]
inline RATiedReg* tiedOf(RegGroup group, uint32_t index) const noexcept {
ASMJIT_ASSERT(index < _tiedCount.get(group));
return tiedRegs(group) + index;
}
[[nodiscard]]
inline const RATiedReg* tiedRegForWorkReg(RegGroup group, uint32_t workId) const noexcept {
const RATiedReg* array = tiedRegs(group);
size_t count = tiedCount(group);
@@ -387,6 +487,7 @@ public:
//! \name Static Functions
//! \{
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG size_t sizeOf(uint32_t tiedRegCount) noexcept {
return sizeof(RAInst) - sizeof(RATiedReg) + tiedRegCount * sizeof(RATiedReg);
}
@@ -447,33 +548,51 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstRWFlags instRWFlags() const noexcept { return _instRWFlags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstRWFlag(InstRWFlags flag) const noexcept { return Support::test(_instRWFlags, flag); }
ASMJIT_INLINE_NODEBUG void addInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags |= flags; }
ASMJIT_INLINE_NODEBUG void clearInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags &= ~flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedFlags aggregatedFlags() const noexcept { return _aggregatedFlags; }
ASMJIT_INLINE_NODEBUG void addAggregatedFlags(RATiedFlags flags) noexcept { _aggregatedFlags |= flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedFlags forbiddenFlags() const noexcept { return _forbiddenFlags; }
ASMJIT_INLINE_NODEBUG void addForbiddenFlags(RATiedFlags flags) noexcept { _forbiddenFlags |= flags; }
//! Returns the number of tied registers added to the builder.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedRegCount() const noexcept { return uint32_t((size_t)(_cur - _tiedRegs)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* begin() noexcept { return _tiedRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* end() noexcept { return _cur; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RATiedReg* begin() const noexcept { return _tiedRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RATiedReg* end() const noexcept { return _cur; }
//! Returns `RATiedReg` at the given `index`.
[[nodiscard]]
inline RATiedReg* operator[](size_t index) noexcept {
ASMJIT_ASSERT(index < tiedRegCount());
return &_tiedRegs[index];
}
//! Returns `RATiedReg` at the given `index`. (const).
[[nodiscard]]
inline const RATiedReg* operator[](size_t index) const noexcept {
ASMJIT_ASSERT(index < tiedRegCount());
return &_tiedRegs[index];
@@ -484,6 +603,7 @@ public:
//! \name Utilities
//! \{
[[nodiscard]]
Error add(
RAWorkReg* workReg,
RATiedFlags flags,
@@ -524,20 +644,23 @@ public:
}
else {
if (consecutiveParent != tiedReg->consecutiveParent()) {
if (tiedReg->consecutiveParent() != Globals::kInvalidId)
if (tiedReg->consecutiveParent() != Globals::kInvalidId) {
return DebugUtils::errored(kErrorInvalidState);
}
tiedReg->_consecutiveParent = consecutiveParent;
}
if (useId != BaseReg::kIdBad) {
if (ASMJIT_UNLIKELY(tiedReg->hasUseId()))
if (ASMJIT_UNLIKELY(tiedReg->hasUseId())) {
return DebugUtils::errored(kErrorOverlappedRegs);
}
tiedReg->setUseId(useId);
}
if (outId != BaseReg::kIdBad) {
if (ASMJIT_UNLIKELY(tiedReg->hasOutId()))
if (ASMJIT_UNLIKELY(tiedReg->hasOutId())) {
return DebugUtils::errored(kErrorOverlappedRegs);
}
tiedReg->setOutId(outId);
}
@@ -552,6 +675,7 @@ public:
}
}
[[nodiscard]]
Error addCallArg(RAWorkReg* workReg, uint32_t useId) noexcept {
ASMJIT_ASSERT(useId != BaseReg::kIdBad);
@@ -594,6 +718,7 @@ public:
}
}
[[nodiscard]]
Error addCallRet(RAWorkReg* workReg, uint32_t outId) noexcept {
ASMJIT_ASSERT(outId != BaseReg::kIdBad);
@@ -621,8 +746,9 @@ public:
return kErrorOk;
}
else {
if (tiedReg->hasOutId())
if (tiedReg->hasOutId()) {
return DebugUtils::errored(kErrorOverlappedRegs);
}
tiedReg->addRefCount();
tiedReg->addFlags(flags);
@@ -639,8 +765,13 @@ public:
//! See \ref RAAssignment for more information about register assignments.
class RASharedAssignment {
public:
typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
//! \name Types
//! \{
using PhysToWorkMap = RAAssignment::PhysToWorkMap;
using WorkToPhysMap = RAAssignment::WorkToPhysMap;
//! \}
//! \name Members
//! \{
@@ -659,14 +790,20 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _physToWorkMap == nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask entryScratchGpRegs() const noexcept { return _entryScratchGpRegs; }
ASMJIT_INLINE_NODEBUG void addEntryScratchGpRegs(RegMask mask) noexcept { _entryScratchGpRegs |= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveIn() const noexcept { return _liveIn; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
ASMJIT_INLINE_NODEBUG void assignPhysToWorkMap(PhysToWorkMap* physToWorkMap) noexcept { _physToWorkMap = physToWorkMap; }
//! \}
@@ -676,14 +813,22 @@ public:
class BaseRAPass : public FuncPass {
public:
ASMJIT_NONCOPYABLE(BaseRAPass)
typedef FuncPass Base;
using Base = FuncPass;
enum : uint32_t {
kCallArgWeight = 80
};
//! \name Constants
//! \{
typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
static inline constexpr uint32_t kCallArgWeight = 80;
//! \}
//! \name Types
//! \{
using PhysToWorkMap = RAAssignment::PhysToWorkMap;
using WorkToPhysMap = RAAssignment::WorkToPhysMap;
//! \}
//! \name Members
//! \{
@@ -787,37 +932,55 @@ public:
//! \{
//! Returns \ref Logger passed to \ref runOnFunction().
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; }
//! Returns either a valid logger if the given `option` is set and logging is enabled, or nullptr.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Logger* getLoggerIf(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option) ? _logger : nullptr; }
//! Returns whether the diagnostic `option` is enabled.
//!
//! \note Returns false if there is no logger (as diagnostics without logging make no sense).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); }
//! Returns \ref Zone passed to \ref runOnFunction().
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Zone* zone() const noexcept { return _allocator.zone(); }
//! Returns \ref ZoneAllocator used by the register allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<RASharedAssignment>& sharedAssignments() const { return _sharedAssignments; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t sharedAssignmentCount() const noexcept { return _sharedAssignments.size(); }
//! Returns the current function node.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; }
//! Returns the stop of the current function.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* stop() const noexcept { return _stop; }
//! Returns an extra block used by the current function being processed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* extraBlock() const noexcept { return _extraBlock; }
//! Sets an extra block, see `extraBlock()`.
ASMJIT_INLINE_NODEBUG void setExtraBlock(BaseNode* node) noexcept { _extraBlock = node; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t endPosition() const noexcept { return _instructionCount * 2; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegMask& availableRegs() const noexcept { return _availableRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegMask& clobberedRegs() const noexcept { return _clobberedRegs; }
//! \}
@@ -855,25 +1018,33 @@ public:
//! \{
//! Returns the function's entry block.
[[nodiscard]]
inline RABlock* entryBlock() noexcept {
ASMJIT_ASSERT(!_blocks.empty());
return _blocks[0];
}
//! \overload
[[nodiscard]]
inline const RABlock* entryBlock() const noexcept {
ASMJIT_ASSERT(!_blocks.empty());
return _blocks[0];
}
//! Returns all basic blocks of this function.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlocks& blocks() noexcept { return _blocks; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RABlocks& blocks() const noexcept { return _blocks; }
//! Returns the count of basic blocks (returns size of `_blocks` array).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t blockCount() const noexcept { return _blocks.size(); }
//! Returns the count of reachable basic blocks (returns size of `_pov` array).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t reachableBlockCount() const noexcept { return _pov.size(); }
//! Tests whether the CFG has dangling blocks - these were created by `newBlock()`, but not added to CFG through
@@ -881,41 +1052,51 @@ public:
//! incomplete.
//!
//! \note This is only used to check if the number of created blocks matches the number of added blocks.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDanglingBlocks() const noexcept { return _createdBlockCount != blockCount(); }
//! Gest a next timestamp to be used to mark CFG blocks.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t nextTimestamp() const noexcept { return ++_lastTimestamp; }
//! Creates a new `RABlock` instance.
//!
//! \note New blocks don't have ID assigned until they are added to the block array by calling `addBlock()`.
[[nodiscard]]
RABlock* newBlock(BaseNode* initialNode = nullptr) noexcept;
//! Tries to find a neighboring LabelNode (without going through code) that is already connected with `RABlock`.
//! If no label is found then a new RABlock is created and assigned to all possible labels in a backward direction.
[[nodiscard]]
RABlock* newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stoppedAt = nullptr) noexcept;
//! Adds the given `block` to the block list and assign it a unique block id.
[[nodiscard]]
Error addBlock(RABlock* block) noexcept;
[[nodiscard]]
inline Error addExitBlock(RABlock* block) noexcept {
block->addFlags(RABlockFlags::kIsFuncExit);
return _exits.append(allocator(), block);
}
ASMJIT_FORCE_INLINE RAInst* newRAInst(RABlock* block, InstRWFlags instRWFlags, RATiedFlags flags, uint32_t tiedRegCount, const RARegMask& clobberedRegs) noexcept {
[[nodiscard]]
ASMJIT_INLINE RAInst* newRAInst(RABlock* block, InstRWFlags instRWFlags, RATiedFlags flags, uint32_t tiedRegCount, const RARegMask& clobberedRegs) noexcept {
void* p = zone()->alloc(RAInst::sizeOf(tiedRegCount));
if (ASMJIT_UNLIKELY(!p))
if (ASMJIT_UNLIKELY(!p)) {
return nullptr;
}
return new(Support::PlacementNew{p}) RAInst(block, instRWFlags, flags, tiedRegCount, clobberedRegs);
}
ASMJIT_FORCE_INLINE Error assignRAInst(BaseNode* node, RABlock* block, RAInstBuilder& ib) noexcept {
[[nodiscard]]
ASMJIT_INLINE Error assignRAInst(BaseNode* node, RABlock* block, RAInstBuilder& ib) noexcept {
uint32_t tiedRegCount = ib.tiedRegCount();
RAInst* raInst = newRAInst(block, ib.instRWFlags(), ib.aggregatedFlags(), tiedRegCount, ib._clobbered);
if (ASMJIT_UNLIKELY(!raInst))
if (ASMJIT_UNLIKELY(!raInst)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
RARegIndex index;
RATiedFlags flagsFilter = ~ib.forbiddenFlags();
@@ -944,8 +1125,9 @@ public:
dst = *tiedReg;
dst._flags &= flagsFilter;
if (!tiedReg->isDuplicate())
if (!tiedReg->isDuplicate()) {
dst._useRegMask &= ~ib._used[group];
}
}
node->setPassData<RAInst>(raInst);
@@ -970,9 +1152,11 @@ public:
//! analysis and register allocation.
//!
//! Use `RACFGBuilderT` template that provides the necessary boilerplate.
[[nodiscard]]
virtual Error buildCFG() noexcept;
//! Called after the CFG is built.
[[nodiscard]]
Error initSharedAssignments(const ZoneVector<uint32_t>& sharedAssignmentsMap) noexcept;
//! \}
@@ -981,6 +1165,7 @@ public:
//! \{
//! Constructs CFG views (only POV at the moment).
[[nodiscard]]
Error buildCFGViews() noexcept;
//! \}
@@ -993,19 +1178,29 @@ public:
// - A node `Z` post-dominates a node `X` if any path from `X` to the end of the graph has to go through `Z`.
//! Constructs a dominator-tree from CFG.
[[nodiscard]]
Error buildCFGDominators() noexcept;
[[nodiscard]]
bool _strictlyDominates(const RABlock* a, const RABlock* b) const noexcept;
[[nodiscard]]
const RABlock* _nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept;
//! Tests whether the basic block `a` dominates `b` - non-strict, returns true when `a == b`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool dominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? true : _strictlyDominates(a, b); }
//! Tests whether the basic block `a` dominates `b` - strict dominance check, returns false when `a == b`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool strictlyDominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? false : _strictlyDominates(a, b); }
//! Returns a nearest common dominator of `a` and `b`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* nearestCommonDominator(RABlock* a, RABlock* b) const noexcept { return const_cast<RABlock*>(_nearestCommonDominator(a, b)); }
//! Returns a nearest common dominator of `a` and `b` (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RABlock* nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept { return _nearestCommonDominator(a, b); }
//! \}
@@ -1013,15 +1208,18 @@ public:
//! \name CFG - Utilities
//! \{
[[nodiscard]]
Error removeUnreachableCode() noexcept;
//! Returns `node` or some node after that is ideal for beginning a new block. This function is mostly used after
//! a conditional or unconditional jump to select the successor node. In some cases the next node could be a label,
//! which means it could have assigned some block already.
[[nodiscard]]
BaseNode* findSuccessorStartingAt(BaseNode* node) noexcept;
//! Returns `true` of the `node` can flow to `target` without reaching code nor data. It's used to eliminate jumps
//! to labels that are next right to them.
[[nodiscard]]
bool isNextTo(BaseNode* node, BaseNode* target) noexcept;
//! \}
@@ -1030,18 +1228,31 @@ public:
//! \{
//! Returns a native size of the general-purpose register of the target architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return _sp.size(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t availableRegCount(RegGroup group) const noexcept { return _availableRegCount[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkReg* workRegById(uint32_t workId) const noexcept { return _workRegs[workId]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkRegs& workRegs() noexcept { return _workRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkRegs& workRegs(RegGroup group) noexcept { return _workRegsOfGroup[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RAWorkRegs& workRegs() const noexcept { return _workRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RAWorkRegs& workRegs(RegGroup group) const noexcept { return _workRegsOfGroup[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t workRegCount() const noexcept { return _workRegs.size(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t workRegCount(RegGroup group) const noexcept { return _workRegsOfGroup[group].size(); }
inline void _buildPhysIndex() noexcept {
@@ -1049,30 +1260,39 @@ public:
_physRegTotal = uint32_t(_physRegIndex[RegGroup::kMaxVirt]) +
uint32_t(_physRegCount[RegGroup::kMaxVirt]) ;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t physRegIndex(RegGroup group) const noexcept { return _physRegIndex[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t physRegTotal() const noexcept { return _physRegTotal; }
[[nodiscard]]
Error _asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept;
//! Creates `RAWorkReg` data for the given `vReg`. The function does nothing
//! if `vReg` already contains link to `RAWorkReg`. Called by `constructBlocks()`.
[[nodiscard]]
inline Error asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept {
*out = vReg->workReg();
return *out ? kErrorOk : _asWorkReg(vReg, out);
}
ASMJIT_FORCE_INLINE Error virtIndexAsWorkReg(uint32_t vIndex, RAWorkReg** out) noexcept {
[[nodiscard]]
ASMJIT_INLINE Error virtIndexAsWorkReg(uint32_t vIndex, RAWorkReg** out) noexcept {
const ZoneVector<VirtReg*>& virtRegs = cc()->virtRegs();
if (ASMJIT_UNLIKELY(vIndex >= virtRegs.size()))
return DebugUtils::errored(kErrorInvalidVirtId);
return asWorkReg(virtRegs[vIndex], out);
}
[[nodiscard]]
inline RAStackSlot* getOrCreateStackSlot(RAWorkReg* workReg) noexcept {
RAStackSlot* slot = workReg->stackSlot();
if (slot)
if (slot) {
return slot;
}
slot = _stackAllocator.newSlot(_sp.id(), workReg->virtReg()->virtSize(), workReg->virtReg()->alignment(), RAStackSlot::kFlagRegHome);
workReg->_stackSlot = slot;
@@ -1080,17 +1300,22 @@ public:
return slot;
}
[[nodiscard]]
inline BaseMem workRegAsMem(RAWorkReg* workReg) noexcept {
getOrCreateStackSlot(workReg);
(void)getOrCreateStackSlot(workReg);
return BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(_sp.type()) |
OperandSignature::fromBits(OperandSignature::kMemRegHomeFlag),
workReg->virtId(), 0, 0);
}
[[nodiscard]]
WorkToPhysMap* newWorkToPhysMap() noexcept;
[[nodiscard]]
PhysToWorkMap* newPhysToWorkMap() noexcept;
[[nodiscard]]
inline PhysToWorkMap* clonePhysToWorkMap(const PhysToWorkMap* map) noexcept {
size_t size = PhysToWorkMap::sizeOf(_physRegTotal);
return static_cast<PhysToWorkMap*>(zone()->dupAligned(map, size, sizeof(uint32_t)));
@@ -1101,10 +1326,12 @@ public:
//! 1. Calculates GEN/KILL/IN/OUT of each block.
//! 2. Calculates live spans and basic statistics of each work register.
[[nodiscard]]
Error buildLiveness() noexcept;
//! Assigns argIndex to WorkRegs. Must be called after the liveness analysis
//! finishes as it checks whether the argument is live upon entry.
[[nodiscard]]
Error assignArgIndexToWorkRegs() noexcept;
//! \}
@@ -1113,11 +1340,14 @@ public:
//! \{
//! Runs a global register allocator.
[[nodiscard]]
Error runGlobalAllocator() noexcept;
//! Initializes data structures used for global live spans.
[[nodiscard]]
Error initGlobalLiveSpans() noexcept;
[[nodiscard]]
Error binPack(RegGroup group) noexcept;
//! \}
@@ -1126,13 +1356,19 @@ public:
//! \{
//! Runs a local register allocator.
[[nodiscard]]
Error runLocalAllocator() noexcept;
[[nodiscard]]
Error setBlockEntryAssignment(RABlock* block, const RABlock* fromBlock, const RAAssignment& fromAssignment) noexcept;
[[nodiscard]]
Error setSharedAssignment(uint32_t sharedAssignmentId, const RAAssignment& fromAssignment) noexcept;
//! Called after the RA assignment has been assigned to a block.
//!
//! This cannot change the assignment, but can examine it.
[[nodiscard]]
Error blockEntryAssigned(const PhysToWorkMap* physToWorkMap) noexcept;
//! \}
@@ -1140,6 +1376,7 @@ public:
//! \name Register Allocation Utilities
//! \{
[[nodiscard]]
Error useTemporaryMem(BaseMem& out, uint32_t size, uint32_t alignment) noexcept;
//! \}
@@ -1147,9 +1384,16 @@ public:
//! \name Function Prolog & Epilog
//! \{
[[nodiscard]]
virtual Error updateStackFrame() noexcept;
[[nodiscard]]
Error _markStackArgsToKeep() noexcept;
[[nodiscard]]
Error _updateStackArgs() noexcept;
[[nodiscard]]
Error insertPrologEpilog() noexcept;
//! \}
@@ -1157,7 +1401,10 @@ public:
//! \name Instruction Rewriter
//! \{
[[nodiscard]]
Error rewrite() noexcept;
[[nodiscard]]
virtual Error _rewrite(BaseNode* first, BaseNode* stop) noexcept;
//! \}
@@ -1167,7 +1414,6 @@ public:
//! \{
Error annotateCode() noexcept;
Error _dumpBlockIds(String& sb, const RABlocks& blocks) noexcept;
Error _dumpBlockLiveness(String& sb, const RABlock* block) noexcept;
Error _dumpLiveSpans(String& sb) noexcept;
@@ -1178,13 +1424,22 @@ public:
//! \name Emit
//! \{
[[nodiscard]]
virtual Error emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept;
[[nodiscard]]
virtual Error emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept;
[[nodiscard]]
virtual Error emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept;
[[nodiscard]]
virtual Error emitSave(uint32_t workId, uint32_t srcPhysId) noexcept;
[[nodiscard]]
virtual Error emitJump(const Label& label) noexcept;
[[nodiscard]]
virtual Error emitPreCall(InvokeNode* invokeNode) noexcept;
//! \}
@@ -1194,8 +1449,9 @@ inline ZoneAllocator* RABlock::allocator() const noexcept { return _ra->allocato
inline RegMask RABlock::entryScratchGpRegs() const noexcept {
RegMask regs = _entryScratchGpRegs;
if (hasSharedAssignmentId())
if (hasSharedAssignmentId()) {
regs = _ra->_sharedAssignments[_sharedAssignmentId].entryScratchGpRegs();
}
return regs;
}

View File

@@ -15,12 +15,14 @@ ASMJIT_BEGIN_NAMESPACE
// ========================
RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept {
if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk))
if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk)) {
return nullptr;
}
RAStackSlot* slot = allocator()->allocT<RAStackSlot>();
if (ASMJIT_UNLIKELY(!slot))
if (ASMJIT_UNLIKELY(!slot)) {
return nullptr;
}
slot->_baseRegId = uint8_t(baseRegId);
slot->_alignment = uint8_t(Support::max<uint32_t>(alignment, 1));
@@ -72,15 +74,18 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
uint32_t power = Support::min<uint32_t>(Support::ctz(alignment), 6);
uint64_t weight;
if (slot->isRegHome())
if (slot->isRegHome()) {
weight = kBaseRegWeight + (uint64_t(slot->useCount()) * (7 - power));
else
}
else {
weight = power;
}
// If overflown, which has less chance of winning a lottery, just use max possible weight. In such case it
// probably doesn't matter at all.
if (weight > 0xFFFFFFFFu)
if (weight > 0xFFFFFFFFu) {
weight = 0xFFFFFFFFu;
}
slot->setWeight(uint32_t(weight));
}
@@ -104,8 +109,9 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
ZoneVector<RAStackGap> gaps[kSizeCount - 1];
for (RAStackSlot* slot : _slots) {
if (slot->isStackArg())
if (slot->isStackArg()) {
continue;
}
uint32_t slotAlignment = slot->alignment();
uint32_t alignedOffset = Support::alignUp(offset, slotAlignment);
@@ -153,8 +159,9 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
uint32_t slotSize = 1u << index;
// Weird case, better to bail...
if (gapEnd - gapOffset < slotSize)
if (gapEnd - gapOffset < slotSize) {
break;
}
ASMJIT_PROPAGATE(gaps[index].append(allocator(), RAStackGap(gapOffset, slotSize)));
gapOffset += slotSize;
@@ -173,9 +180,11 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
}
Error RAStackAllocator::adjustSlotOffsets(int32_t offset) noexcept {
for (RAStackSlot* slot : _slots)
if (!slot->isStackArg())
for (RAStackSlot* slot : _slots) {
if (!slot->isStackArg()) {
slot->_offset += offset;
}
}
return kErrorOk;
}

View File

@@ -57,32 +57,50 @@ struct RAStackSlot {
//! \name Accessors
//! \{
[[nodiscard]]
inline uint32_t baseRegId() const noexcept { return _baseRegId; }
inline void setBaseRegId(uint32_t id) noexcept { _baseRegId = uint8_t(id); }
[[nodiscard]]
inline uint32_t size() const noexcept { return _size; }
[[nodiscard]]
inline uint32_t alignment() const noexcept { return _alignment; }
[[nodiscard]]
inline uint32_t flags() const noexcept { return _flags; }
[[nodiscard]]
inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
inline void addFlags(uint32_t flags) noexcept { _flags = uint16_t(_flags | flags); }
[[nodiscard]]
inline bool isRegHome() const noexcept { return hasFlag(kFlagRegHome); }
[[nodiscard]]
inline bool isStackArg() const noexcept { return hasFlag(kFlagStackArg); }
[[nodiscard]]
inline uint32_t useCount() const noexcept { return _useCount; }
inline void addUseCount(uint32_t n = 1) noexcept { _useCount += n; }
[[nodiscard]]
inline uint32_t weight() const noexcept { return _weight; }
inline void setWeight(uint32_t weight) noexcept { _weight = weight; }
[[nodiscard]]
inline int32_t offset() const noexcept { return _offset; }
inline void setOffset(int32_t offset) noexcept { _offset = offset; }
//! \}
};
typedef ZoneVector<RAStackSlot*> RAStackSlots;
using RAStackSlots = ZoneVector<RAStackSlot*>;
//! Stack allocator.
class RAStackAllocator {
@@ -134,14 +152,25 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return _allocator; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t bytesUsed() const noexcept { return _bytesUsed; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t stackSize() const noexcept { return _stackSize; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAStackSlots& slots() noexcept { return _slots; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RAStackSlots& slots() const noexcept { return _slots; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t slotCount() const noexcept { return _slots.size(); }
//! \}
@@ -149,9 +178,13 @@ public:
//! \name Utilities
//! \{
[[nodiscard]]
RAStackSlot* newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags = 0) noexcept;
[[nodiscard]]
Error calculateStackFrame() noexcept;
[[nodiscard]]
Error adjustSlotOffsets(int32_t offset) noexcept;
//! \}

View File

@@ -21,7 +21,7 @@ constexpr size_t kMaxAllocSize = SIZE_MAX - Globals::kGrowThreshold;
//
// NOTE: The sizes here include null terminators - that way we can have aligned allocations that are power of 2s
// initially.
static ASMJIT_FORCE_INLINE size_t String_growCapacity(size_t byteSize, size_t minimumByteSize) noexcept {
static ASMJIT_INLINE size_t String_growCapacity(size_t byteSize, size_t minimumByteSize) noexcept {
static constexpr size_t kGrowThreshold = Globals::kGrowThreshold;
ASMJIT_ASSERT(minimumByteSize < kMaxAllocSize);
@@ -51,8 +51,9 @@ static ASMJIT_FORCE_INLINE size_t String_growCapacity(size_t byteSize, size_t mi
byteSize = minimumByteSize + remainder;
// Bail to `minimumByteSize` in case of overflow.
if (byteSize < minimumByteSize)
if (byteSize < minimumByteSize) {
return minimumByteSize;
}
}
}
@@ -63,8 +64,9 @@ static ASMJIT_FORCE_INLINE size_t String_growCapacity(size_t byteSize, size_t mi
// ======================
Error String::reset() noexcept {
if (_type == kTypeLarge)
if (_type == kTypeLarge) {
::free(_large.data);
}
_resetInternal();
return kErrorOk;
@@ -104,17 +106,20 @@ char* String::prepare(ModifyOp op, size_t size) noexcept {
if (op == ModifyOp::kAssign) {
if (size > curCapacity) {
// Prevent arithmetic overflow.
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize))
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize)) {
return nullptr;
}
size_t newCapacity = Support::alignUp<size_t>(size + 1, kMinAllocSize);
char* newData = static_cast<char*>(::malloc(newCapacity));
if (ASMJIT_UNLIKELY(!newData))
if (ASMJIT_UNLIKELY(!newData)) {
return nullptr;
}
if (_type == kTypeLarge)
if (_type == kTypeLarge) {
::free(curData);
}
_large.type = kTypeLarge;
_large.size = size;
@@ -132,8 +137,9 @@ char* String::prepare(ModifyOp op, size_t size) noexcept {
}
else {
// Prevent arithmetic overflow.
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize - 1))
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize - 1)) {
return nullptr;
}
size_t newSize = size + curSize;
size_t newSizePlusOne = newSize + 1;
@@ -142,17 +148,20 @@ char* String::prepare(ModifyOp op, size_t size) noexcept {
size_t newCapacityPlusOne = String_growCapacity(size + 1u, newSizePlusOne);
ASMJIT_ASSERT(newCapacityPlusOne >= newSizePlusOne);
if (ASMJIT_UNLIKELY(newCapacityPlusOne < newSizePlusOne))
if (ASMJIT_UNLIKELY(newCapacityPlusOne < newSizePlusOne)) {
return nullptr;
}
char* newData = static_cast<char*>(::malloc(newCapacityPlusOne));
if (ASMJIT_UNLIKELY(!newData))
if (ASMJIT_UNLIKELY(!newData)) {
return nullptr;
}
memcpy(newData, curData, curSize);
if (_type == kTypeLarge)
if (_type == kTypeLarge) {
::free(curData);
}
_large.type = kTypeLarge;
_large.size = newSize;
@@ -177,8 +186,9 @@ Error String::assign(const char* data, size_t size) noexcept {
char* dst = nullptr;
// Null terminated string without `size` specified.
if (size == SIZE_MAX)
if (size == SIZE_MAX) {
size = data ? strlen(data) : size_t(0);
}
if (isLargeOrExternal()) {
if (size <= _large.capacity) {
@@ -187,15 +197,18 @@ Error String::assign(const char* data, size_t size) noexcept {
}
else {
size_t capacityPlusOne = Support::alignUp(size + 1, 32);
if (ASMJIT_UNLIKELY(capacityPlusOne < size))
if (ASMJIT_UNLIKELY(capacityPlusOne < size)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
dst = static_cast<char*>(::malloc(capacityPlusOne));
if (ASMJIT_UNLIKELY(!dst))
if (ASMJIT_UNLIKELY(!dst)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
if (_type == kTypeLarge)
if (_type == kTypeLarge) {
::free(_large.data);
}
_large.type = kTypeLarge;
_large.data = dst;
@@ -212,8 +225,9 @@ Error String::assign(const char* data, size_t size) noexcept {
}
else {
dst = static_cast<char*>(::malloc(size + 1));
if (ASMJIT_UNLIKELY(!dst))
if (ASMJIT_UNLIKELY(!dst)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
_large.type = kTypeLarge;
_large.data = dst;
@@ -237,15 +251,18 @@ Error String::assign(const char* data, size_t size) noexcept {
// ===================
Error String::_opString(ModifyOp op, const char* str, size_t size) noexcept {
if (size == SIZE_MAX)
if (size == SIZE_MAX) {
size = str ? strlen(str) : size_t(0);
}
if (!size)
if (!size) {
return kErrorOk;
}
char* p = prepare(op, size);
if (!p)
if (!p) {
return DebugUtils::errored(kErrorOutOfMemory);
}
memcpy(p, str, size);
return kErrorOk;
@@ -253,20 +270,23 @@ Error String::_opString(ModifyOp op, const char* str, size_t size) noexcept {
Error String::_opChar(ModifyOp op, char c) noexcept {
char* p = prepare(op, 1);
if (!p)
if (!p) {
return DebugUtils::errored(kErrorOutOfMemory);
}
*p = c;
return kErrorOk;
}
Error String::_opChars(ModifyOp op, char c, size_t n) noexcept {
if (!n)
if (!n) {
return kErrorOk;
}
char* p = prepare(op, n);
if (!p)
if (!p) {
return DebugUtils::errored(kErrorOutOfMemory);
}
memset(p, c, n);
return kErrorOk;
@@ -278,8 +298,9 @@ Error String::padEnd(size_t n, char c) noexcept {
}
Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, StringFormatFlags flags) noexcept {
if (base == 0)
if (base == 0) {
base = 10;
}
char buf[128];
char* p = buf + ASMJIT_ARRAY_SIZE(buf);
@@ -345,8 +366,9 @@ Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, St
if (Support::test(flags, StringFormatFlags::kAlternate)) {
if (base == 8) {
if (orig != 0)
if (orig != 0) {
*--p = '0';
}
}
if (base == 16) {
*--p = 'x';
@@ -357,16 +379,20 @@ Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, St
// String Width
// ------------
if (sign != 0)
if (sign != 0) {
*--p = sign;
}
if (width > 256)
if (width > 256) {
width = 256;
}
if (width <= numberSize)
if (width <= numberSize) {
width = 0;
else
}
else {
width -= numberSize;
}
// Finalize
// --------
@@ -374,8 +400,9 @@ Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, St
size_t prefixSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberSize;
char* data = prepare(op, prefixSize + width + numberSize);
if (!data)
if (!data) {
return DebugUtils::errored(kErrorOutOfMemory);
}
memcpy(data, p, prefixSize);
data += prefixSize;
@@ -391,23 +418,29 @@ Error String::_opHex(ModifyOp op, const void* data, size_t size, char separator)
char* dst;
const uint8_t* src = static_cast<const uint8_t*>(data);
if (!size)
if (!size) {
return kErrorOk;
}
if (separator) {
if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 3))
if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 3)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
dst = prepare(op, size * 3 - 1);
if (ASMJIT_UNLIKELY(!dst))
if (ASMJIT_UNLIKELY(!dst)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
size_t i = 0;
for (;;) {
dst[0] = String_baseN[(src[0] >> 4) & 0xF];
dst[1] = String_baseN[(src[0] ) & 0xF];
if (++i == size)
if (++i == size) {
break;
}
// This makes sure that the separator is only put between two hexadecimal bytes.
dst[2] = separator;
dst += 3;
@@ -415,12 +448,14 @@ Error String::_opHex(ModifyOp op, const void* data, size_t size, char separator)
}
}
else {
if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 2))
if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 2)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
dst = prepare(op, size * 2);
if (ASMJIT_UNLIKELY(!dst))
if (ASMJIT_UNLIKELY(!dst)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
for (size_t i = 0; i < size; i++, dst += 2, src++) {
dst[0] = String_baseN[(src[0] >> 4) & 0xF];
@@ -466,16 +501,19 @@ Error String::_opVFormat(ModifyOp op, const char* fmt, va_list ap) noexcept {
fmtResult = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
outputSize = size_t(fmtResult);
if (ASMJIT_LIKELY(outputSize < ASMJIT_ARRAY_SIZE(buf)))
if (ASMJIT_LIKELY(outputSize < ASMJIT_ARRAY_SIZE(buf))) {
return _opString(op, buf, outputSize);
}
}
if (ASMJIT_UNLIKELY(fmtResult < 0))
if (ASMJIT_UNLIKELY(fmtResult < 0)) {
return DebugUtils::errored(kErrorInvalidState);
}
char* p = prepare(op, outputSize);
if (ASMJIT_UNLIKELY(!p))
if (ASMJIT_UNLIKELY(!p)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
fmtResult = vsnprintf(p, outputSize + 1, fmt, apCopy);
ASMJIT_ASSERT(size_t(fmtResult) == outputSize);
@@ -509,14 +547,17 @@ bool String::equals(const char* other, size_t size) const noexcept {
if (bSize == SIZE_MAX) {
size_t i;
for (i = 0; i < aSize; i++)
if (aData[i] != bData[i] || bData[i] == 0)
for (i = 0; i < aSize; i++) {
if (aData[i] != bData[i] || bData[i] == 0) {
return false;
}
}
return bData[i] == 0;
}
else {
if (aSize != bSize)
if (aSize != bSize) {
return false;
}
return ::memcmp(aData, bData, aSize) == 0;
}
}

View File

@@ -36,9 +36,7 @@ union FixedString {
//! \{
// This cannot be constexpr as GCC 4.8 refuses constexpr members of unions.
enum : uint32_t {
kNumUInt32Words = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t))
};
static inline constexpr uint32_t kNumUInt32Words = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t));
//! \}
@@ -53,6 +51,7 @@ union FixedString {
//! \name Utilities
//! \{
[[nodiscard]]
inline bool equals(const char* other) const noexcept { return strcmp(str, other) == 0; }
//! \}
@@ -85,18 +84,13 @@ public:
};
//! \cond INTERNAL
enum : uint32_t {
kLayoutSize = 32,
kSSOCapacity = kLayoutSize - 2
};
static inline constexpr uint32_t kLayoutSize = 32;
static inline constexpr uint32_t kSSOCapacity = kLayoutSize - 2;
//! String type.
enum Type : uint8_t {
//! Large string (owned by String).
kTypeLarge = 0x1Fu,
//! External string (zone allocated or not owned by String).
kTypeExternal = 0x20u
};
//! Large string (owned by String).
static inline constexpr uint8_t kTypeLarge = 0x1Fu;
//! External string (zone allocated or not owned by String).
static inline constexpr uint8_t kTypeExternal = 0x20u;
union Raw {
uint8_t u8[kLayoutSize];
@@ -156,10 +150,16 @@ public:
return *this;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const char* other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const char* other) const noexcept { return !equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const String& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const String& other) const noexcept { return !equals(other); }
//! \}
@@ -167,25 +167,42 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return _type == kTypeExternal; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLargeOrExternal() const noexcept { return _type >= kTypeLarge; }
//! Tests whether the string is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return size() == 0; }
//! Returns the size of the string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return isLargeOrExternal() ? size_t(_large.size) : size_t(_type); }
//! Returns the capacity of the string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return isLargeOrExternal() ? _large.capacity : size_t(kSSOCapacity); }
//! Returns the data of the string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG char* data() noexcept { return isLargeOrExternal() ? _large.data : _small.data; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return isLargeOrExternal() ? _large.data : _small.data; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG char* start() noexcept { return data(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* start() const noexcept { return data(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG char* end() noexcept { return data() + size(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* end() const noexcept { return data() + size(); }
//! \}
@@ -201,6 +218,7 @@ public:
//! Clears the content of the string.
ASMJIT_API Error clear() noexcept;
[[nodiscard]]
ASMJIT_API char* prepare(ModifyOp op, size_t size) noexcept;
ASMJIT_API Error _opString(ModifyOp op, const char* str, size_t size = SIZE_MAX) noexcept;
@@ -310,7 +328,10 @@ public:
//! Truncate the string length into `newSize`.
ASMJIT_API Error truncate(size_t newSize) noexcept;
[[nodiscard]]
ASMJIT_API bool equals(const char* other, size_t size = SIZE_MAX) const noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool equals(const String& other) const noexcept { return equals(other.data(), other.size()); }
//! \}
@@ -323,15 +344,18 @@ public:
//! \note This is always called internally after an external buffer was released as it zeroes all bytes
//! used by String's embedded storage.
inline void _resetInternal() noexcept {
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++)
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++) {
_raw.uptr[i] = 0;
}
}
inline void _setSize(size_t newSize) noexcept {
if (isLargeOrExternal())
if (isLargeOrExternal()) {
_large.size = newSize;
else
}
else {
_small.type = uint8_t(newSize);
}
}
//! \}

File diff suppressed because it is too large Load Diff

View File

@@ -40,12 +40,18 @@ public:
//! \{
//! Returns target's environment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; }
//! Returns the target architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _environment.arch(); }
//! Returns the target sub-architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _environment.subArch(); }
[[nodiscard]]
//! Returns target CPU features.
ASMJIT_INLINE_NODEBUG const CpuFeatures& cpuFeatures() const noexcept { return _cpuFeatures; }

View File

@@ -13,50 +13,46 @@ namespace TypeUtils {
template<uint32_t Index>
struct ScalarOfTypeId {
enum : uint32_t {
kTypeId = uint32_t(
isScalar(TypeId(Index)) ? TypeId(Index) :
isMask8 (TypeId(Index)) ? TypeId::kUInt8 :
isMask16(TypeId(Index)) ? TypeId::kUInt16 :
isMask32(TypeId(Index)) ? TypeId::kUInt32 :
isMask64(TypeId(Index)) ? TypeId::kUInt64 :
isMmx32 (TypeId(Index)) ? TypeId::kUInt32 :
isMmx64 (TypeId(Index)) ? TypeId::kUInt64 :
isVec32 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec32Start ) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec64 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec64Start ) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec128(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec128Start) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec256(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec256Start) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec512(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec512Start) + uint32_t(TypeId::kInt8)) & 0xFF) : TypeId::kVoid)
};
static inline constexpr uint32_t kTypeId = uint32_t(
isScalar(TypeId(Index)) ? TypeId(Index) :
isMask8 (TypeId(Index)) ? TypeId::kUInt8 :
isMask16(TypeId(Index)) ? TypeId::kUInt16 :
isMask32(TypeId(Index)) ? TypeId::kUInt32 :
isMask64(TypeId(Index)) ? TypeId::kUInt64 :
isMmx32 (TypeId(Index)) ? TypeId::kUInt32 :
isMmx64 (TypeId(Index)) ? TypeId::kUInt64 :
isVec32 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec32Start ) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec64 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec64Start ) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec128(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec128Start) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec256(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec256Start) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec512(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec512Start) + uint32_t(TypeId::kInt8)) & 0xFF) : TypeId::kVoid);
};
template<uint32_t Index>
struct SizeOfTypeId {
enum : uint32_t {
kTypeSize =
isInt8 (TypeId(Index)) ? 1 :
isUInt8 (TypeId(Index)) ? 1 :
isInt16 (TypeId(Index)) ? 2 :
isUInt16 (TypeId(Index)) ? 2 :
isInt32 (TypeId(Index)) ? 4 :
isUInt32 (TypeId(Index)) ? 4 :
isInt64 (TypeId(Index)) ? 8 :
isUInt64 (TypeId(Index)) ? 8 :
isFloat32(TypeId(Index)) ? 4 :
isFloat64(TypeId(Index)) ? 8 :
isFloat80(TypeId(Index)) ? 10 :
isMask8 (TypeId(Index)) ? 1 :
isMask16 (TypeId(Index)) ? 2 :
isMask32 (TypeId(Index)) ? 4 :
isMask64 (TypeId(Index)) ? 8 :
isMmx32 (TypeId(Index)) ? 4 :
isMmx64 (TypeId(Index)) ? 8 :
isVec32 (TypeId(Index)) ? 4 :
isVec64 (TypeId(Index)) ? 8 :
isVec128 (TypeId(Index)) ? 16 :
isVec256 (TypeId(Index)) ? 32 :
isVec512 (TypeId(Index)) ? 64 : 0
};
static inline constexpr uint32_t kTypeSize =
isInt8 (TypeId(Index)) ? 1 :
isUInt8 (TypeId(Index)) ? 1 :
isInt16 (TypeId(Index)) ? 2 :
isUInt16 (TypeId(Index)) ? 2 :
isInt32 (TypeId(Index)) ? 4 :
isUInt32 (TypeId(Index)) ? 4 :
isInt64 (TypeId(Index)) ? 8 :
isUInt64 (TypeId(Index)) ? 8 :
isFloat32(TypeId(Index)) ? 4 :
isFloat64(TypeId(Index)) ? 8 :
isFloat80(TypeId(Index)) ? 10 :
isMask8 (TypeId(Index)) ? 1 :
isMask16 (TypeId(Index)) ? 2 :
isMask32 (TypeId(Index)) ? 4 :
isMask64 (TypeId(Index)) ? 8 :
isMmx32 (TypeId(Index)) ? 4 :
isMmx64 (TypeId(Index)) ? 8 :
isVec32 (TypeId(Index)) ? 4 :
isVec64 (TypeId(Index)) ? 8 :
isVec128 (TypeId(Index)) ? 16 :
isVec256 (TypeId(Index)) ? 32 :
isVec512 (TypeId(Index)) ? 64 : 0;
};
const TypeData _typeData = {

View File

@@ -164,98 +164,164 @@ struct TypeData {
ASMJIT_VARAPI const TypeData _typeData;
//! Returns the scalar type of `typeId`.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG TypeId scalarOf(TypeId typeId) noexcept { return _typeData.scalarOf[uint32_t(typeId)]; }
//! Returns the size [in bytes] of `typeId`.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG uint32_t sizeOf(TypeId typeId) noexcept { return _typeData.sizeOf[uint32_t(typeId)]; }
//! Tests whether a given type `typeId` is between `a` and `b`.
static ASMJIT_INLINE_NODEBUG constexpr bool isBetween(TypeId typeId, TypeId a, TypeId b) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isBetween(TypeId typeId, TypeId a, TypeId b) noexcept {
return Support::isBetween(uint32_t(typeId), uint32_t(a), uint32_t(b));
}
//! Tests whether a given type `typeId` is \ref TypeId::kVoid.
static ASMJIT_INLINE_NODEBUG constexpr bool isVoid(TypeId typeId) noexcept { return typeId == TypeId::kVoid; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVoid(TypeId typeId) noexcept { return typeId == TypeId::kVoid; }
//! Tests whether a given type `typeId` is a valid non-void type.
static ASMJIT_INLINE_NODEBUG constexpr bool isValid(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kVec512End); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isValid(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kVec512End); }
//! Tests whether a given type `typeId` is scalar (has no vector part).
static ASMJIT_INLINE_NODEBUG constexpr bool isScalar(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kBaseStart, TypeId::_kBaseEnd); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isScalar(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kBaseStart, TypeId::_kBaseEnd); }
//! Tests whether a given type `typeId` is abstract, which means that its size depends on register size.
static ASMJIT_INLINE_NODEBUG constexpr bool isAbstract(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kIntPtr, TypeId::kUIntPtr); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isAbstract(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kIntPtr, TypeId::kUIntPtr); }
//! Tests whether a given type is a scalar integer (signed or unsigned) of any size.
static ASMJIT_INLINE_NODEBUG constexpr bool isInt(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kIntEnd); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isInt(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kIntEnd); }
//! Tests whether a given type is a scalar 8-bit integer (signed).
static ASMJIT_INLINE_NODEBUG constexpr bool isInt8(TypeId typeId) noexcept { return typeId == TypeId::kInt8; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isInt8(TypeId typeId) noexcept { return typeId == TypeId::kInt8; }
//! Tests whether a given type is a scalar 8-bit integer (unsigned).
static ASMJIT_INLINE_NODEBUG constexpr bool isUInt8(TypeId typeId) noexcept { return typeId == TypeId::kUInt8; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isUInt8(TypeId typeId) noexcept { return typeId == TypeId::kUInt8; }
//! Tests whether a given type is a scalar 16-bit integer (signed).
static ASMJIT_INLINE_NODEBUG constexpr bool isInt16(TypeId typeId) noexcept { return typeId == TypeId::kInt16; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isInt16(TypeId typeId) noexcept { return typeId == TypeId::kInt16; }
//! Tests whether a given type is a scalar 16-bit integer (unsigned).
static ASMJIT_INLINE_NODEBUG constexpr bool isUInt16(TypeId typeId) noexcept { return typeId == TypeId::kUInt16; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isUInt16(TypeId typeId) noexcept { return typeId == TypeId::kUInt16; }
//! Tests whether a given type is a scalar 32-bit integer (signed).
static ASMJIT_INLINE_NODEBUG constexpr bool isInt32(TypeId typeId) noexcept { return typeId == TypeId::kInt32; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isInt32(TypeId typeId) noexcept { return typeId == TypeId::kInt32; }
//! Tests whether a given type is a scalar 32-bit integer (unsigned).
static ASMJIT_INLINE_NODEBUG constexpr bool isUInt32(TypeId typeId) noexcept { return typeId == TypeId::kUInt32; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isUInt32(TypeId typeId) noexcept { return typeId == TypeId::kUInt32; }
//! Tests whether a given type is a scalar 64-bit integer (signed).
static ASMJIT_INLINE_NODEBUG constexpr bool isInt64(TypeId typeId) noexcept { return typeId == TypeId::kInt64; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isInt64(TypeId typeId) noexcept { return typeId == TypeId::kInt64; }
//! Tests whether a given type is a scalar 64-bit integer (unsigned).
static ASMJIT_INLINE_NODEBUG constexpr bool isUInt64(TypeId typeId) noexcept { return typeId == TypeId::kUInt64; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isUInt64(TypeId typeId) noexcept { return typeId == TypeId::kUInt64; }
//! Tests whether a given type is an 8-bit general purpose register representing either signed or unsigned 8-bit integer.
static ASMJIT_INLINE_NODEBUG constexpr bool isGp8(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt8, TypeId::kUInt8); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isGp8(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt8, TypeId::kUInt8); }
//! Tests whether a given type is a 16-bit general purpose register representing either signed or unsigned 16-bit integer
static ASMJIT_INLINE_NODEBUG constexpr bool isGp16(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt16, TypeId::kUInt16); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isGp16(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt16, TypeId::kUInt16); }
//! Tests whether a given type is a 32-bit general purpose register representing either signed or unsigned 32-bit integer
static ASMJIT_INLINE_NODEBUG constexpr bool isGp32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt32, TypeId::kUInt32); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isGp32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt32, TypeId::kUInt32); }
//! Tests whether a given type is a 64-bit general purpose register representing either signed or unsigned 64-bit integer
static ASMJIT_INLINE_NODEBUG constexpr bool isGp64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt64, TypeId::kUInt64); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isGp64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt64, TypeId::kUInt64); }
//! Tests whether a given type is a scalar floating point of any size.
static ASMJIT_INLINE_NODEBUG constexpr bool isFloat(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kFloatStart, TypeId::_kFloatEnd); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isFloat(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kFloatStart, TypeId::_kFloatEnd); }
//! Tests whether a given type is a scalar 32-bit float.
static ASMJIT_INLINE_NODEBUG constexpr bool isFloat32(TypeId typeId) noexcept { return typeId == TypeId::kFloat32; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isFloat32(TypeId typeId) noexcept { return typeId == TypeId::kFloat32; }
//! Tests whether a given type is a scalar 64-bit float.
static ASMJIT_INLINE_NODEBUG constexpr bool isFloat64(TypeId typeId) noexcept { return typeId == TypeId::kFloat64; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isFloat64(TypeId typeId) noexcept { return typeId == TypeId::kFloat64; }
//! Tests whether a given type is a scalar 80-bit float.
static ASMJIT_INLINE_NODEBUG constexpr bool isFloat80(TypeId typeId) noexcept { return typeId == TypeId::kFloat80; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isFloat80(TypeId typeId) noexcept { return typeId == TypeId::kFloat80; }
//! Tests whether a given type is a mask register of any size.
static ASMJIT_INLINE_NODEBUG constexpr bool isMask(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMaskStart, TypeId::_kMaskEnd); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMask(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMaskStart, TypeId::_kMaskEnd); }
//! Tests whether a given type is an 8-bit mask register.
static ASMJIT_INLINE_NODEBUG constexpr bool isMask8(TypeId typeId) noexcept { return typeId == TypeId::kMask8; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMask8(TypeId typeId) noexcept { return typeId == TypeId::kMask8; }
//! Tests whether a given type is an 16-bit mask register.
static ASMJIT_INLINE_NODEBUG constexpr bool isMask16(TypeId typeId) noexcept { return typeId == TypeId::kMask16; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMask16(TypeId typeId) noexcept { return typeId == TypeId::kMask16; }
//! Tests whether a given type is an 32-bit mask register.
static ASMJIT_INLINE_NODEBUG constexpr bool isMask32(TypeId typeId) noexcept { return typeId == TypeId::kMask32; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMask32(TypeId typeId) noexcept { return typeId == TypeId::kMask32; }
//! Tests whether a given type is an 64-bit mask register.
static ASMJIT_INLINE_NODEBUG constexpr bool isMask64(TypeId typeId) noexcept { return typeId == TypeId::kMask64; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMask64(TypeId typeId) noexcept { return typeId == TypeId::kMask64; }
//! Tests whether a given type is an MMX register.
//!
//! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness.
static ASMJIT_INLINE_NODEBUG constexpr bool isMmx(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMmxStart, TypeId::_kMmxEnd); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMmx(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMmxStart, TypeId::_kMmxEnd); }
//! Tests whether a given type is an MMX register, which only uses the low 32 bits of data (only specific cases).
//!
//! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness.
static ASMJIT_INLINE_NODEBUG constexpr bool isMmx32(TypeId typeId) noexcept { return typeId == TypeId::kMmx32; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMmx32(TypeId typeId) noexcept { return typeId == TypeId::kMmx32; }
//! Tests whether a given type is an MMX register, which uses 64 bits of data (default).
//!
//! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness.
static ASMJIT_INLINE_NODEBUG constexpr bool isMmx64(TypeId typeId) noexcept { return typeId == TypeId::kMmx64; }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMmx64(TypeId typeId) noexcept { return typeId == TypeId::kMmx64; }
//! Tests whether a given type is a vector register of any size.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec512End); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec512End); }
//! Tests whether a given type is a 32-bit or 32-bit view of a vector register.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec32End); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec32End); }
//! Tests whether a given type is a 64-bit or 64-bit view of a vector register.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec64Start, TypeId::_kVec64End); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec64Start, TypeId::_kVec64End); }
//! Tests whether a given type is a 128-bit or 128-bit view of a vector register.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec128(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec128Start, TypeId::_kVec128End); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec128(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec128Start, TypeId::_kVec128End); }
//! Tests whether a given type is a 256-bit or 256-bit view of a vector register.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec256(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec256Start, TypeId::_kVec256End); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec256(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec256Start, TypeId::_kVec256End); }
//! Tests whether a given type is a 512-bit or 512-bit view of a vector register.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec512(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec512Start, TypeId::_kVec512End); }
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec512(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec512Start, TypeId::_kVec512End); }
//! \cond
enum TypeCategory : uint32_t {
@@ -271,38 +337,32 @@ struct TypeIdOfT_ByCategory {}; // Fails if not specialized.
template<typename T>
struct TypeIdOfT_ByCategory<T, kTypeCategoryIntegral> {
enum : uint32_t {
kTypeId = uint32_t(
(sizeof(T) == 1 && std::is_signed<T>::value) ? TypeId::kInt8 :
(sizeof(T) == 1 && !std::is_signed<T>::value) ? TypeId::kUInt8 :
(sizeof(T) == 2 && std::is_signed<T>::value) ? TypeId::kInt16 :
(sizeof(T) == 2 && !std::is_signed<T>::value) ? TypeId::kUInt16 :
(sizeof(T) == 4 && std::is_signed<T>::value) ? TypeId::kInt32 :
(sizeof(T) == 4 && !std::is_signed<T>::value) ? TypeId::kUInt32 :
(sizeof(T) == 8 && std::is_signed<T>::value) ? TypeId::kInt64 :
(sizeof(T) == 8 && !std::is_signed<T>::value) ? TypeId::kUInt64 : TypeId::kVoid)
};
static inline constexpr uint32_t kTypeId = uint32_t(
(sizeof(T) == 1 && std::is_signed_v<T>) ? TypeId::kInt8 :
(sizeof(T) == 1 && !std::is_signed_v<T>) ? TypeId::kUInt8 :
(sizeof(T) == 2 && std::is_signed_v<T>) ? TypeId::kInt16 :
(sizeof(T) == 2 && !std::is_signed_v<T>) ? TypeId::kUInt16 :
(sizeof(T) == 4 && std::is_signed_v<T>) ? TypeId::kInt32 :
(sizeof(T) == 4 && !std::is_signed_v<T>) ? TypeId::kUInt32 :
(sizeof(T) == 8 && std::is_signed_v<T>) ? TypeId::kInt64 :
(sizeof(T) == 8 && !std::is_signed_v<T>) ? TypeId::kUInt64 : TypeId::kVoid);
};
template<typename T>
struct TypeIdOfT_ByCategory<T, kTypeCategoryFloatingPoint> {
enum : uint32_t {
kTypeId = uint32_t(
(sizeof(T) == 4 ) ? TypeId::kFloat32 :
(sizeof(T) == 8 ) ? TypeId::kFloat64 :
(sizeof(T) >= 10) ? TypeId::kFloat80 : TypeId::kVoid)
};
static inline constexpr uint32_t kTypeId = uint32_t(
(sizeof(T) == 4 ) ? TypeId::kFloat32 :
(sizeof(T) == 8 ) ? TypeId::kFloat64 :
(sizeof(T) >= 10) ? TypeId::kFloat80 : TypeId::kVoid);
};
template<typename T>
struct TypeIdOfT_ByCategory<T, kTypeCategoryEnum>
: public TypeIdOfT_ByCategory<typename std::underlying_type<T>::type, kTypeCategoryIntegral> {};
: public TypeIdOfT_ByCategory<std::underlying_type_t<T>, kTypeCategoryIntegral> {};
template<typename T>
struct TypeIdOfT_ByCategory<T, kTypeCategoryFunction> {
enum : uint32_t {
kTypeId = uint32_t(TypeId::kUIntPtr)
};
static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kUIntPtr);
};
//! \endcond
@@ -311,37 +371,33 @@ struct TypeIdOfT_ByCategory<T, kTypeCategoryFunction> {
template<typename T>
struct TypeIdOfT {
//! TypeId of C++ type `T`.
static constexpr TypeId kTypeId = _TypeIdDeducedAtCompileTime_;
static inline constexpr TypeId kTypeId = _TypeIdDeducedAtCompileTime_;
};
#else
template<typename T>
struct TypeIdOfT
: public TypeIdOfT_ByCategory<T,
std::is_enum<T>::value ? kTypeCategoryEnum :
std::is_integral<T>::value ? kTypeCategoryIntegral :
std::is_floating_point<T>::value ? kTypeCategoryFloatingPoint :
std::is_function<T>::value ? kTypeCategoryFunction : kTypeCategoryUnknown> {};
std::is_enum_v<T> ? kTypeCategoryEnum :
std::is_integral_v<T> ? kTypeCategoryIntegral :
std::is_floating_point_v<T> ? kTypeCategoryFloatingPoint :
std::is_function_v<T> ? kTypeCategoryFunction : kTypeCategoryUnknown> {};
#endif
//! \cond
template<typename T>
struct TypeIdOfT<T*> {
enum : uint32_t {
kTypeId = uint32_t(TypeId::kUIntPtr)
};
static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kUIntPtr);
};
template<typename T>
struct TypeIdOfT<T&> {
enum : uint32_t {
kTypeId = uint32_t(TypeId::kUIntPtr)
};
static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kUIntPtr);
};
//! \endcond
//! Returns a corresponding \ref TypeId of `T` type.
template<typename T>
static ASMJIT_INLINE_NODEBUG constexpr TypeId typeIdOfT() noexcept { return TypeId(TypeIdOfT<T>::kTypeId); }
static ASMJIT_INLINE_CONSTEXPR TypeId typeIdOfT() noexcept { return TypeId(TypeIdOfT<T>::kTypeId); }
//! Returns offset needed to convert a `kIntPtr` and `kUIntPtr` TypeId into a type that matches `registerSize`
//! (general-purpose register size). If you find such TypeId it's then only about adding the offset to it.
@@ -360,18 +416,21 @@ static ASMJIT_INLINE_NODEBUG constexpr TypeId typeIdOfT() noexcept { return Type
//! // The same, but by using TypeUtils::deabstract() function.
//! typeId = TypeUtils::deabstract(typeId, deabstractDelta);
//! ```
static ASMJIT_INLINE_NODEBUG constexpr uint32_t deabstractDeltaOfSize(uint32_t registerSize) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR uint32_t deabstractDeltaOfSize(uint32_t registerSize) noexcept {
return registerSize >= 8 ? uint32_t(TypeId::kInt64) - uint32_t(TypeId::kIntPtr)
: uint32_t(TypeId::kInt32) - uint32_t(TypeId::kIntPtr);
}
//! Deabstracts a given `typeId` into a native type by using `deabstractDelta`, which was previously
//! calculated by calling \ref deabstractDeltaOfSize() with a target native register size.
static ASMJIT_INLINE_NODEBUG constexpr TypeId deabstract(TypeId typeId, uint32_t deabstractDelta) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR TypeId deabstract(TypeId typeId, uint32_t deabstractDelta) noexcept {
return isAbstract(typeId) ? TypeId(uint32_t(typeId) + deabstractDelta) : typeId;
}
static ASMJIT_INLINE_NODEBUG constexpr TypeId scalarToVector(TypeId scalarTypeId, TypeId vecStartId) noexcept {
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR TypeId scalarToVector(TypeId scalarTypeId, TypeId vecStartId) noexcept {
return TypeId(uint32_t(vecStartId) + uint32_t(scalarTypeId) - uint32_t(TypeId::kInt8));
}
@@ -410,14 +469,12 @@ struct Float64 {};
} // {Type}
//! \cond
#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \
namespace TypeUtils { \
template<> \
struct TypeIdOfT<T> { \
enum : uint32_t { \
kTypeId = uint32_t(TYPE_ID) \
}; \
}; \
#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \
namespace TypeUtils { \
template<> \
struct TypeIdOfT<T> { \
static inline constexpr uint32_t kTypeId = uint32_t(TYPE_ID); \
}; \
}
ASMJIT_DEFINE_TYPE_ID(void , TypeId::kVoid);

View File

@@ -146,7 +146,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(VirtMem)
// Virtual Memory Utilities
// ========================
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static const constexpr MemoryFlags dualMappingFilter[2] = {
MemoryFlags::kAccessWrite | MemoryFlags::kMMapMaxAccessWrite,
MemoryFlags::kAccessExecute | MemoryFlags::kMMapMaxAccessExecute
@@ -162,8 +162,9 @@ struct ScopedHandle {
: value(nullptr) {}
inline ~ScopedHandle() noexcept {
if (value != nullptr)
if (value != nullptr) {
::CloseHandle(value);
}
}
HANDLE value;
@@ -191,12 +192,15 @@ static DWORD protectFlagsFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
DWORD protectFlags;
// READ|WRITE|EXECUTE.
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute))
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) {
protectFlags = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
else if (Support::test(memoryFlags, MemoryFlags::kAccessRW))
}
else if (Support::test(memoryFlags, MemoryFlags::kAccessRW)) {
protectFlags = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? PAGE_READWRITE : PAGE_READONLY;
else
}
else {
protectFlags = PAGE_NOACCESS;
}
// Any other flags to consider?
return protectFlags;
@@ -204,24 +208,28 @@ static DWORD protectFlagsFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
static DWORD desiredAccessFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
DWORD access = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? FILE_MAP_WRITE : FILE_MAP_READ;
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute))
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) {
access |= FILE_MAP_EXECUTE;
}
return access;
}
static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept {
HardenedRuntimeFlags flags = HardenedRuntimeFlags::kNone;
if (hasDualMappingSupport())
if (hasDualMappingSupport()) {
flags |= HardenedRuntimeFlags::kDualMapping;
}
return flags;
}
Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
*p = nullptr;
if (size == 0)
if (size == 0) {
return DebugUtils::errored(kErrorInvalidArgument);
}
DWORD allocationType = MEM_COMMIT | MEM_RESERVE;
DWORD protectFlags = protectFlagsFromMemoryFlags(memoryFlags);
@@ -230,18 +238,21 @@ Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
size_t lpSize = largePageSize();
// Does it make sense to call VirtualAlloc() if we failed to query large page size?
if (lpSize == 0)
if (lpSize == 0) {
return DebugUtils::errored(kErrorFeatureNotEnabled);
}
if (!Support::isAligned(size, lpSize))
if (!Support::isAligned(size, lpSize)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
allocationType |= MEM_LARGE_PAGES;
}
void* result = ::VirtualAlloc(nullptr, size, allocationType, protectFlags);
if (!result)
if (!result) {
return DebugUtils::errored(kErrorOutOfMemory);
}
*p = result;
return kErrorOk;
@@ -249,10 +260,14 @@ Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
Error release(void* p, size_t size) noexcept {
DebugUtils::unused(size);
// NOTE: If the `dwFreeType` parameter is MEM_RELEASE, `size` parameter must be zero.
constexpr DWORD dwFreeType = MEM_RELEASE;
if (ASMJIT_UNLIKELY(!::VirtualFree(p, 0, dwFreeType)))
if (ASMJIT_UNLIKELY(!::VirtualFree(p, 0, dwFreeType))) {
return DebugUtils::errored(kErrorInvalidArgument);
}
return kErrorOk;
}
@@ -260,8 +275,9 @@ Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept {
DWORD protectFlags = protectFlagsFromMemoryFlags(memoryFlags);
DWORD oldFlags;
if (::VirtualProtect(p, size, protectFlags, &oldFlags))
if (::VirtualProtect(p, size, protectFlags, &oldFlags)) {
return kErrorOk;
}
return DebugUtils::errored(kErrorInvalidArgument);
}
@@ -270,8 +286,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
dm->rx = nullptr;
dm->rw = nullptr;
if (size == 0)
if (size == 0) {
return DebugUtils::errored(kErrorInvalidArgument);
}
ScopedHandle handle;
handle.value = ::CreateFileMappingW(
@@ -282,8 +299,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
(DWORD)(size & 0xFFFFFFFFu),
nullptr);
if (ASMJIT_UNLIKELY(!handle.value))
if (ASMJIT_UNLIKELY(!handle.value)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
void* ptr[2];
for (uint32_t i = 0; i < 2; i++) {
@@ -292,8 +310,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
ptr[i] = ::MapViewOfFile(handle.value, desiredAccess, 0, 0, size);
if (ptr[i] == nullptr) {
if (i == 1u)
if (i == 1u) {
::UnmapViewOfFile(ptr[0]);
}
return DebugUtils::errored(kErrorOutOfMemory);
}
}
@@ -307,14 +326,17 @@ Error releaseDualMapping(DualMapping* dm, size_t size) noexcept {
DebugUtils::unused(size);
bool failed = false;
if (!::UnmapViewOfFile(dm->rx))
if (!::UnmapViewOfFile(dm->rx)) {
failed = true;
}
if (dm->rx != dm->rw && !UnmapViewOfFile(dm->rw))
if (dm->rx != dm->rw && !UnmapViewOfFile(dm->rw)) {
failed = true;
}
if (failed)
if (failed) {
return DebugUtils::errored(kErrorInvalidArgument);
}
dm->rx = nullptr;
dm->rw = nullptr;
@@ -342,7 +364,7 @@ struct KernelVersion {
inline bool ge(long major, long minor) const noexcept { return ver[0] > major || (ver[0] == major && ver[1] >= minor); }
};
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static KernelVersion getKernelVersion() noexcept {
KernelVersion out {};
struct utsname buf {};
@@ -368,7 +390,7 @@ static KernelVersion getKernelVersion() noexcept {
#endif // getKernelVersion
// Translates libc errors specific to VirtualMemory mapping to `asmjit::Error`.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static Error asmjitErrorFromErrno(int e) noexcept {
switch (e) {
case EACCES:
@@ -391,20 +413,20 @@ static Error asmjitErrorFromErrno(int e) noexcept {
}
}
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static MemoryFlags maxAccessFlagsToRegularAccessFlags(MemoryFlags memoryFlags) noexcept {
static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value;
return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kMMapMaxAccessRWX) >> kMaxProtShift);
}
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static MemoryFlags regularAccessFlagsToMaxAccessFlags(MemoryFlags memoryFlags) noexcept {
static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value;
return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kAccessRWX) << kMaxProtShift);
}
// Returns `mmap()` protection flags from \ref MemoryFlags.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static int mmProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
int protection = 0;
if (Support::test(memoryFlags, MemoryFlags::kAccessRead)) protection |= PROT_READ;
@@ -418,7 +440,7 @@ static int mmProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
// Uses:
// - `PROT_MPROTECT()` on NetBSD.
// - `PROT_MAX()` when available on other BSDs.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static inline int mmMaxProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
MemoryFlags acc = maxAccessFlagsToRegularAccessFlags(memoryFlags);
if (acc != MemoryFlags::kNone) {
@@ -450,8 +472,10 @@ static size_t detectLargePageSize() noexcept {
return (getpagesizes(pageSize.data(), 2) < 2) ? 0 : uint32_t(pageSize[1]);
#elif defined(__linux__)
StringTmp<128> storage;
if (OSUtils::readFile("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", storage, 16) != kErrorOk || storage.empty())
if (OSUtils::readFile("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", storage, 16) != kErrorOk || storage.empty()) {
return 0u;
}
// The first value should be the size of the page (hpage_pmd_size).
size_t largePageSize = 0;
@@ -461,8 +485,9 @@ static size_t detectLargePageSize() noexcept {
for (size_t i = 0; i < bufSize; i++) {
uint32_t digit = uint32_t(uint8_t(buf[i]) - uint8_t('0'));
if (digit >= 10u)
if (digit >= 10u) {
break;
}
largePageSize = largePageSize * 10 + digit;
}
@@ -514,7 +539,7 @@ static uint32_t getMfdExecFlag() noexcept {
// It's not fully random, just to avoid collisions when opening TMP or SHM file.
ASMJIT_MAYBE_UNUSED
[[maybe_unused]]
static uint64_t generateRandomBits(uintptr_t stackPtr, uint32_t attempt) noexcept {
static std::atomic<uint32_t> internalCounter;
@@ -573,14 +598,17 @@ public:
if (!memfd_create_not_supported) {
_fd = (int)syscall(__NR_memfd_create, "vmem", MFD_CLOEXEC | getMfdExecFlag());
if (ASMJIT_LIKELY(_fd >= 0))
if (ASMJIT_LIKELY(_fd >= 0)) {
return kErrorOk;
}
int e = errno;
if (e == ENOSYS)
if (e == ENOSYS) {
memfd_create_not_supported = 1;
else
}
else {
return DebugUtils::errored(asmjitErrorFromErrno(e));
}
}
#endif // __linux__ && __NR_memfd_create
@@ -589,10 +617,12 @@ public:
DebugUtils::unused(preferTmpOverDevShm);
_fd = ::shm_open(SHM_ANON, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
if (ASMJIT_LIKELY(_fd >= 0))
if (ASMJIT_LIKELY(_fd >= 0)) {
return kErrorOk;
else
}
else {
return DebugUtils::errored(asmjitErrorFromErrno(errno));
}
#else
// POSIX API. We have to generate somehow a unique name, so use `generateRandomBits()` helper. To prevent
// having file collisions we use `shm_open()` with flags that require creation of the file so we never open
@@ -625,8 +655,9 @@ public:
#endif
int e = errno;
if (e != EEXIST)
if (e != EEXIST) {
return DebugUtils::errored(asmjitErrorFromErrno(e));
}
}
return DebugUtils::errored(kErrorFailedToOpenAnonymousMemory);
@@ -659,8 +690,9 @@ public:
Error allocate(size_t size) noexcept {
// TODO: Improve this by using `posix_fallocate()` when available.
if (ASMJIT_FILE64_API(ftruncate)(_fd, off_t(size)) != 0)
if (ASMJIT_FILE64_API(ftruncate)(_fd, off_t(size)) != 0) {
return DebugUtils::errored(asmjitErrorFromErrno(errno));
}
return kErrorOk;
}
@@ -725,11 +757,9 @@ static bool hasHardenedRuntime() noexcept {
#else
static std::atomic<uint32_t> cachedHardenedFlag;
enum HardenedFlag : uint32_t {
kHardenedFlagUnknown = 0,
kHardenedFlagDisabled = 1,
kHardenedFlagEnabled = 2
};
constexpr uint32_t kHardenedFlagUnknown = 0;
constexpr uint32_t kHardenedFlagDisabled = 1;
constexpr uint32_t kHardenedFlagEnabled = 2;
uint32_t flag = cachedHardenedFlag.load();
if (flag == kHardenedFlagUnknown) {
@@ -784,12 +814,14 @@ static inline int mmMapJitFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
//
// MAP_JIT is not required when dual-mapping memory and is incompatible with MAP_SHARED, so it will not be
// added when the latter is enabled.
bool useMapJit = (Support::test(memoryFlags, MemoryFlags::kMMapEnableMapJit) || hasHardenedRuntime())
&& !Support::test(memoryFlags, MemoryFlags::kMapShared);
if (useMapJit)
bool useMapJit = (Support::test(memoryFlags, MemoryFlags::kMMapEnableMapJit) || hasHardenedRuntime()) &&
!Support::test(memoryFlags, MemoryFlags::kMapShared);
if (useMapJit) {
return hasMapJitSupport() ? int(MAP_JIT) : 0;
else
}
else {
return 0;
}
#else
DebugUtils::unused(memoryFlags);
return 0;
@@ -807,40 +839,48 @@ static inline bool hasDualMappingSupport() noexcept {
static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept {
HardenedRuntimeFlags flags = HardenedRuntimeFlags::kNone;
if (hasHardenedRuntime())
if (hasHardenedRuntime()) {
flags |= HardenedRuntimeFlags::kEnabled;
}
if (hasMapJitSupport())
if (hasMapJitSupport()) {
flags |= HardenedRuntimeFlags::kMapJit;
}
if (hasDualMappingSupport())
if (hasDualMappingSupport()) {
flags |= HardenedRuntimeFlags::kDualMapping;
}
return flags;
}
static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd = -1, off_t offset = 0) noexcept {
*p = nullptr;
if (size == 0)
if (size == 0) {
return DebugUtils::errored(kErrorInvalidArgument);
}
int protection = mmProtFromMemoryFlags(memoryFlags) | mmMaxProtFromMemoryFlags(memoryFlags);
int mmFlags = mmMapJitFromMemoryFlags(memoryFlags);
mmFlags |= Support::test(memoryFlags, MemoryFlags::kMapShared) ? MAP_SHARED : MAP_PRIVATE;
if (fd == -1)
if (fd == -1) {
mmFlags |= MAP_ANONYMOUS;
}
bool useLargePages = Support::test(memoryFlags, VirtMem::MemoryFlags::kMMapLargePages);
if (useLargePages) {
#if defined(__linux__)
size_t lpSize = largePageSize();
if (lpSize == 0)
if (lpSize == 0) {
return DebugUtils::errored(kErrorFeatureNotEnabled);
}
if (!Support::isAligned(size, lpSize))
if (!Support::isAligned(size, lpSize)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
unsigned lpSizeLog2 = Support::ctz(lpSize);
mmFlags |= int(unsigned(MAP_HUGETLB) | (lpSizeLog2 << MAP_HUGE_SHIFT));
@@ -850,8 +890,9 @@ static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd =
}
void* ptr = mmap(nullptr, size, protection, mmFlags, fd, offset);
if (ptr == MAP_FAILED)
if (ptr == MAP_FAILED) {
return DebugUtils::errored(asmjitErrorFromErrno(errno));
}
#if defined(MADV_HUGEPAGE)
if (useLargePages) {
@@ -864,8 +905,9 @@ static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd =
}
static Error unmapMemory(void* p, size_t size) noexcept {
if (ASMJIT_UNLIKELY(munmap(p, size) != 0))
if (ASMJIT_UNLIKELY(munmap(p, size) != 0)) {
return DebugUtils::errored(asmjitErrorFromErrno(errno));
}
return kErrorOk;
}
@@ -880,9 +922,9 @@ Error release(void* p, size_t size) noexcept {
Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept {
int protection = mmProtFromMemoryFlags(memoryFlags);
if (mprotect(p, size, protection) == 0)
if (mprotect(p, size, protection) == 0) {
return kErrorOk;
}
return DebugUtils::errored(asmjitErrorFromErrno(errno));
}
@@ -894,12 +936,14 @@ static Error unmapDualMapping(DualMapping* dm, size_t size) noexcept {
Error err1 = unmapMemory(dm->rx, size);
Error err2 = kErrorOk;
if (dm->rx != dm->rw)
if (dm->rx != dm->rw) {
err2 = unmapMemory(dm->rw, size);
}
// We can report only one error, so report the first...
if (err1 || err2)
if (err1 || err2) {
return DebugUtils::errored(err1 ? err1 : err2);
}
dm->rx = nullptr;
dm->rw = nullptr;
@@ -964,8 +1008,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M
int rwProtectFlags = VM_PROT_READ | VM_PROT_WRITE;
int rxProtectFlags = VM_PROT_READ;
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute))
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) {
rxProtectFlags |= VM_PROT_EXECUTE;
}
kern_return_t result {};
do {
@@ -992,8 +1037,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M
&maxProt, // max_protection
VM_INHERIT_DEFAULT); // inheritance
if (result != KERN_SUCCESS)
if (result != KERN_SUCCESS) {
break;
}
dm.rw = (void*)remappedAddr;
@@ -1011,8 +1057,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M
setMaximum, // set_maximum
rxProtectFlags); // new_protection
if (result != KERN_SUCCESS)
if (result != KERN_SUCCESS) {
break;
}
result = vm_protect(task, // target_task
(vm_address_t)dm.rw, // address
@@ -1020,8 +1067,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M
setMaximum, // set_maximum
rwProtectFlags); // new_protection
if (result != KERN_SUCCESS)
if (result != KERN_SUCCESS) {
break;
}
}
} while (0);
@@ -1053,8 +1101,9 @@ static Error allocDualMappingUsingFile(DualMapping* dm, size_t size, MemoryFlags
MemoryFlags restrictedMemoryFlags = memoryFlags & ~dualMappingFilter[i];
Error err = mapMemory(&ptr[i], size, restrictedMemoryFlags | MemoryFlags::kMapShared, anonMem.fd(), 0);
if (err != kErrorOk) {
if (i == 1)
if (i == 1) {
unmapMemory(ptr[0], size);
}
return err;
}
}
@@ -1073,8 +1122,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
DebugUtils::unused(size, memoryFlags);
return DebugUtils::errored(kErrorFeatureNotEnabled);
#else
if (off_t(size) <= 0)
if (off_t(size) <= 0) {
return DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge);
}
#if defined(ASMJIT_ANONYMOUS_MEMORY_USE_REMAPDUP)
return allocDualMappingUsingRemapdup(dm, size, memoryFlags);
@@ -1143,11 +1193,13 @@ size_t largePageSize() noexcept {
static constexpr size_t kNotAvailable = 1;
size_t size = largePageSize.load();
if (ASMJIT_LIKELY(size > kNotAvailable))
if (ASMJIT_LIKELY(size > kNotAvailable)) {
return size;
}
if (size == kNotAvailable)
if (size == kNotAvailable) {
return 0;
}
size = detectLargePageSize();
largePageSize.store(size != 0 ? size : kNotAvailable);

View File

@@ -50,6 +50,7 @@ struct Info {
};
//! Returns virtual memory information, see `VirtMem::Info` for more details.
[[nodiscard]]
ASMJIT_API Info info() noexcept;
//! Returns the size of the smallest large page supported.
@@ -59,6 +60,7 @@ ASMJIT_API Info info() noexcept;
//!
//! Returns either the detected large page size or 0, if large page support is either not supported by AsmJit
//! or not accessible to the process.
[[nodiscard]]
ASMJIT_API size_t largePageSize() noexcept;
//! Virtual memory access and mmap-specific flags.
@@ -164,15 +166,18 @@ ASMJIT_DEFINE_ENUM_FLAGS(MemoryFlags)
//!
//! \note `size` should be aligned to page size, use \ref VirtMem::info() to obtain it. Invalid size will not be
//! corrected by the implementation and the allocation would not succeed in such case.
[[nodiscard]]
ASMJIT_API Error alloc(void** p, size_t size, MemoryFlags flags) noexcept;
//! Releases virtual memory previously allocated by \ref VirtMem::alloc().
//!
//! \note The size must be the same as used by \ref VirtMem::alloc(). If the size is not the same value the call
//! will fail on any POSIX system, but pass on Windows, because it's implemented differently.
[[nodiscard]]
ASMJIT_API Error release(void* p, size_t size) noexcept;
//! A cross-platform wrapper around `mprotect()` (POSIX) and `VirtualProtect()` (Windows).
[[nodiscard]]
ASMJIT_API Error protect(void* p, size_t size, MemoryFlags flags) noexcept;
//! Dual memory mapping used to map an anonymous memory into two memory regions where one region is read-only, but
@@ -195,11 +200,13 @@ struct DualMapping {
//! release the memory returned by `allocDualMapping()` as that would fail on Windows.
//!
//! \remarks Both pointers in `dm` would be set to `nullptr` if the function fails.
[[nodiscard]]
ASMJIT_API Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags flags) noexcept;
//! Releases virtual memory mapping previously allocated by \ref VirtMem::allocDualMapping().
//!
//! \remarks Both pointers in `dm` would be set to `nullptr` if the function succeeds.
[[nodiscard]]
ASMJIT_API Error releaseDualMapping(DualMapping* dm, size_t size) noexcept;
//! Hardened runtime flags.
@@ -238,12 +245,14 @@ struct HardenedRuntimeInfo {
//! \{
//! Tests whether the hardened runtime `flag` is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(HardenedRuntimeFlags flag) const noexcept { return Support::test(flags, flag); }
//! \}
};
//! Returns runtime features provided by the OS.
[[nodiscard]]
ASMJIT_API HardenedRuntimeInfo hardenedRuntimeInfo() noexcept;
//! Values that can be used with `protectJitMemory()` function.
@@ -296,10 +305,11 @@ public:
//! \{
//! Makes the given memory block RW protected.
ASMJIT_FORCE_INLINE ProtectJitReadWriteScope(
ASMJIT_INLINE ProtectJitReadWriteScope(
void* rxPtr,
size_t size,
CachePolicy policy = CachePolicy::kDefault) noexcept
CachePolicy policy = CachePolicy::kDefault
) noexcept
: _rxPtr(rxPtr),
_size(size),
_policy(policy) {
@@ -307,11 +317,12 @@ public:
}
//! Makes the memory block RX protected again and flushes instruction cache.
ASMJIT_FORCE_INLINE ~ProtectJitReadWriteScope() noexcept {
ASMJIT_INLINE ~ProtectJitReadWriteScope() noexcept {
protectJitMemory(ProtectJitAccess::kReadExecute);
if (_policy != CachePolicy::kNeverFlush)
if (_policy != CachePolicy::kNeverFlush) {
flushInstructionCache(_rxPtr, _size);
}
}
//! \}

View File

@@ -210,28 +210,36 @@ void* Zone::_alloc(size_t size, size_t alignment) noexcept {
void* Zone::allocZeroed(size_t size, size_t alignment) noexcept {
void* p = alloc(size, alignment);
if (ASMJIT_UNLIKELY(!p))
if (ASMJIT_UNLIKELY(!p)) {
return p;
}
return memset(p, 0, size);
}
void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept {
if (ASMJIT_UNLIKELY(!data || !size))
if (ASMJIT_UNLIKELY(!data || !size)) {
return nullptr;
}
ASMJIT_ASSERT(size != SIZE_MAX);
uint8_t* m = allocT<uint8_t>(size + nullTerminate);
if (ASMJIT_UNLIKELY(!m)) return nullptr;
if (ASMJIT_UNLIKELY(!m)) {
return nullptr;
}
memcpy(m, data, size);
if (nullTerminate) m[size] = '\0';
if (nullTerminate) {
m[size] = '\0';
}
return static_cast<void*>(m);
}
char* Zone::sformat(const char* fmt, ...) noexcept {
if (ASMJIT_UNLIKELY(!fmt))
if (ASMJIT_UNLIKELY(!fmt)) {
return nullptr;
}
char buf[512];
size_t size;
@@ -252,8 +260,9 @@ char* Zone::sformat(const char* fmt, ...) noexcept {
static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::DynamicBlock* block) noexcept {
ZoneAllocator::DynamicBlock* cur = self->_dynamicBlocks;
while (cur) {
if (cur == block)
if (cur == block) {
return true;
}
cur = cur->next;
}
return false;
@@ -334,8 +343,9 @@ void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
size_t blockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;
// Handle a possible overflow.
if (ASMJIT_UNLIKELY(blockOverhead >= SIZE_MAX - size))
if (ASMJIT_UNLIKELY(blockOverhead >= SIZE_MAX - size)) {
return nullptr;
}
void* p = ::malloc(size + blockOverhead);
if (ASMJIT_UNLIKELY(!p)) {
@@ -347,8 +357,9 @@ void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
DynamicBlock* block = static_cast<DynamicBlock*>(p);
DynamicBlock* next = _dynamicBlocks;
if (next)
if (next) {
next->prev = block;
}
block->prev = nullptr;
block->next = next;
@@ -368,7 +379,9 @@ void* ZoneAllocator::_allocZeroed(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());
void* p = _alloc(size, allocatedSize);
if (ASMJIT_UNLIKELY(!p)) return p;
if (ASMJIT_UNLIKELY(!p)) {
return p;
}
return memset(p, 0, allocatedSize);
}
@@ -384,13 +397,16 @@ void ZoneAllocator::_releaseDynamic(void* p, size_t size) noexcept {
DynamicBlock* prev = block->prev;
DynamicBlock* next = block->next;
if (prev)
if (prev) {
prev->next = next;
else
}
else {
_dynamicBlocks = next;
}
if (next)
if (next) {
next->prev = prev;
}
::free(block);
}

View File

@@ -41,16 +41,21 @@ public:
size_t size;
};
enum Limits : size_t {
kMinBlockSize = 256, // The number is ridiculously small, but still possible.
kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 1),
static inline constexpr size_t kMinBlockSize = 256; // The number is ridiculously small, but still possible.
static inline constexpr size_t kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 1);
kMinAlignment = 1,
kMaxAlignment = 64,
static inline constexpr size_t kMinAlignment = 1;
static inline constexpr size_t kMaxAlignment = 64;
kBlockSize = sizeof(Block),
kBlockOverhead = kBlockSize + Globals::kAllocOverhead
};
static inline constexpr size_t kBlockSize = sizeof(Block);
static inline constexpr size_t kBlockOverhead = kBlockSize + Globals::kAllocOverhead;
static ASMJIT_API const Block _zeroBlock;
//! \endcond
//! \name Members
//! \{
//! Pointer in the current block.
uint8_t* _ptr;
@@ -72,9 +77,7 @@ public:
//! Count of allocated blocks.
size_t _blockCount;
static ASMJIT_API const Block _zeroBlock;
//! \endcond
//! \}
//! \name Construction & Destruction
//! \{
@@ -141,25 +144,35 @@ public:
//! \{
//! Returns the default block alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t blockAlignment() const noexcept { return size_t(1) << _blockAlignmentShift; }
//! Returns a minimum block size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t minimumBlockSize() const noexcept { return size_t(1) << _minimumBlockSizeShift; }
//! Returns a maximum block size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t maximumBlockSize() const noexcept { return size_t(1) << _maximumBlockSizeShift; }
//! Tests whether this `Zone` is actually a `ZoneTmp` that uses temporary memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t hasStaticBlock() const noexcept { return _hasStaticBlock; }
//! Returns remaining size of the current block.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t remainingSize() const noexcept { return (size_t)(_end - _ptr); }
//! Returns the current zone cursor (dangerous).
//!
//! This is a function that can be used to get exclusive access to the current block's memory buffer.
template<typename T = uint8_t>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* ptr() noexcept { return reinterpret_cast<T*>(_ptr); }
//! Returns the end of the current zone block, only useful if you use `ptr()`.
template<typename T = uint8_t>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* end() noexcept { return reinterpret_cast<T*>(_end); }
//! Sets the current zone pointer to `ptr` (must be within the current block).
@@ -209,11 +222,14 @@ public:
//!
//! \note This function doesn't respect any alignment. If you need to ensure there is enough room for an aligned
//! allocation you need to call `align()` before calling `ensure()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Error ensure(size_t size) noexcept {
if (ASMJIT_LIKELY(size <= remainingSize()))
if (ASMJIT_LIKELY(size <= remainingSize())) {
return kErrorOk;
else
}
else {
return _alloc(0, 1) ? kErrorOk : DebugUtils::errored(kErrorOutOfMemory);
}
}
//! \}
@@ -252,6 +268,7 @@ public:
//! // Reset or destroy `Zone`.
//! zone.reset();
//! ```
[[nodiscard]]
inline void* alloc(size_t size) noexcept {
if (ASMJIT_UNLIKELY(size > remainingSize()))
return _alloc(size, 1);
@@ -262,6 +279,7 @@ public:
}
//! Allocates the requested memory specified by `size` and `alignment`.
[[nodiscard]]
inline void* alloc(size_t size, size_t alignment) noexcept {
ASMJIT_ASSERT(Support::isPowerOf2(alignment));
uint8_t* ptr = Support::alignUp(_ptr, alignment);
@@ -276,6 +294,7 @@ public:
//! Allocates the requested memory specified by `size` without doing any checks.
//!
//! Can only be called if `remainingSize()` returns size at least equal to `size`.
[[nodiscard]]
inline void* allocNoCheck(size_t size) noexcept {
ASMJIT_ASSERT(remainingSize() >= size);
@@ -287,6 +306,7 @@ public:
//! Allocates the requested memory specified by `size` and `alignment` without doing any checks.
//!
//! Performs the same operation as `Zone::allocNoCheck(size)` with `alignment` applied.
[[nodiscard]]
inline void* allocNoCheck(size_t size, size_t alignment) noexcept {
ASMJIT_ASSERT(Support::isPowerOf2(alignment));
@@ -298,28 +318,33 @@ public:
}
//! Allocates `size` bytes of zeroed memory. See `alloc()` for more details.
[[nodiscard]]
ASMJIT_API void* allocZeroed(size_t size, size_t alignment = 1) noexcept;
//! Like `alloc()`, but the return pointer is casted to `T*`.
template<typename T>
[[nodiscard]]
inline T* allocT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(alloc(size, alignment));
}
//! Like `allocNoCheck()`, but the return pointer is casted to `T*`.
template<typename T>
[[nodiscard]]
inline T* allocNoCheckT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(allocNoCheck(size, alignment));
}
//! Like `allocZeroed()`, but the return pointer is casted to `T*`.
template<typename T>
[[nodiscard]]
inline T* allocZeroedT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(allocZeroed(size, alignment));
}
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T>
[[nodiscard]]
inline T* newT() noexcept {
void* p = alloc(sizeof(T), alignof(T));
if (ASMJIT_UNLIKELY(!p))
@@ -329,6 +354,7 @@ public:
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T, typename... Args>
[[nodiscard]]
inline T* newT(Args&&... args) noexcept {
void* p = alloc(sizeof(T), alignof(T));
if (ASMJIT_UNLIKELY(!p))
@@ -339,28 +365,32 @@ public:
//! \cond INTERNAL
//!
//! Internal alloc function used by other inlines.
[[nodiscard]]
ASMJIT_API void* _alloc(size_t size, size_t alignment) noexcept;
//! \endcond
//! Helper to duplicate data.
[[nodiscard]]
ASMJIT_API void* dup(const void* data, size_t size, bool nullTerminate = false) noexcept;
//! Helper to duplicate data.
[[nodiscard]]
inline void* dupAligned(const void* data, size_t size, size_t alignment, bool nullTerminate = false) noexcept {
align(alignment);
return dup(data, size, nullTerminate);
}
//! Helper to duplicate a formatted string, maximum size is 256 bytes.
[[nodiscard]]
ASMJIT_API char* sformat(const char* str, ...) noexcept;
//! \}
#if !defined(ASMJIT_NO_DEPRECATED)
ASMJIT_DEPRECATED("Use Zone::minimumBlockSize() instead of Zone::blockSize()")
[[deprecated("Use Zone::minimumBlockSize() instead of Zone::blockSize()")]]
ASMJIT_INLINE_NODEBUG size_t blockSize() const noexcept { return minimumBlockSize(); }
ASMJIT_DEPRECATED("Use Zone::hasStaticBlock() instead of Zone::isTemporary()")
[[deprecated("Use Zone::hasStaticBlock() instead of Zone::isTemporary()")]]
ASMJIT_INLINE_NODEBUG bool isTemporary() const noexcept { return hasStaticBlock() != 0u; }
#endif
};
@@ -402,24 +432,22 @@ public:
// In short, we pool chunks of these sizes:
// [32, 64, 96, 128, 192, 256, 320, 384, 448, 512]
enum : uint32_t {
//! How many bytes per a low granularity pool (has to be at least 16).
kLoGranularity = 32,
//! Number of slots of a low granularity pool.
kLoCount = 4,
//! Maximum size of a block that can be allocated in a low granularity pool.
kLoMaxSize = kLoGranularity * kLoCount,
//! How many bytes per a low granularity pool (has to be at least 16).
static inline constexpr uint32_t kLoGranularity = 32;
//! Number of slots of a low granularity pool.
static inline constexpr uint32_t kLoCount = 4;
//! Maximum size of a block that can be allocated in a low granularity pool.
static inline constexpr uint32_t kLoMaxSize = kLoGranularity * kLoCount;
//! How many bytes per a high granularity pool.
kHiGranularity = 64,
//! Number of slots of a high granularity pool.
kHiCount = 6,
//! Maximum size of a block that can be allocated in a high granularity pool.
kHiMaxSize = kLoMaxSize + kHiGranularity * kHiCount,
//! How many bytes per a high granularity pool.
static inline constexpr uint32_t kHiGranularity = 64;
//! Number of slots of a high granularity pool.
static inline constexpr uint32_t kHiCount = 6;
//! Maximum size of a block that can be allocated in a high granularity pool.
static inline constexpr uint32_t kHiMaxSize = kLoMaxSize + kHiGranularity * kHiCount;
//! Alignment of every pointer returned by `alloc()`.
kBlockAlignment = kLoGranularity
};
//! Alignment of every pointer returned by `alloc()`.
static inline constexpr uint32_t kBlockAlignment = kLoGranularity;
//! Single-linked list used to store unused chunks.
struct Slot {
@@ -482,6 +510,7 @@ public:
//! \{
//! Returns the assigned `Zone` of this allocator or null if this `ZoneAllocator` is not initialized.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Zone* zone() const noexcept { return _zone; }
//! \}
@@ -493,24 +522,30 @@ public:
//! Returns the slot index to be used for `size`. Returns `true` if a valid slot has been written to `slot` and
//! `allocatedSize` has been filled with slot exact size (`allocatedSize` can be equal or slightly greater than
//! `size`).
[[nodiscard]]
static inline bool _getSlotIndex(size_t size, uint32_t& slot) noexcept {
ASMJIT_ASSERT(size > 0);
if (size > kHiMaxSize)
if (size > kHiMaxSize) {
return false;
}
if (size <= kLoMaxSize)
if (size <= kLoMaxSize) {
slot = uint32_t((size - 1) / kLoGranularity);
else
}
else {
slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount;
}
return true;
}
//! \overload
[[nodiscard]]
static inline bool _getSlotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(size > 0);
if (size > kHiMaxSize)
if (size > kHiMaxSize) {
return false;
}
if (size <= kLoMaxSize) {
slot = uint32_t((size - 1) / kLoGranularity);
@@ -531,14 +566,19 @@ public:
//! \{
//! \cond INTERNAL
[[nodiscard]]
ASMJIT_API void* _alloc(size_t size, size_t& allocatedSize) noexcept;
[[nodiscard]]
ASMJIT_API void* _allocZeroed(size_t size, size_t& allocatedSize) noexcept;
ASMJIT_API void _releaseDynamic(void* p, size_t size) noexcept;
//! \endcond
//! Allocates `size` bytes of memory, ideally from an available pool.
//!
//! \note `size` can't be zero, it will assert in debug mode in such case.
[[nodiscard]]
inline void* alloc(size_t size) noexcept {
ASMJIT_ASSERT(isInitialized());
size_t allocatedSize;
@@ -547,6 +587,7 @@ public:
//! Like `alloc(size)`, but provides a second argument `allocatedSize` that provides a way to know how big
//! the block returned actually is. This is useful for containers to prevent growing too early.
[[nodiscard]]
inline void* alloc(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());
return _alloc(size, allocatedSize);
@@ -554,11 +595,13 @@ public:
//! Like `alloc()`, but the return pointer is casted to `T*`.
template<typename T>
[[nodiscard]]
inline T* allocT(size_t size = sizeof(T)) noexcept {
return static_cast<T*>(alloc(size));
}
//! Like `alloc(size)`, but returns zeroed memory.
[[nodiscard]]
inline void* allocZeroed(size_t size) noexcept {
ASMJIT_ASSERT(isInitialized());
size_t allocatedSize;
@@ -566,6 +609,7 @@ public:
}
//! Like `alloc(size, allocatedSize)`, but returns zeroed memory.
[[nodiscard]]
inline void* allocZeroed(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());
return _allocZeroed(size, allocatedSize);
@@ -573,24 +617,29 @@ public:
//! Like `allocZeroed()`, but the return pointer is casted to `T*`.
template<typename T>
[[nodiscard]]
inline T* allocZeroedT(size_t size = sizeof(T)) noexcept {
return static_cast<T*>(allocZeroed(size));
}
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T>
[[nodiscard]]
inline T* newT() noexcept {
void* p = allocT<T>();
if (ASMJIT_UNLIKELY(!p))
if (ASMJIT_UNLIKELY(!p)) {
return nullptr;
}
return new(Support::PlacementNew{p}) T();
}
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T, typename... Args>
[[nodiscard]]
inline T* newT(Args&&... args) noexcept {
void* p = allocT<T>();
if (ASMJIT_UNLIKELY(!p))
if (ASMJIT_UNLIKELY(!p)) {
return nullptr;
}
return new(Support::PlacementNew{p}) T(std::forward<Args>(args)...);
}

View File

@@ -172,12 +172,12 @@ void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexce
uint32_t newCount = ZoneHash_primeArray[primeIndex].prime;
ZoneHashNode** oldData = _data;
ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>(
allocator->allocZeroed(size_t(newCount) * sizeof(ZoneHashNode*)));
ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>(allocator->allocZeroed(size_t(newCount) * sizeof(ZoneHashNode*)));
// We can still store nodes into the table, but it will degrade.
if (ASMJIT_UNLIKELY(newData == nullptr))
if (ASMJIT_UNLIKELY(newData == nullptr)) {
return;
}
uint32_t i;
uint32_t oldCount = _bucketsCount;
@@ -201,8 +201,9 @@ void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexce
}
}
if (oldData != _embedded)
if (oldData != _embedded) {
allocator->release(oldData, oldCount * sizeof(ZoneHashNode*));
}
}
// ZoneHashBase - Operations
@@ -217,8 +218,9 @@ ZoneHashNode* ZoneHashBase::_insert(ZoneAllocator* allocator, ZoneHashNode* node
if (++_size > _bucketsGrow) {
uint32_t primeIndex = Support::min<uint32_t>(_primeIndex + 2, ASMJIT_ARRAY_SIZE(ZoneHash_primeArray) - 1);
if (primeIndex > _primeIndex)
if (primeIndex > _primeIndex) {
_rehash(allocator, primeIndex);
}
}
return node;

View File

@@ -73,7 +73,9 @@ public:
_primeIndex = other._primeIndex;
_embedded[0] = other._embedded[0];
if (_data == other._embedded) _data = _embedded;
if (_data == other._embedded) {
_data = _embedded;
}
}
inline void reset() noexcept {
@@ -89,8 +91,9 @@ public:
inline void release(ZoneAllocator* allocator) noexcept {
ZoneHashNode** oldData = _data;
if (oldData != _embedded)
if (oldData != _embedded) {
allocator->release(oldData, _bucketsCount * sizeof(ZoneHashNode*));
}
reset();
}
@@ -99,7 +102,10 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! \}
@@ -117,8 +123,13 @@ public:
std::swap(_primeIndex, other._primeIndex);
std::swap(_embedded[0], other._embedded[0]);
if (_data == other._embedded) _data = _embedded;
if (other._data == _embedded) other._data = other._embedded;
if (_data == other._embedded) {
_data = _embedded;
}
if (other._data == _embedded) {
other._data = other._embedded;
}
}
//! \cond INTERNAL
@@ -145,7 +156,7 @@ class ZoneHash : public ZoneHashBase {
public:
ASMJIT_NONCOPYABLE(ZoneHash)
typedef NodeT Node;
using Node = NodeT;
//! \name Construction & Destruction
//! \{
@@ -164,12 +175,14 @@ public:
ASMJIT_INLINE_NODEBUG void swap(ZoneHash& other) noexcept { ZoneHashBase::_swap(other); }
template<typename KeyT>
[[nodiscard]]
inline NodeT* get(const KeyT& key) const noexcept {
uint32_t hashMod = _calcMod(key.hashCode());
NodeT* node = static_cast<NodeT*>(_data[hashMod]);
while (node && !key.matches(node))
while (node && !key.matches(node)) {
node = static_cast<NodeT*>(node->_hashNext);
}
return node;
}

View File

@@ -22,10 +22,8 @@ public:
//! \name Constants
//! \{
enum : size_t {
kNodeIndexPrev = 0,
kNodeIndexNext = 1
};
static inline constexpr size_t kNodeIndexPrev = 0;
static inline constexpr size_t kNodeIndexNext = 1;
//! \}
@@ -50,10 +48,16 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPrev() const noexcept { return _listNodes[kNodeIndexPrev] != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasNext() const noexcept { return _listNodes[kNodeIndexNext] != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* prev() const noexcept { return _listNodes[kNodeIndexPrev]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* next() const noexcept { return _listNodes[kNodeIndexNext]; }
//! \}
@@ -68,10 +72,8 @@ public:
//! \name Constants
//! \{
enum : size_t {
kNodeIndexFirst = 0,
kNodeIndexLast = 1
};
static inline constexpr size_t kNodeIndexFirst = 0;
static inline constexpr size_t kNodeIndexLast = 1;
//! \}
@@ -100,8 +102,13 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _nodes[0] == nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* first() const noexcept { return _nodes[kNodeIndexFirst]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* last() const noexcept { return _nodes[kNodeIndexLast]; }
//! \}
@@ -120,10 +127,12 @@ public:
node->_listNodes[!dir] = prev;
_nodes[dir] = node;
if (prev)
if (prev) {
prev->_listNodes[dir] = node;
else
}
else {
_nodes[!dir] = node;
}
}
// Can be used to both append and prepend.
@@ -134,10 +143,12 @@ public:
NodeT* next = ref->_listNodes[dir];
prev->_listNodes[dir] = node;
if (next)
if (next) {
next->_listNodes[!dir] = node;
else
}
else {
_nodes[dir] = node;
}
node->_listNodes[!dir] = prev;
node->_listNodes[ dir] = next;
@@ -162,6 +173,7 @@ public:
return node;
}
[[nodiscard]]
inline NodeT* popFirst() noexcept {
NodeT* node = _nodes[0];
ASMJIT_ASSERT(node != nullptr);
@@ -180,6 +192,7 @@ public:
return node;
}
[[nodiscard]]
inline NodeT* pop() noexcept {
NodeT* node = _nodes[1];
ASMJIT_ASSERT(node != nullptr);

View File

@@ -30,8 +30,9 @@ Error ZoneStackBase::_init(ZoneAllocator* allocator, size_t middleIndex) noexcep
if (allocator) {
Block* block = static_cast<Block*>(allocator->alloc(kBlockSize));
if (ASMJIT_UNLIKELY(!block))
if (ASMJIT_UNLIKELY(!block)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
block->_link[kBlockIndexPrev] = nullptr;
block->_link[kBlockIndexNext] = nullptr;
@@ -56,8 +57,9 @@ Error ZoneStackBase::_prepareBlock(uint32_t side, size_t initialIndex) noexcept
ASMJIT_ASSERT(!prev->empty());
Block* block = _allocator->allocT<Block>(kBlockSize);
if (ASMJIT_UNLIKELY(!block))
if (ASMJIT_UNLIKELY(!block)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
block->_link[ side] = nullptr;
block->_link[!side] = prev;

View File

@@ -21,15 +21,13 @@ public:
//! \name Constants
//! \{
enum : size_t {
kBlockIndexPrev = 0,
kBlockIndexNext = 1,
static inline constexpr size_t kBlockIndexPrev = 0;
static inline constexpr size_t kBlockIndexNext = 1;
kBlockIndexFirst = 0,
kBlockIndexLast = 1,
static inline constexpr size_t kBlockIndexFirst = 0;
static inline constexpr size_t kBlockIndexLast = 1;
kBlockSize = ZoneAllocator::kHiMaxSize
};
static inline constexpr size_t kBlockSize = ZoneAllocator::kHiMaxSize;
//! \}
@@ -44,32 +42,46 @@ public:
//! Pointer to the end of the array.
void* _end;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _start == _end; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Block* prev() const noexcept { return _link[kBlockIndexPrev]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Block* next() const noexcept { return _link[kBlockIndexNext]; }
ASMJIT_INLINE_NODEBUG void setPrev(Block* block) noexcept { _link[kBlockIndexPrev] = block; }
ASMJIT_INLINE_NODEBUG void setNext(Block* block) noexcept { _link[kBlockIndexNext] = block; }
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* start() const noexcept { return static_cast<T*>(_start); }
template<typename T>
ASMJIT_INLINE_NODEBUG void setStart(T* start) noexcept { _start = static_cast<void*>(start); }
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* end() const noexcept { return (T*)_end; }
template<typename T>
ASMJIT_INLINE_NODEBUG void setEnd(T* end) noexcept { _end = (void*)end; }
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return (const T*)((const uint8_t*)(this) + sizeof(Block)); }
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* data() noexcept { return (T*)((uint8_t*)(this) + sizeof(Block)); }
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool canPrepend() const noexcept { return _start > data<void>(); }
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool canAppend() const noexcept {
size_t kNumBlockItems = (kBlockSize - sizeof(Block)) / sizeof(T);
size_t kStartBlockIndex = sizeof(Block);
@@ -107,8 +119,10 @@ public:
//! \{
//! Returns `ZoneAllocator` attached to this container.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return _allocator; }
[[nodiscard]]
inline bool empty() const noexcept {
ASMJIT_ASSERT(isInitialized());
return _block[0]->start<void>() == _block[1]->end<void>();
@@ -120,7 +134,9 @@ public:
//! \name Internal
//! \{
[[nodiscard]]
ASMJIT_API Error _prepareBlock(uint32_t side, size_t initialIndex) noexcept;
ASMJIT_API void _cleanupBlock(uint32_t side, size_t middleIndex) noexcept;
//! \}
@@ -136,12 +152,10 @@ public:
//! \name Constants
//! \{
enum : uint32_t {
kNumBlockItems = uint32_t((kBlockSize - sizeof(Block)) / sizeof(T)),
kStartBlockIndex = uint32_t(sizeof(Block)),
kMidBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems / 2) * sizeof(T)),
kEndBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems ) * sizeof(T))
};
static inline constexpr uint32_t kNumBlockItems = uint32_t((kBlockSize - sizeof(Block)) / sizeof(T));
static inline constexpr uint32_t kStartBlockIndex = uint32_t(sizeof(Block));
static inline constexpr uint32_t kMidBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems / 2) * sizeof(T));
static inline constexpr uint32_t kEndBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems ) * sizeof(T));
//! \}
@@ -191,6 +205,7 @@ public:
return kErrorOk;
}
[[nodiscard]]
inline T popFirst() noexcept {
ASMJIT_ASSERT(isInitialized());
ASMJIT_ASSERT(!empty());
@@ -202,12 +217,14 @@ public:
T item = *ptr++;
block->setStart(ptr);
if (block->empty())
if (block->empty()) {
_cleanupBlock(kBlockIndexFirst, kMidBlockIndex);
}
return item;
}
[[nodiscard]]
inline T pop() noexcept {
ASMJIT_ASSERT(isInitialized());
ASMJIT_ASSERT(!empty());
@@ -221,8 +238,9 @@ public:
ASMJIT_ASSERT(ptr >= block->start<T>());
block->setEnd(ptr);
if (block->empty())
if (block->empty()) {
_cleanupBlock(kBlockIndexLast, kMidBlockIndex);
}
return item;
}

View File

@@ -63,10 +63,8 @@ public:
//! \name Constants
//! \{
enum : uint32_t {
kWholeSize = (N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase)),
kMaxEmbeddedSize = kWholeSize - 5
};
static inline constexpr uint32_t kWholeSize = (N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase));
static inline constexpr uint32_t kMaxEmbeddedSize = kWholeSize - 5;
//! \}
@@ -92,14 +90,19 @@ public:
//! \{
//! Tests whether the string is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _base._size == 0; }
//! Returns the string data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return _base._size <= kMaxEmbeddedSize ? _base._embedded : _base._external; }
//! Returns the string size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _base._size; }
//! Tests whether the string is embedded (e.g. no dynamically allocated).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isEmbedded() const noexcept { return _base._size <= kMaxEmbeddedSize; }
//! Copies a new `data` of the given `size` to the string.

View File

@@ -16,7 +16,7 @@ ASMJIT_BEGIN_NAMESPACE
#if defined(ASMJIT_TEST)
template<typename NodeT>
struct ZoneRBUnit {
typedef ZoneTree<NodeT> Tree;
using Tree = ZoneTree<NodeT>;
static void verifyTree(Tree& tree) noexcept {
EXPECT_GT(checkHeight(static_cast<NodeT*>(tree._root)), 0);

View File

@@ -25,10 +25,8 @@ public:
//! \name Constants
//! \{
enum : uintptr_t {
kRedMask = 0x1,
kPtrMask = ~kRedMask
};
static inline constexpr uintptr_t kRedMask = 0x1;
static inline constexpr uintptr_t kPtrMask = ~kRedMask;
//! \}
@@ -49,17 +47,28 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRed() const noexcept { return static_cast<bool>(_rbNodeData[0] & kRedMask); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasChild(size_t i) const noexcept { return _rbNodeData[i] > kRedMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLeft() const noexcept { return _rbNodeData[0] > kRedMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRight() const noexcept { return _rbNodeData[1] != 0; }
template<typename T = ZoneTreeNode>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* child(size_t i) const noexcept { return static_cast<T*>(_getChild(i)); }
template<typename T = ZoneTreeNode>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* left() const noexcept { return static_cast<T*>(_getLeft()); }
template<typename T = ZoneTreeNode>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* right() const noexcept { return static_cast<T*>(_getRight()); }
//! \}
@@ -68,8 +77,13 @@ public:
//! \name Internal
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getChild(size_t i) const noexcept { return (ZoneTreeNode*)(_rbNodeData[i] & kPtrMask); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getLeft() const noexcept { return (ZoneTreeNode*)(_rbNodeData[0] & kPtrMask); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getRight() const noexcept { return (ZoneTreeNode*)(_rbNodeData[1]); }
ASMJIT_INLINE_NODEBUG void _setChild(size_t i, ZoneTreeNode* node) noexcept { _rbNodeData[i] = (_rbNodeData[i] & kRedMask) | (uintptr_t)node; }
@@ -80,6 +94,7 @@ public:
ASMJIT_INLINE_NODEBUG void _makeBlack() noexcept { _rbNodeData[0] &= kPtrMask; }
//! Tests whether the node is RED (RED node must be non-null and must have RED flag set).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool _isValidRed(ZoneTreeNode* node) noexcept { return node && node->isRed(); }
//! \}
@@ -103,8 +118,13 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* child(size_t i) const noexcept { return static_cast<NodeT*>(_getChild(i)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* left() const noexcept { return static_cast<NodeT*>(_getLeft()); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* right() const noexcept { return static_cast<NodeT*>(_getRight()); }
//! \}
@@ -116,7 +136,7 @@ class ZoneTree {
public:
ASMJIT_NONCOPYABLE(ZoneTree)
typedef NodeT Node;
using Node = NodeT;
NodeT* _root {};
//! \name Construction & Destruction
@@ -132,7 +152,10 @@ public:
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _root == nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* root() const noexcept { return static_cast<NodeT*>(_root); }
//! \}
@@ -192,14 +215,17 @@ public:
}
// Stop if found.
if (q == node)
if (q == node) {
break;
}
last = dir;
dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0;
// Update helpers.
if (g) t = g;
if (g) {
t = g;
}
g = p;
p = q;
@@ -319,15 +345,20 @@ public:
// Update root and make it black.
_root = static_cast<NodeT*>(head._getRight());
if (_root) _root->_makeBlack();
if (_root) {
_root->_makeBlack();
}
}
template<typename KeyT, typename CompareT = Support::Compare<Support::SortOrder::kAscending>>
[[nodiscard]]
inline NodeT* get(const KeyT& key, const CompareT& cmp = CompareT()) const noexcept {
ZoneTreeNode* node = _root;
while (node) {
auto result = cmp(*static_cast<const NodeT*>(node), key);
if (result == 0) break;
if (result == 0) {
break;
}
// Go left or right depending on the `result`.
node = node->_getChild(result < 0);

View File

@@ -16,7 +16,7 @@ ASMJIT_BEGIN_NAMESPACE
// ZoneVector is used as an array to hold short-lived data structures used during code generation. The growing
// strategy is simple - use small capacity at the beginning (very good for ZoneAllocator) and then grow quicker
// to prevent successive reallocations.
static ASMJIT_FORCE_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, uint32_t growMinimum, uint32_t sizeOfT) noexcept {
static ASMJIT_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, uint32_t growMinimum, uint32_t sizeOfT) noexcept {
static constexpr size_t kGrowThreshold = Globals::kGrowThreshold;
size_t byteSize = size_t(current) * sizeOfT;
@@ -53,8 +53,9 @@ static ASMJIT_FORCE_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, ui
// Bail to `growMinimum` in case of overflow - should never happen as it's unlikely we would hit this on a 32-bit
// machine (consecutive near 4GiB allocation is impossible, and this should never happen on 64-bit machine as we
// use 32-bit size & capacity, so overflow of 64 bit integer is not possible. Added just as an extreme measure.
if (byteSize < minimumByteSize)
if (byteSize < minimumByteSize) {
return growMinimum;
}
}
}
@@ -62,41 +63,48 @@ static ASMJIT_FORCE_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, ui
return uint32_t(Support::min<size_t>(n, 0xFFFFFFFFu));
}
static ASMJIT_FORCE_INLINE bool ZoneVector_byteSizeIsSafe(size_t nBytes, uint32_t n) noexcept {
if (sizeof(uint32_t) < sizeof(size_t))
static ASMJIT_INLINE bool ZoneVector_byteSizeIsSafe(size_t nBytes, uint32_t n) noexcept {
if constexpr (sizeof(uint32_t) < sizeof(size_t)) {
return true; // there is no problem when running on a 64-bit machine.
else
}
else {
return nBytes >= size_t(n);
}
};
Error ZoneVectorBase::_grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t capacity = _capacity;
uint32_t after = _size;
if (ASMJIT_UNLIKELY(std::numeric_limits<uint32_t>::max() - n < after))
if (ASMJIT_UNLIKELY(std::numeric_limits<uint32_t>::max() - n < after)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
after += n;
if (capacity >= after)
if (capacity >= after) {
return kErrorOk;
}
return _reserve(allocator, sizeOfT, ZoneVector_growCapacity(capacity, after, sizeOfT));
}
Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t oldCapacity = _capacity;
if (oldCapacity >= n)
if (oldCapacity >= n) {
return kErrorOk;
}
size_t nBytes = size_t(n) * sizeOfT;
if (ASMJIT_UNLIKELY(!ZoneVector_byteSizeIsSafe(nBytes, n)))
if (ASMJIT_UNLIKELY(!ZoneVector_byteSizeIsSafe(nBytes, n))) {
return DebugUtils::errored(kErrorOutOfMemory);
}
size_t allocatedBytes;
uint8_t* newData = static_cast<uint8_t*>(allocator->alloc(nBytes, allocatedBytes));
if (ASMJIT_UNLIKELY(!newData))
if (ASMJIT_UNLIKELY(!newData)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
uint32_t newCapacity = uint32_t(allocatedBytes / sizeOfT);
ASMJIT_ASSERT(newCapacity >= n);
@@ -115,8 +123,9 @@ Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint3
Error ZoneVectorBase::_growingReserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t capacity = _capacity;
if (capacity >= n)
if (capacity >= n) {
return kErrorOk;
}
return _reserve(allocator, sizeOfT, ZoneVector_growCapacity(capacity, n, sizeOfT));
}
@@ -128,8 +137,9 @@ Error ZoneVectorBase::_resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32
ASMJIT_ASSERT(_capacity >= n);
}
if (size < n)
if (size < n) {
memset(static_cast<uint8_t*>(_data) + size_t(size) * sizeOfT, 0, size_t(n - size) * sizeOfT);
}
_size = n;
return kErrorOk;
@@ -150,16 +160,18 @@ Error ZoneBitVector::copyFrom(ZoneAllocator* allocator, const ZoneBitVector& oth
if (newSize > _capacity) {
// Realloc needed... Calculate the minimum capacity (in bytes) required.
uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(newSize, kBitWordSizeInBits);
if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize))
if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
// Normalize to bytes.
uint32_t minimumCapacity = minimumCapacityInBits / 8;
size_t allocatedCapacity;
BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity));
if (ASMJIT_UNLIKELY(!newData))
if (ASMJIT_UNLIKELY(!newData)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
// `allocatedCapacity` now contains number in bytes, we need bits.
size_t allocatedCapacityInBits = allocatedCapacity * 8;
@@ -167,11 +179,13 @@ Error ZoneBitVector::copyFrom(ZoneAllocator* allocator, const ZoneBitVector& oth
// Arithmetic overflow should normally not happen. If it happens we just
// change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as
// this value is still safe to be used to call `_allocator->release(...)`.
if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) {
allocatedCapacityInBits = minimumCapacityInBits;
}
if (data)
if (data) {
allocator->release(data, _capacity / 8);
}
data = newData;
_data = data;
@@ -197,8 +211,9 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_
// happens when `newSize` is a multiply of `kBitWordSizeInBits` like 64, 128,
// and so on. In that case don't change anything as that would mean settings
// bits outside of the `_size`.
if (bit)
if (bit) {
_data[idx] &= (BitWord(1) << bit) - 1u;
}
_size = newSize;
return kErrorOk;
@@ -211,16 +226,18 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_
// Realloc needed, calculate the minimum capacity (in bytes) required.
uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(idealCapacity, kBitWordSizeInBits);
if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize))
if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
// Normalize to bytes.
uint32_t minimumCapacity = minimumCapacityInBits / 8;
size_t allocatedCapacity;
BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity));
if (ASMJIT_UNLIKELY(!newData))
if (ASMJIT_UNLIKELY(!newData)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
// `allocatedCapacity` now contains number in bytes, we need bits.
size_t allocatedCapacityInBits = allocatedCapacity * 8;
@@ -228,13 +245,15 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_
// Arithmetic overflow should normally not happen. If it happens we just
// change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as
// this value is still safe to be used to call `_allocator->release(...)`.
if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) {
allocatedCapacityInBits = minimumCapacityInBits;
}
_copyBits(newData, data, _wordsPerBits(oldSize));
if (data)
if (data) {
allocator->release(data, _capacity / 8);
}
data = newData;
_data = data;
@@ -274,8 +293,9 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_
while (idx < endIdx) data[idx++] = pattern;
// Clear unused bits of the last bit-word.
if (endBit)
if (endBit) {
data[endIdx - 1] = pattern & ((BitWord(1) << endBit) - 1);
}
_size = newSize;
return kErrorOk;
@@ -286,16 +306,20 @@ Error ZoneBitVector::_append(ZoneAllocator* allocator, bool value) noexcept {
uint32_t newSize = _size + 1;
uint32_t idealCapacity = _capacity;
if (idealCapacity < 128)
if (idealCapacity < 128) {
idealCapacity = 128;
else if (idealCapacity <= kThreshold)
}
else if (idealCapacity <= kThreshold) {
idealCapacity *= 2;
else
}
else {
idealCapacity += kThreshold;
}
if (ASMJIT_UNLIKELY(idealCapacity < _capacity)) {
if (ASMJIT_UNLIKELY(_size == std::numeric_limits<uint32_t>::max()))
if (ASMJIT_UNLIKELY(_size == std::numeric_limits<uint32_t>::max())) {
return DebugUtils::errored(kErrorOutOfMemory);
}
idealCapacity = newSize;
}
@@ -356,7 +380,7 @@ static void test_zone_vector(ZoneAllocator* allocator, const char* typeName) {
EXPECT_EQ(fsum, rsum);
vec.release(allocator);
INFO("ZoneBitVector::growingReserve()");
INFO("ZoneVector<%s>::growingReserve()", typeName);
for (uint32_t j = 0; j < 40 / sizeof(T); j += 8) {
EXPECT_EQ(vec.growingReserve(allocator, j * kMiB), kErrorOk);
EXPECT_GE(vec.capacity(), j * kMiB);
@@ -379,15 +403,17 @@ static void test_zone_bitvector(ZoneAllocator* allocator) {
EXPECT_EQ(vec.resize(allocator, count, false), kErrorOk);
EXPECT_EQ(vec.size(), count);
for (i = 0; i < count; i++)
for (i = 0; i < count; i++) {
EXPECT_FALSE(vec.bitAt(i));
}
vec.clear();
EXPECT_EQ(vec.resize(allocator, count, true), kErrorOk);
EXPECT_EQ(vec.size(), count);
for (i = 0; i < count; i++)
for (i = 0; i < count; i++) {
EXPECT_TRUE(vec.bitAt(i));
}
}
INFO("ZoneBitVector::fillBits() / clearBits()");
@@ -398,10 +424,12 @@ static void test_zone_bitvector(ZoneAllocator* allocator) {
for (i = 0; i < (count + 1) / 2; i++) {
bool value = bool(i & 1);
if (value)
if (value) {
vec.fillBits(i, count - i * 2);
else
}
else {
vec.clearBits(i, count - i * 2);
}
}
for (i = 0; i < count; i++) {

View File

@@ -19,9 +19,16 @@ class ZoneVectorBase {
public:
ASMJIT_NONCOPYABLE(ZoneVectorBase)
// STL compatibility;
typedef uint32_t size_type;
typedef ptrdiff_t difference_type;
//! \name Types (C++ compatibility)
//! \{
using size_type = uint32_t;
using difference_type = ptrdiff_t;
//! \}
//! \name Members
//! \{
//! Vector data (untyped).
void* _data = nullptr;
@@ -30,6 +37,8 @@ public:
//! Capacity of the vector.
size_type _capacity = 0;
//! \}
protected:
//! \name Construction & Destruction
//! \{
@@ -74,10 +83,15 @@ public:
//! \{
//! Tests whether the vector is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
//! Returns the vector size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_type size() const noexcept { return _size; }
//! Returns the vector capacity.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_type capacity() const noexcept { return _capacity; }
//! \}
@@ -120,17 +134,21 @@ class ZoneVector : public ZoneVectorBase {
public:
ASMJIT_NONCOPYABLE(ZoneVector)
// STL compatibility;
typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
//! \name Types (C++ compatibility)
//! \{
typedef T* iterator;
typedef const T* const_iterator;
typedef Support::ArrayReverseIterator<T> reverse_iterator;
typedef Support::ArrayReverseIterator<const T> const_reverse_iterator;
using value_type = T;
using pointer = T*;
using const_pointer = const T*;
using reference = T&;
using const_reference = const T&;
using iterator = T*;
using const_iterator = const T*;
using reverse_iterator = Support::ArrayReverseIterator<T>;
using const_reverse_iterator = Support::ArrayReverseIterator<const T>;
//! \}
//! \name Construction & Destruction
//! \{
@@ -144,11 +162,15 @@ public:
//! \{
//! Returns vector data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* data() noexcept { return static_cast<T*>(_data); }
//! Returns vector data (const)
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return static_cast<const T*>(_data); }
//! Returns item at the given index `i` (const).
[[nodiscard]]
inline const T& at(size_t i) const noexcept {
ASMJIT_ASSERT(i < _size);
return data()[i];
@@ -164,22 +186,40 @@ public:
//! \name STL Compatibility (Iterators)
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG iterator begin() noexcept { return iterator(data()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator begin() const noexcept { return const_iterator(data()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG iterator end() noexcept { return iterator(data() + _size); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator end() const noexcept { return const_iterator(data() + _size); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG reverse_iterator rbegin() noexcept { return reverse_iterator(end()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG reverse_iterator rend() noexcept { return reverse_iterator(begin()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator cbegin() const noexcept { return const_iterator(data()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator cend() const noexcept { return const_iterator(data() + _size); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(cend()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_reverse_iterator crend() const noexcept { return const_reverse_iterator(cbegin()); };
//! \}
@@ -188,12 +228,13 @@ public:
//! \{
//! Swaps this vector with `other`.
ASMJIT_FORCE_INLINE void swap(ZoneVector<T>& other) noexcept { _swap(other); }
ASMJIT_INLINE void swap(ZoneVector<T>& other) noexcept { _swap(other); }
//! Prepends `item` to the vector.
ASMJIT_FORCE_INLINE Error prepend(ZoneAllocator* allocator, const T& item) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity))
ASMJIT_INLINE Error prepend(ZoneAllocator* allocator, const T& item) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity)) {
ASMJIT_PROPAGATE(grow(allocator, 1));
}
memmove(static_cast<void*>(static_cast<T*>(_data) + 1),
static_cast<const void*>(_data),
@@ -208,11 +249,12 @@ public:
}
//! Inserts an `item` at the specified `index`.
ASMJIT_FORCE_INLINE Error insert(ZoneAllocator* allocator, size_t index, const T& item) noexcept {
ASMJIT_INLINE Error insert(ZoneAllocator* allocator, size_t index, const T& item) noexcept {
ASMJIT_ASSERT(index <= _size);
if (ASMJIT_UNLIKELY(_size == _capacity))
if (ASMJIT_UNLIKELY(_size == _capacity)) {
ASMJIT_PROPAGATE(grow(allocator, 1));
}
T* dst = static_cast<T*>(_data) + index;
memmove(static_cast<void*>(dst + 1),
@@ -228,9 +270,10 @@ public:
}
//! Appends `item` to the vector.
ASMJIT_FORCE_INLINE Error append(ZoneAllocator* allocator, const T& item) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity))
ASMJIT_INLINE Error append(ZoneAllocator* allocator, const T& item) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity)) {
ASMJIT_PROPAGATE(grow(allocator, 1));
}
memcpy(static_cast<void*>(static_cast<T*>(_data) + _size),
static_cast<const void*>(&item),
@@ -241,10 +284,11 @@ public:
}
//! Appends `other` vector at the end of this vector.
ASMJIT_FORCE_INLINE Error concat(ZoneAllocator* allocator, const ZoneVector<T>& other) noexcept {
ASMJIT_INLINE Error concat(ZoneAllocator* allocator, const ZoneVector<T>& other) noexcept {
uint32_t size = other._size;
if (_capacity - _size < size)
if (_capacity - _size < size) {
ASMJIT_PROPAGATE(grow(allocator, size));
}
if (size) {
memcpy(static_cast<void*>(static_cast<T*>(_data) + _size),
@@ -260,7 +304,7 @@ public:
//!
//! Can only be used together with `willGrow()`. If `willGrow(N)` returns `kErrorOk` then N elements
//! can be added to the vector without checking if there is a place for them. Used mostly internally.
ASMJIT_FORCE_INLINE void prependUnsafe(const T& item) noexcept {
ASMJIT_INLINE void prependUnsafe(const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
T* data = static_cast<T*>(_data);
@@ -280,7 +324,7 @@ public:
//!
//! Can only be used together with `willGrow()`. If `willGrow(N)` returns `kErrorOk` then N elements
//! can be added to the vector without checking if there is a place for them. Used mostly internally.
ASMJIT_FORCE_INLINE void appendUnsafe(const T& item) noexcept {
ASMJIT_INLINE void appendUnsafe(const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
memcpy(static_cast<void*>(static_cast<T*>(_data) + _size),
@@ -290,7 +334,7 @@ public:
}
//! Inserts an `item` at the specified `index` (unsafe case).
ASMJIT_FORCE_INLINE void insertUnsafe(size_t index, const T& item) noexcept {
ASMJIT_INLINE void insertUnsafe(size_t index, const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
ASMJIT_ASSERT(index <= _size);
@@ -307,7 +351,7 @@ public:
}
//! Concatenates all items of `other` at the end of the vector.
ASMJIT_FORCE_INLINE void concatUnsafe(const ZoneVector<T>& other) noexcept {
ASMJIT_INLINE void concatUnsafe(const ZoneVector<T>& other) noexcept {
uint32_t size = other._size;
ASMJIT_ASSERT(_capacity - _size >= size);
@@ -320,7 +364,7 @@ public:
}
//! Returns index of the given `val` or `Globals::kNotFound` if it doesn't exist.
ASMJIT_FORCE_INLINE uint32_t indexOf(const T& val) const noexcept {
ASMJIT_INLINE uint32_t indexOf(const T& val) const noexcept {
const T* data = static_cast<const T*>(_data);
uint32_t size = _size;
@@ -350,6 +394,7 @@ public:
}
//! Pops the last element from the vector and returns it.
[[nodiscard]]
inline T pop() noexcept {
ASMJIT_ASSERT(_size > 0);
@@ -363,12 +408,14 @@ public:
}
//! Returns item at index `i`.
[[nodiscard]]
inline T& operator[](size_t i) noexcept {
ASMJIT_ASSERT(i < _size);
return data()[i];
}
//! Returns item at index `i`.
[[nodiscard]]
inline const T& operator[](size_t i) const noexcept {
ASMJIT_ASSERT(i < _size);
return data()[i];
@@ -378,16 +425,22 @@ public:
//!
//! \note The vector must have at least one element. Attempting to use `first()` on empty vector will trigger
//! an assertion failure in debug builds.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T& first() noexcept { return operator[](0); }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T& first() const noexcept { return operator[](0); }
//! Returns a reference to the last element of the vector.
//!
//! \note The vector must have at least one element. Attempting to use `last()` on empty vector will trigger
//! an assertion failure in debug builds.
[[nodiscard]]
inline T& last() noexcept { return operator[](_size - 1); }
//! \overload
[[nodiscard]]
inline const T& last() const noexcept { return operator[](_size - 1); }
//! \}
@@ -401,6 +454,7 @@ public:
}
//! Called to grow the buffer to fit at least `n` elements more.
[[nodiscard]]
inline Error grow(ZoneAllocator* allocator, uint32_t n) noexcept {
return ZoneVectorBase::_grow(allocator, sizeof(T), n);
}
@@ -409,27 +463,34 @@ public:
//!
//! If `n` is greater than the current size then the additional elements' content will be initialized to zero.
//! If `n` is less than the current size then the vector will be truncated to exactly `n` elements.
[[nodiscard]]
inline Error resize(ZoneAllocator* allocator, uint32_t n) noexcept {
return ZoneVectorBase::_resize(allocator, sizeof(T), n);
}
//! Reallocates the internal array to fit at least `n` items.
[[nodiscard]]
inline Error reserve(ZoneAllocator* allocator, uint32_t n) noexcept {
if (ASMJIT_UNLIKELY(n > _capacity))
if (ASMJIT_UNLIKELY(n > _capacity)) {
return ZoneVectorBase::_reserve(allocator, sizeof(T), n);
else
}
else {
return Error(kErrorOk);
}
}
//! Reallocates the internal array to fit at least `n` items with growing semantics.
//!
//! If the vector is smaller than `n` the same growing calculations will be used as if N items were appended
//! to an empty vector, which means reserving additional space for more append operations that could follow.
[[nodiscard]]
inline Error growingReserve(ZoneAllocator* allocator, uint32_t n) noexcept {
if (ASMJIT_UNLIKELY(n > _capacity))
if (ASMJIT_UNLIKELY(n > _capacity)) {
return ZoneVectorBase::_growingReserve(allocator, sizeof(T), n);
else
}
else {
return Error(kErrorOk);
}
}
inline Error willGrow(ZoneAllocator* allocator, uint32_t n = 1) noexcept {
@@ -442,16 +503,19 @@ public:
//! Zone-allocated bit vector.
class ZoneBitVector {
public:
typedef Support::BitWord BitWord;
ASMJIT_NONCOPYABLE(ZoneBitVector)
//! \name Types
//! \{
using BitWord = Support::BitWord;
//! \}
//! \name Constants
//! \{
enum : uint32_t {
kBitWordSizeInBits = Support::kBitWordSizeInBits
};
static inline constexpr uint32_t kBitWordSizeInBits = Support::kBitWordSizeInBits;
//! \}
@@ -476,18 +540,21 @@ public:
}
static ASMJIT_INLINE_NODEBUG void _zeroBits(BitWord* dst, uint32_t nBitWords) noexcept {
for (uint32_t i = 0; i < nBitWords; i++)
for (uint32_t i = 0; i < nBitWords; i++) {
dst[i] = 0;
}
}
static ASMJIT_INLINE_NODEBUG void _fillBits(BitWord* dst, uint32_t nBitWords) noexcept {
for (uint32_t i = 0; i < nBitWords; i++)
for (uint32_t i = 0; i < nBitWords; i++) {
dst[i] = ~BitWord(0);
}
}
static ASMJIT_INLINE_NODEBUG void _copyBits(BitWord* dst, const BitWord* src, uint32_t nBitWords) noexcept {
for (uint32_t i = 0; i < nBitWords; i++)
for (uint32_t i = 0; i < nBitWords; i++) {
dst[i] = src[i];
}
}
//! \}
@@ -508,7 +575,10 @@ public:
//! \name Overloaded Operators
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const ZoneBitVector& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const ZoneBitVector& other) const noexcept { return !equals(other); }
//! \}
@@ -517,20 +587,31 @@ public:
//! \{
//! Tests whether the bit-vector is empty (has no bits).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
//! Returns the size of this bit-vector (in bits).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _size; }
//! Returns the capacity of this bit-vector (in bits).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t capacity() const noexcept { return _capacity; }
//! Returns the size of the `BitWord[]` array in `BitWord` units.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t sizeInBitWords() const noexcept { return _wordsPerBits(_size); }
//! Returns the capacity of the `BitWord[]` array in `BitWord` units.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t capacityInBitWords() const noexcept { return _wordsPerBits(_capacity); }
//! Returns bit-vector data as `BitWord[]`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BitWord* data() noexcept { return _data; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const BitWord* data() const noexcept { return _data; }
//! \}
@@ -559,6 +640,7 @@ public:
_clearUnusedBits();
}
[[nodiscard]]
inline bool bitAt(uint32_t index) const noexcept {
ASMJIT_ASSERT(index < _size);
return Support::bitVectorGetBit(_data, index);
@@ -574,7 +656,7 @@ public:
Support::bitVectorFlipBit(_data, index);
}
ASMJIT_FORCE_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept {
ASMJIT_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept {
uint32_t index = _size;
if (ASMJIT_UNLIKELY(index >= _capacity))
return _append(allocator, value);
@@ -593,23 +675,23 @@ public:
ASMJIT_API Error copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept;
ASMJIT_FORCE_INLINE void clearAll() noexcept {
ASMJIT_INLINE void clearAll() noexcept {
_zeroBits(_data, _wordsPerBits(_size));
}
ASMJIT_FORCE_INLINE void fillAll() noexcept {
ASMJIT_INLINE void fillAll() noexcept {
_fillBits(_data, _wordsPerBits(_size));
_clearUnusedBits();
}
ASMJIT_FORCE_INLINE void clearBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_INLINE void clearBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_ASSERT(start <= _size);
ASMJIT_ASSERT(_size - start >= count);
Support::bitVectorClear(_data, start, count);
}
ASMJIT_FORCE_INLINE void fillBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_INLINE void fillBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_ASSERT(start <= _size);
ASMJIT_ASSERT(_size - start >= count);
@@ -620,7 +702,7 @@ public:
//! bits than `this` then all remaining bits are set to zero.
//!
//! \note The size of the BitVector is unaffected by this operation.
ASMJIT_FORCE_INLINE void and_(const ZoneBitVector& other) noexcept {
ASMJIT_INLINE void and_(const ZoneBitVector& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
@@ -644,49 +726,57 @@ public:
//! has less bits than `this` then all remaining bits are kept intact.
//!
//! \note The size of the BitVector is unaffected by this operation.
ASMJIT_FORCE_INLINE void andNot(const ZoneBitVector& other) noexcept {
ASMJIT_INLINE void andNot(const ZoneBitVector& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size));
for (uint32_t i = 0; i < commonBitWordCount; i++)
for (uint32_t i = 0; i < commonBitWordCount; i++) {
dst[i] = dst[i] & ~src[i];
}
}
//! Performs a logical bitwise OP between bits specified in this array and bits in `other`. If `other` has less
//! bits than `this` then all remaining bits are kept intact.
//!
//! \note The size of the BitVector is unaffected by this operation.
ASMJIT_FORCE_INLINE void or_(const ZoneBitVector& other) noexcept {
ASMJIT_INLINE void or_(const ZoneBitVector& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size));
for (uint32_t i = 0; i < commonBitWordCount; i++)
for (uint32_t i = 0; i < commonBitWordCount; i++) {
dst[i] = dst[i] | src[i];
}
_clearUnusedBits();
}
ASMJIT_FORCE_INLINE void _clearUnusedBits() noexcept {
ASMJIT_INLINE void _clearUnusedBits() noexcept {
uint32_t idx = _size / kBitWordSizeInBits;
uint32_t bit = _size % kBitWordSizeInBits;
if (!bit)
if (!bit) {
return;
}
_data[idx] &= (BitWord(1) << bit) - 1u;
}
ASMJIT_FORCE_INLINE bool equals(const ZoneBitVector& other) const noexcept {
if (_size != other._size)
[[nodiscard]]
ASMJIT_INLINE bool equals(const ZoneBitVector& other) const noexcept {
if (_size != other._size) {
return false;
}
const BitWord* aData = _data;
const BitWord* bData = other._data;
uint32_t numBitWords = _wordsPerBits(_size);
for (uint32_t i = 0; i < numBitWords; i++)
if (aData[i] != bData[i])
for (uint32_t i = 0; i < numBitWords; i++) {
if (aData[i] != bData[i]) {
return false;
}
}
return true;
}
@@ -696,12 +786,15 @@ public:
//! \{
inline void release(ZoneAllocator* allocator) noexcept {
if (!_data)
if (!_data) {
return;
}
allocator->release(_data, _capacity / 8);
reset();
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Error resize(ZoneAllocator* allocator, uint32_t newSize, bool newBitsValue = false) noexcept {
return _resize(allocator, newSize, newSize, newBitsValue);
}

Some files were not shown because too many files have changed in this diff Show More