Mirror of https://github.com/asmjit/asmjit.git
Minor code cleanup, fixed variable shadowing and unused parameters.
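The "unused parameters" half of the cleanup appears in the hunks below as new ASMJIT_UNUSED(...) calls on arguments that are only consumed in some build configurations (for example, flush() on x86 hosts, or an assert-only variable in release builds). As a rough sketch of the pattern (the macro below is a stand-in, not necessarily asmjit's exact definition), a cast to void is the usual way to silence unused-parameter and unused-variable warnings:

    #include <cstddef>

    // Stand-in for a macro like ASMJIT_UNUSED; the real definition may differ.
    // Casting to void marks a value as intentionally unused, so the compiler
    // stops emitting -Wunused-parameter / -Wunused-variable style warnings.
    #define MY_UNUSED(x) ((void)(x))

    void flushStub(void* p, std::size_t size) {
      // On builds where this function is a no-op, both parameters would
      // otherwise trigger warnings.
      MY_UNUSED(p);
      MY_UNUSED(size);
    }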
@@ -63,12 +63,13 @@ uint32_t HostRuntime::getStackAlignment() {
 void HostRuntime::flush(void* p, size_t size) {
   // Only useful on non-x86 architectures.
 #if !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
+# if ASMJIT_OS_WINDOWS
   // Windows has built-in support in kernel32.dll.
-#if ASMJIT_OS_WINDOWS
   ::FlushInstructionCache(_memMgr.getProcessHandle(), p, size);
-#endif // ASMJIT_OS_WINDOWS
+# endif // ASMJIT_OS_WINDOWS
+#else
+  ASMJIT_UNUSED(p);
+  ASMJIT_UNUSED(size);
 #endif // !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
 }
 
@@ -2184,45 +2184,45 @@ Error X86Context::fetch() {
     if (vaCount == 0 && clobberedRegs.isEmpty()) \
       break; \
     \
-    X86VarMap* map = newVarMap(vaCount); \
-    if (map == nullptr) \
+    X86RegCount _vaIndex; \
+    _vaIndex.indexFromRegCount(regCount); \
+    \
+    X86VarMap* _map = newVarMap(vaCount); \
+    if (_map == nullptr) \
       goto _NoMemory; \
     \
-    X86RegCount vaIndex; \
-    vaIndex.indexFromRegCount(regCount); \
+    _map->_vaCount = vaCount; \
+    _map->_count = regCount; \
+    _map->_start = _vaIndex; \
     \
-    map->_vaCount = vaCount; \
-    map->_count = regCount; \
-    map->_start = vaIndex; \
+    _map->_inRegs = inRegs; \
+    _map->_outRegs = outRegs; \
+    _map->_clobberedRegs = clobberedRegs; \
     \
-    map->_inRegs = inRegs; \
-    map->_outRegs = outRegs; \
-    map->_clobberedRegs = clobberedRegs; \
-    \
-    VarAttr* va = vaTmpList; \
+    VarAttr* _va = vaTmpList; \
     while (vaCount) { \
-      VarData* vd = va->getVd(); \
+      VarData* _vd = _va->getVd(); \
       \
-      uint32_t class_ = vd->getClass(); \
-      uint32_t index = vaIndex.get(class_); \
+      uint32_t _class = _vd->getClass(); \
+      uint32_t _index = _vaIndex.get(_class); \
       \
-      vaIndex.add(class_); \
+      _vaIndex.add(_class); \
       \
-      if (va->_inRegs) \
-        va->_allocableRegs = va->_inRegs; \
-      else if (va->_outRegIndex != kInvalidReg) \
-        va->_allocableRegs = Utils::mask(va->_outRegIndex); \
+      if (_va->_inRegs) \
+        _va->_allocableRegs = _va->_inRegs; \
+      else if (_va->_outRegIndex != kInvalidReg) \
+        _va->_allocableRegs = Utils::mask(_va->_outRegIndex); \
       else \
-        va->_allocableRegs &= ~inRegs.get(class_); \
+        _va->_allocableRegs &= ~inRegs.get(_class); \
       \
-      vd->_va = nullptr; \
-      map->getVa(index)[0] = va[0]; \
+      _vd->_va = nullptr; \
+      _map->getVa(_index)[0] = _va[0]; \
       \
-      va++; \
+      _va++; \
       vaCount--; \
     } \
     \
-    _Node_->setMap(map); \
+    _Node_->setMap(_map); \
   } while (0)
 
 #define VI_ADD_VAR(_Vd_, _Va_, _Flags_, _NewAllocable_) \
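The "variable shadowing" half of the cleanup is what the macro rewrite above addresses: its locals are renamed with a leading underscore (map to _map, va to _va, vaIndex to _vaIndex, and so on) so that expanding the macro cannot shadow or collide with variables already in scope at the call site. A minimal sketch of the failure mode, using made-up names rather than asmjit's:

    #include <cstdio>

    // A macro that declares a plain `i` would shadow, or clash with, an `i`
    // that already exists where the macro is expanded. Prefixing macro-local
    // names (here `_i`, `_sum`) sidesteps that, which is the same rename
    // pattern applied in the hunk above.
    #define SUM_0_TO_N(out, n)        \
      do {                            \
        unsigned _i = 0, _sum = 0;    \
        for (; _i < (n); _i++)        \
          _sum += _i;                 \
        (out) = _sum;                 \
      } while (0)

    int main() {
      unsigned i = 10;       // the caller's own `i` is left untouched
      unsigned total = 0;
      SUM_0_TO_N(total, i);  // sums 0 + 1 + ... + 9
      std::printf("%u\n", total); // prints 45
      return 0;
    }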
@@ -3367,15 +3367,12 @@ ASMJIT_INLINE void X86VarAlloc::plan() {
     return;
 
   uint32_t i;
-
   uint32_t willAlloc = _willAlloc.get(C);
   uint32_t willFree = 0;
 
   VarAttr* list = getVaListByClass(C);
   uint32_t count = getVaCountByClass(C);
-
   X86VarState* state = getState();
-  VarData** sVars = state->getListByClass(C);
 
   // Calculate 'willAlloc' and 'willFree' masks based on mandatory masks.
   for (i = 0; i < count; i++) {
@@ -3633,15 +3630,12 @@ ASMJIT_INLINE void X86VarAlloc::alloc() {
   if (isVaDone(C))
     return;
 
-  VarAttr* list = getVaListByClass(C);
-  uint32_t count = getVaCountByClass(C);
-
-  X86VarState* state = getState();
-  VarData** sVars = state->getListByClass(C);
-
   uint32_t i;
   bool didWork;
 
+  VarAttr* list = getVaListByClass(C);
+  uint32_t count = getVaCountByClass(C);
+
   // Alloc 'in' regs.
   do {
     didWork = false;
@@ -3714,7 +3708,7 @@ ASMJIT_INLINE void X86VarAlloc::alloc() {
       ASMJIT_ASSERT(regIndex != kInvalidReg);
 
       if (vd->getRegIndex() != regIndex) {
-        ASMJIT_ASSERT(sVars[regIndex] == nullptr);
+        ASMJIT_ASSERT(getState()->getListByClass(C)[regIndex] == nullptr);
         _context->attach<C>(vd, regIndex, false);
       }
 
@@ -4040,7 +4034,6 @@ ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRe
   return safeRegs;
 }
 
-
 template<int C>
 ASMJIT_INLINE uint32_t X86VarAlloc::guessSpill(VarData* vd, uint32_t allocableRegs) {
   ASMJIT_ASSERT(allocableRegs != 0);
@@ -4106,7 +4099,6 @@ struct X86CallAlloc : public X86BaseAlloc {
 
 protected:
   // Just to prevent calling these methods from X86Context::translate().
-
   ASMJIT_INLINE void init(X86CallNode* node, X86VarMap* map);
   ASMJIT_INLINE void cleanup();
 
@@ -4282,7 +4274,6 @@ ASMJIT_INLINE void X86CallAlloc::plan() {
   uint32_t count = getVaCountByClass(C);
 
   X86VarState* state = getState();
-  VarData** sVars = state->getListByClass(C);
 
   // Calculate 'willAlloc' and 'willFree' masks based on mandatory masks.
   for (i = 0; i < count; i++) {
@@ -4412,10 +4403,9 @@ ASMJIT_INLINE void X86CallAlloc::spill() {
     ASMJIT_ASSERT(vd->getVa() == nullptr);
 
     if (vd->isModified() && availableRegs) {
-      uint32_t m = guessSpill<C>(vd, availableRegs);
-
-      if (m != 0) {
-        uint32_t regIndex = Utils::findFirstBit(m);
+      uint32_t available = guessSpill<C>(vd, availableRegs);
+      if (available != 0) {
+        uint32_t regIndex = Utils::findFirstBit(available);
         uint32_t regMask = Utils::mask(regIndex);
 
         _context->move<C>(vd, regIndex);
@@ -5669,6 +5659,7 @@ Error X86Context::schedule() {
 
   HLNode* node_ = getFunc();
   HLNode* stop = getStop();
+  ASMJIT_UNUSED(stop); // Unused in release mode.
 
   PodList<HLNode*>::Link* jLink = _jccList.getFirst();
 
@@ -1219,6 +1219,7 @@ struct X86Mem : public BaseMem {
   }
 
   ASMJIT_INLINE X86Mem(const X86RipReg& rip, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) {
+    ASMJIT_UNUSED(rip);
     _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeRip, 0, kInvalidValue);
     _init_packed_d2_d3(kInvalidValue, disp);
   }
@@ -1606,7 +1607,7 @@ struct X86Mem : public BaseMem {
   }
 
   //! Reset memory operand relative displacement.
-  ASMJIT_INLINE X86Mem& resetDisplacement(int32_t disp) {
+  ASMJIT_INLINE X86Mem& resetDisplacement() {
     _vmem.displacement = 0;
     return *this;
   }
@@ -2421,8 +2422,8 @@ static ASMJIT_INLINE X86Mem ptr(const Label& label, const X86GpReg& index, uint3
 }
 
 //! Create `[RIP + disp]` memory operand with no/custom size information.
-static ASMJIT_INLINE X86Mem ptr(const X86RipReg& rip, int32_t disp = 0, uint32_t size = 0) {
-  return X86Mem(rip, disp, size);
+static ASMJIT_INLINE X86Mem ptr(const X86RipReg& rip_, int32_t disp = 0, uint32_t size = 0) {
+  return X86Mem(rip_, disp, size);
 }
 
 //! Create `[pAbs + disp]` absolute memory operand with no/custom size information.
@@ -2457,8 +2458,8 @@ ASMJIT_API X86Mem ptr_abs(Ptr pAbs, const X86Reg& index, uint32_t shift = 0, int
     return ptr(label, index, shift, disp, _Size_); \
   } \
   /*! Create `[RIP + disp]` memory operand. */ \
-  static ASMJIT_INLINE X86Mem _Prefix_##ptr(const X86RipReg& rip, int32_t disp = 0) { \
-    return ptr(rip, disp, _Size_); \
+  static ASMJIT_INLINE X86Mem _Prefix_##ptr(const X86RipReg& rip_, int32_t disp = 0) { \
+    return ptr(rip_, disp, _Size_); \
   } \
   /*! Create `[pAbs + disp]` memory operand. */ \
   static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr pAbs, int32_t disp = 0) { \
@@ -89,7 +89,8 @@ int main(int argc, char* argv[]) {
     perf.end();
   }
 
-  printf("Opcode | Time: %-6u [ms] | Speed: %-9u [inst/s]\n",
+  printf("%-22s | Time: %-6u [ms] | Speed: %-9u [inst/s]\n",
+    "Assembler [GenOpCode]",
     perf.best, instPerMs(perf.best, kNumIterations, asmgen::kGenOpCodeInstCount));
 
   // --------------------------------------------------------------------------
@@ -112,7 +113,8 @@ int main(int argc, char* argv[]) {
     perf.end();
   }
 
-  printf("Blend | Time: %-6u [ms] | Speed: %-9u [inst/s]\n",
+  printf("%-22s | Time: %-6u [ms] | Speed: %-9u [inst/s]\n",
+    "Compiler [GenBlend]",
     perf.best, instPerMs(perf.best, kNumIterations, asmgen::kGenBlendInstCount));
 
   return 0;