diff --git a/src/asmjit/base/runtime.cpp b/src/asmjit/base/runtime.cpp
index dc0c960..88e39dd 100644
--- a/src/asmjit/base/runtime.cpp
+++ b/src/asmjit/base/runtime.cpp
@@ -63,12 +63,13 @@ uint32_t HostRuntime::getStackAlignment() {
 void HostRuntime::flush(void* p, size_t size) {
   // Only useful on non-x86 architectures.
 #if !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
-
+# if ASMJIT_OS_WINDOWS
   // Windows has built-in support in kernel32.dll.
-#if ASMJIT_OS_WINDOWS
   ::FlushInstructionCache(_memMgr.getProcessHandle(), p, size);
-#endif // ASMJIT_OS_WINDOWS
-
+# endif // ASMJIT_OS_WINDOWS
+#else
+  ASMJIT_UNUSED(p);
+  ASMJIT_UNUSED(size);
 #endif // !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
 }
 
diff --git a/src/asmjit/x86/x86compilercontext.cpp b/src/asmjit/x86/x86compilercontext.cpp
index b7547af..8434539 100644
--- a/src/asmjit/x86/x86compilercontext.cpp
+++ b/src/asmjit/x86/x86compilercontext.cpp
@@ -2184,45 +2184,45 @@ Error X86Context::fetch() {
     if (vaCount == 0 && clobberedRegs.isEmpty()) \
       break; \
     \
-    X86VarMap* map = newVarMap(vaCount); \
-    if (map == nullptr) \
+    X86RegCount _vaIndex; \
+    _vaIndex.indexFromRegCount(regCount); \
+    \
+    X86VarMap* _map = newVarMap(vaCount); \
+    if (_map == nullptr) \
       goto _NoMemory; \
     \
-    X86RegCount vaIndex; \
-    vaIndex.indexFromRegCount(regCount); \
+    _map->_vaCount = vaCount; \
+    _map->_count = regCount; \
+    _map->_start = _vaIndex; \
     \
-    map->_vaCount = vaCount; \
-    map->_count = regCount; \
-    map->_start = vaIndex; \
+    _map->_inRegs = inRegs; \
+    _map->_outRegs = outRegs; \
+    _map->_clobberedRegs = clobberedRegs; \
     \
-    map->_inRegs = inRegs; \
-    map->_outRegs = outRegs; \
-    map->_clobberedRegs = clobberedRegs; \
-    \
-    VarAttr* va = vaTmpList; \
+    VarAttr* _va = vaTmpList; \
     while (vaCount) { \
-      VarData* vd = va->getVd(); \
+      VarData* _vd = _va->getVd(); \
       \
-      uint32_t class_ = vd->getClass(); \
-      uint32_t index = vaIndex.get(class_); \
+      uint32_t _class = _vd->getClass(); \
+      uint32_t _index = _vaIndex.get(_class); \
       \
-      vaIndex.add(class_); \
+      _vaIndex.add(_class); \
       \
-      if (va->_inRegs) \
-        va->_allocableRegs = va->_inRegs; \
-      else if (va->_outRegIndex != kInvalidReg) \
-        va->_allocableRegs = Utils::mask(va->_outRegIndex); \
+      if (_va->_inRegs) \
+        _va->_allocableRegs = _va->_inRegs; \
+      else if (_va->_outRegIndex != kInvalidReg) \
+        _va->_allocableRegs = Utils::mask(_va->_outRegIndex); \
       else \
-        va->_allocableRegs &= ~inRegs.get(class_); \
+        _va->_allocableRegs &= ~inRegs.get(_class); \
       \
-      vd->_va = nullptr; \
-      map->getVa(index)[0] = va[0]; \
+      _vd->_va = nullptr; \
+      _map->getVa(_index)[0] = _va[0]; \
       \
-      va++; \
+      _va++; \
       vaCount--; \
     } \
     \
-    _Node_->setMap(map); \
+    _Node_->setMap(_map); \
   } while (0)
 
 #define VI_ADD_VAR(_Vd_, _Va_, _Flags_, _NewAllocable_) \
@@ -3367,15 +3367,12 @@ ASMJIT_INLINE void X86VarAlloc::plan() {
     return;
 
   uint32_t i;
-
   uint32_t willAlloc = _willAlloc.get(C);
   uint32_t willFree = 0;
 
   VarAttr* list = getVaListByClass(C);
   uint32_t count = getVaCountByClass(C);
 
-  X86VarState* state = getState();
-  VarData** sVars = state->getListByClass(C);
 
   // Calculate 'willAlloc' and 'willFree' masks based on mandatory masks.
   for (i = 0; i < count; i++) {
@@ -3633,15 +3630,12 @@ ASMJIT_INLINE void X86VarAlloc::alloc() {
   if (isVaDone(C))
     return;
 
-  VarAttr* list = getVaListByClass(C);
-  uint32_t count = getVaCountByClass(C);
-
-  X86VarState* state = getState();
-  VarData** sVars = state->getListByClass(C);
-
   uint32_t i;
   bool didWork;
 
+  VarAttr* list = getVaListByClass(C);
+  uint32_t count = getVaCountByClass(C);
+
   // Alloc 'in' regs.
   do {
     didWork = false;
@@ -3714,7 +3708,7 @@ ASMJIT_INLINE void X86VarAlloc::alloc() {
       ASMJIT_ASSERT(regIndex != kInvalidReg);
 
       if (vd->getRegIndex() != regIndex) {
-        ASMJIT_ASSERT(sVars[regIndex] == nullptr);
+        ASMJIT_ASSERT(getState()->getListByClass(C)[regIndex] == nullptr);
         _context->attach(vd, regIndex, false);
       }
 
@@ -4040,7 +4034,6 @@ ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRe
   return safeRegs;
 }
 
-
 template<int C>
 ASMJIT_INLINE uint32_t X86VarAlloc::guessSpill(VarData* vd, uint32_t allocableRegs) {
   ASMJIT_ASSERT(allocableRegs != 0);
@@ -4106,7 +4099,6 @@ struct X86CallAlloc : public X86BaseAlloc {
 
 protected:
   // Just to prevent calling these methods from X86Context::translate().
-
   ASMJIT_INLINE void init(X86CallNode* node, X86VarMap* map);
   ASMJIT_INLINE void cleanup();
 
@@ -4282,7 +4274,6 @@ ASMJIT_INLINE void X86CallAlloc::plan() {
   uint32_t count = getVaCountByClass(C);
 
   X86VarState* state = getState();
-  VarData** sVars = state->getListByClass(C);
 
   // Calculate 'willAlloc' and 'willFree' masks based on mandatory masks.
   for (i = 0; i < count; i++) {
@@ -4412,10 +4403,9 @@ ASMJIT_INLINE void X86CallAlloc::spill() {
     ASMJIT_ASSERT(vd->getVa() == nullptr);
 
     if (vd->isModified() && availableRegs) {
-      uint32_t m = guessSpill<C>(vd, availableRegs);
-
-      if (m != 0) {
-        uint32_t regIndex = Utils::findFirstBit(m);
+      uint32_t available = guessSpill<C>(vd, availableRegs);
+      if (available != 0) {
+        uint32_t regIndex = Utils::findFirstBit(available);
         uint32_t regMask = Utils::mask(regIndex);
 
         _context->move(vd, regIndex);
@@ -5669,6 +5659,7 @@ Error X86Context::schedule() {
 
   HLNode* node_ = getFunc();
   HLNode* stop = getStop();
+  ASMJIT_UNUSED(stop); // Unused in release mode.
 
   PodList<HLNode*>::Link* jLink = _jccList.getFirst();
 
diff --git a/src/asmjit/x86/x86operand.h b/src/asmjit/x86/x86operand.h
index 7711c20..a3bb0bd 100644
--- a/src/asmjit/x86/x86operand.h
+++ b/src/asmjit/x86/x86operand.h
@@ -1219,6 +1219,7 @@ struct X86Mem : public BaseMem {
   }
 
   ASMJIT_INLINE X86Mem(const X86RipReg& rip, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) {
+    ASMJIT_UNUSED(rip);
     _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeRip, 0, kInvalidValue);
     _init_packed_d2_d3(kInvalidValue, disp);
   }
@@ -1606,7 +1607,7 @@ struct X86Mem : public BaseMem {
   }
 
   //! Reset memory operand relative displacement.
-  ASMJIT_INLINE X86Mem& resetDisplacement(int32_t disp) {
+  ASMJIT_INLINE X86Mem& resetDisplacement() {
     _vmem.displacement = 0;
     return *this;
   }
@@ -2421,8 +2422,8 @@ static ASMJIT_INLINE X86Mem ptr(const Label& label, const X86GpReg& index, uint3
 }
 
 //! Create `[RIP + disp]` memory operand with no/custom size information.
-static ASMJIT_INLINE X86Mem ptr(const X86RipReg& rip, int32_t disp = 0, uint32_t size = 0) {
-  return X86Mem(rip, disp, size);
+static ASMJIT_INLINE X86Mem ptr(const X86RipReg& rip_, int32_t disp = 0, uint32_t size = 0) {
+  return X86Mem(rip_, disp, size);
 }
 
 //! Create `[pAbs + disp]` absolute memory operand with no/custom size information.
@@ -2457,8 +2458,8 @@ ASMJIT_API X86Mem ptr_abs(Ptr pAbs, const X86Reg& index, uint32_t shift = 0, int
     return ptr(label, index, shift, disp, _Size_); \
   } \
   /*! Create `[RIP + disp]` memory operand. */ \
-  static ASMJIT_INLINE X86Mem _Prefix_##ptr(const X86RipReg& rip, int32_t disp = 0) { \
-    return ptr(rip, disp, _Size_); \
+  static ASMJIT_INLINE X86Mem _Prefix_##ptr(const X86RipReg& rip_, int32_t disp = 0) { \
+    return ptr(rip_, disp, _Size_); \
   } \
   /*! Create `[pAbs + disp]` memory operand. */ \
   static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr pAbs, int32_t disp = 0) { \
diff --git a/src/test/asmjit_bench_x86.cpp b/src/test/asmjit_bench_x86.cpp
index e387e22..5e7cd18 100644
--- a/src/test/asmjit_bench_x86.cpp
+++ b/src/test/asmjit_bench_x86.cpp
@@ -89,7 +89,8 @@ int main(int argc, char* argv[]) {
     perf.end();
   }
 
-  printf("Opcode | Time: %-6u [ms] | Speed: %-9u [inst/s]\n",
+  printf("%-22s | Time: %-6u [ms] | Speed: %-9u [inst/s]\n",
+    "Assembler [GenOpCode]",
     perf.best, instPerMs(perf.best, kNumIterations, asmgen::kGenOpCodeInstCount));
 
   // --------------------------------------------------------------------------
@@ -112,7 +113,8 @@ int main(int argc, char* argv[]) {
     perf.end();
   }
 
-  printf("Blend | Time: %-6u [ms] | Speed: %-9u [inst/s]\n",
+  printf("%-22s | Time: %-6u [ms] | Speed: %-9u [inst/s]\n",
+    "Compiler [GenBlend]",
     perf.best, instPerMs(perf.best, kNumIterations, asmgen::kGenBlendInstCount));
 
   return 0;