Mark clobbered registers when allocating "call" (X86Compiler)

This commit is contained in:
kobalicek
2016-04-09 04:55:00 +02:00
parent 02ba5cc35c
commit 83682451b8
4 changed files with 58 additions and 3 deletions

View File

@@ -161,7 +161,7 @@ struct ConstPool {
continue;
}
_Visit:
visitor.visit(node);
link = node->_link[1];

View File

@@ -371,14 +371,13 @@ struct Utils {
// --------------------------------------------------------------------------
//! Portable SWAR population count (number of set bits in `x`).
//!
//! Source: http://graphics.stanford.edu/~seander/bithacks.html
static ASMJIT_INLINE uint32_t bitCountSlow(uint32_t x) noexcept {
  // Fold pairs of bits, then nibbles, then sum all bytes via multiply.
  x -= (x >> 1) & 0x55555555U;
  x = (x & 0x33333333U) + ((x >> 2) & 0x33333333U);
  x = (x + (x >> 4)) & 0x0F0F0F0FU;
  return (x * 0x01010101U) >> 24;
}
//! Get count of bits in `x`.
//!
//! Taken from http://graphics.stanford.edu/~seander/bithacks.html .
static ASMJIT_INLINE uint32_t bitCount(uint32_t x) noexcept {
#if ASMJIT_CC_GCC || ASMJIT_CC_CLANG
return __builtin_popcount(x);

View File

@@ -4475,6 +4475,7 @@ ASMJIT_INLINE void X86CallAlloc::alloc() {
}
else if (aIndex != kInvalidReg) {
_context->move<C>(aVd, bIndex);
_context->_clobberedRegs.or_(C, Utils::mask(bIndex));
aVa->orFlags(kVarAttrAllocRDone);
addVaDone(C);
@@ -4484,6 +4485,7 @@ ASMJIT_INLINE void X86CallAlloc::alloc() {
}
else {
_context->alloc<C>(aVd, bIndex);
_context->_clobberedRegs.or_(C, Utils::mask(bIndex));
aVa->orFlags(kVarAttrAllocRDone);
addVaDone(C);

View File

@@ -2670,6 +2670,59 @@ struct X86Test_CallMisc4 : public X86Test {
// Native target invoked by the JIT-generated code; returns a constant double.
static double calledFunc() { return 3.14; }
};
// ============================================================================
// [X86Test_CallMisc5]
// ============================================================================
// The register allocator should clobber the register used by the `call` itself.
struct X86Test_CallMisc5 : public X86Test {
X86Test_CallMisc5() : X86Test("[Call] Misc #5") {}
static void add(PodVector<X86Test*>& tests) {
tests.append(new X86Test_CallMisc5());
}
virtual void compile(X86Compiler& c) {
X86FuncNode* func = c.addFunc(FuncBuilder1<int, void>(kCallConvHost));
X86GpVar vars[16];
uint32_t i, regCount = c.getRegCount().getGp();
for (i = 0; i < regCount; i++) {
if (i == kX86RegIndexBp || i == kX86RegIndexSp)
continue;
vars[i] = c.newInt32("v%u", static_cast<unsigned int>(i));
c.mov(vars[i], 1);
}
X86CallNode* call = c.call(imm_ptr(calledFunc), FuncBuilder0<void>(kCallConvHost));
for (i = 1; i < regCount; i++) {
if (vars[i].isInitialized())
c.add(vars[0], vars[i]);
}
c.ret(vars[0]);
c.endFunc();
}
virtual bool run(void* _func, StringBuilder& result, StringBuilder& expect) {
typedef int (*Func)(void);
Func func = asmjit_cast<Func>(_func);
int resultRet = func();
int expectRet = sizeof(void*) == 4 ? 6 : 14;
result.setFormat("ret=%d", resultRet);
expect.setFormat("ret=%d", expectRet);
return resultRet == expectRet;
}
static void calledFunc() {}
};
// ============================================================================
// [X86Test_MiscConstPool]
// ============================================================================
@@ -3017,6 +3070,7 @@ X86TestSuite::X86TestSuite() :
ADD_TEST(X86Test_CallMisc2);
ADD_TEST(X86Test_CallMisc3);
ADD_TEST(X86Test_CallMisc4);
ADD_TEST(X86Test_CallMisc5);
// Misc.
ADD_TEST(X86Test_MiscConstPool);