Fixed tracing (the ASMJIT_TLOG macro).

Added the possibility to unfollow a jump (#7).
Added the possibility to return from multiple locations in code (related to unfollow).
Added more tests.
Moved some functions from X86Context to Context.
kobalicek
2015-03-01 17:08:38 +01:00
parent bac4a2b14c
commit 9b48ec0f1e
12 changed files with 334 additions and 89 deletions
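
For illustration, the unfollow and multi-return features are designed to work together: an unfollowed jump is treated as another place where the function ends. A minimal sketch that mirrors the new tests below (compiler boilerplate omitted; `L_Escape` is an arbitrary label name chosen here):

  // Return `a` unless it is zero; zero escapes through `b`, an external
  // code address the compiler is told not to analyze.
  c.addFunc(kFuncConvHost, FuncBuilder2<int, int, void*>());

  X86GpVar a(c, kVarTypeInt32, "a");
  X86GpVar b(c, kVarTypeIntPtr, "b");
  Label L_Escape(c);

  c.setArg(0, a);
  c.setArg(1, b);

  c.cmp(a, 0);
  c.jz(L_Escape);
  c.ret(a);            // First returning location.

  c.bind(L_Escape);
  c.unfollow().jmp(b); // Second exit; unfollow() tells the compiler not to
                       // treat the jump target as part of this function.
  c.endFunc();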

View File

@@ -14,6 +14,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <setjmp.h>
 
 using namespace asmjit;
@@ -2601,14 +2602,14 @@ struct X86Test_CallMisc3 : public X86Test {
 };
 
 // ============================================================================
-// [X86Test_ConstPoolBase]
+// [X86Test_MiscConstPool]
 // ============================================================================
 
-struct X86Test_ConstPoolBase : public X86Test {
-  X86Test_ConstPoolBase() : X86Test("[ConstPool] Base") {}
+struct X86Test_MiscConstPool : public X86Test {
+  X86Test_MiscConstPool() : X86Test("[Misc] ConstPool") {}
 
   static void add(PodVector<X86Test*>& tests) {
-    tests.append(new X86Test_ConstPoolBase());
+    tests.append(new X86Test_MiscConstPool());
   }
 
   virtual void compile(X86Compiler& c) {
@@ -2642,6 +2643,156 @@ struct X86Test_ConstPoolBase : public X86Test {
   }
 };
 
+// ============================================================================
+// [X86Test_MiscMultiRet]
+// ============================================================================
+
+struct X86Test_MiscMultiRet : public X86Test {
+  X86Test_MiscMultiRet() : X86Test("[Misc] MultiRet") {}
+
+  static void add(PodVector<X86Test*>& tests) {
+    tests.append(new X86Test_MiscMultiRet());
+  }
+
+  virtual void compile(X86Compiler& c) {
+    c.addFunc(kFuncConvHost, FuncBuilder3<int, int, int, int>());
+
+    X86GpVar op(c, kVarTypeInt32, "op");
+    X86GpVar a(c, kVarTypeInt32, "a");
+    X86GpVar b(c, kVarTypeInt32, "b");
+
+    Label L_Zero(c);
+    Label L_Add(c);
+    Label L_Sub(c);
+    Label L_Mul(c);
+    Label L_Div(c);
+
+    c.setArg(0, op);
+    c.setArg(1, a);
+    c.setArg(2, b);
+
+    c.cmp(op, 0);
+    c.jz(L_Add);
+
+    c.cmp(op, 1);
+    c.jz(L_Sub);
+
+    c.cmp(op, 2);
+    c.jz(L_Mul);
+
+    c.cmp(op, 3);
+    c.jz(L_Div);
+
+    c.bind(L_Zero);
+    c.xor_(a, a);
+    c.ret(a);
+
+    c.bind(L_Add);
+    c.add(a, b);
+    c.ret(a);
+
+    c.bind(L_Sub);
+    c.sub(a, b);
+    c.ret(a);
+
+    c.bind(L_Mul);
+    c.imul(a, b);
+    c.ret(a);
+
+    c.bind(L_Div);
+    c.cmp(b, 0);
+    c.jz(L_Zero);
+
+    X86GpVar zero(c, kVarTypeInt32, "zero");
+    c.xor_(zero, zero);
+    c.idiv(zero, a, b);
+    c.ret(a);
+
+    c.endFunc();
+  }
+
+  virtual bool run(void* _func, StringBuilder& result, StringBuilder& expect) {
+    typedef int (*Func)(int, int, int);
+    Func func = asmjit_cast<Func>(_func);
+
+    int a = 44;
+    int b = 3;
+
+    int r0 = func(0, a, b);
+    int r1 = func(1, a, b);
+    int r2 = func(2, a, b);
+    int r3 = func(3, a, b);
+
+    int e0 = a + b;
+    int e1 = a - b;
+    int e2 = a * b;
+    int e3 = a / b;
+
+    result.setFormat("ret={%d %d %d %d}", r0, r1, r2, r3);
+    expect.setFormat("ret={%d %d %d %d}", e0, e1, e2, e3);
+
+    return result.eq(expect);
+  }
+};
+
+// ============================================================================
+// [X86Test_MiscUnfollow]
+// ============================================================================
+
+// Global (I didn't find better way to really test this).
+static jmp_buf globalJmpBuf;
+
+struct X86Test_MiscUnfollow : public X86Test {
+  X86Test_MiscUnfollow() : X86Test("[Misc] Unfollow") {}
+
+  static void add(PodVector<X86Test*>& tests) {
+    tests.append(new X86Test_MiscUnfollow());
+  }
+
+  virtual void compile(X86Compiler& c) {
+    c.addFunc(kFuncConvHost, FuncBuilder2<void, int, void*>());
+
+    X86GpVar a(c, kVarTypeInt32);
+    X86GpVar b(c, kVarTypeIntPtr);
+    Label tramp(c);
+
+    c.setArg(0, a);
+    c.setArg(1, b);
+
+    c.cmp(a, 0);
+    c.jz(tramp);
+    c.ret(a);
+
+    c.bind(tramp);
+    c.unfollow().jmp(b);
+
+    c.endFunc();
+  }
+
+  virtual bool run(void* _func, StringBuilder& result, StringBuilder& expect) {
+    typedef int (*Func)(int, void*);
+    Func func = asmjit_cast<Func>(_func);
+
+    int resultRet = 0;
+    int expectRet = 1;
+
+    if (!setjmp(globalJmpBuf))
+      resultRet = func(0, (void*)handler);
+    else
+      resultRet = 1;
+
+    result.setFormat("ret={%d}", resultRet);
+    expect.setFormat("ret={%d}", expectRet);
+
+    return resultRet == expectRet;
+  }
+
+  static void handler() { longjmp(globalJmpBuf, 1); }
+};
+
 // ============================================================================
 // [X86TestSuite]
 // ============================================================================
@@ -2730,7 +2881,11 @@ X86TestSuite::X86TestSuite() :
   ADD_TEST(X86Test_CallMisc1);
   ADD_TEST(X86Test_CallMisc2);
   ADD_TEST(X86Test_CallMisc3);
-  ADD_TEST(X86Test_ConstPoolBase);
+
+  // Misc.
+  ADD_TEST(X86Test_MiscConstPool);
+  ADD_TEST(X86Test_MiscUnfollow);
+  ADD_TEST(X86Test_MiscMultiRet);
 }
 
 X86TestSuite::~X86TestSuite() {

View File

@@ -68,7 +68,12 @@ ASMJIT_ENUM(InstOptions) {
   kInstOptionTaken = 0x00000004,
 
   //! Condition is unlikely to be taken.
-  kInstOptionNotTaken = 0x00000008
+  kInstOptionNotTaken = 0x00000008,
+
+  //! Don't follow the jump (Compiler-only).
+  //!
+  //! Prevents following the jump during compilation.
+  kInstOptionUnfollow = 0x00000010
 };
 
 // ============================================================================
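
Like the taken/not-taken hints above it, the new flag is a one-shot instruction option: it is armed on the code generator and consumed by the next instruction emitted. A hedged sketch of that pattern (the `_instOptions` member appears later in this diff; the surrounding emit routine here is hypothetical):

  uint32_t options = _instOptions; // Armed by unfollow(), taken(), ...
  _instOptions = 0;                // Cleared so the flag applies to one instruction only.
  newJumpNode(code, options);      // Hypothetical; compare X86Compiler_newInst below.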

View File

@@ -192,24 +192,26 @@ static ASMJIT_INLINE void BaseCompiler_nodeRemoved(Compiler* self, Node* node_)
     JumpNode* node = static_cast<JumpNode*>(node_);
     TargetNode* target = node->getTarget();
 
-    // Disconnect.
-    JumpNode** pPrev = &target->_from;
-    for (;;) {
-      ASMJIT_ASSERT(*pPrev != NULL);
-      JumpNode* current = *pPrev;
+    if (target != NULL) {
+      // Disconnect.
+      JumpNode** pPrev = &target->_from;
+      for (;;) {
+        ASMJIT_ASSERT(*pPrev != NULL);
+        JumpNode* current = *pPrev;
 
-      if (current == NULL)
-        break;
+        if (current == NULL)
+          break;
 
-      if (current == node) {
-        *pPrev = node->_jumpNext;
-        break;
+        if (current == node) {
+          *pPrev = node->_jumpNext;
+          break;
+        }
+
+        pPrev = &current->_jumpNext;
       }
 
-      pPrev = &current->_jumpNext;
+      target->subNumRefs();
     }
-
-    target->subNumRefs();
   }
 }

View File

@@ -48,6 +48,7 @@ void Context::reset(bool releaseMemory) {
   _stop = NULL;
 
   _unreachableList.reset();
+  _returningList.reset();
  _jccList.reset();
 
   _contextVd.reset(releaseMemory);
@@ -277,7 +278,7 @@ Error Context::removeUnreachableCode() {
   while (link != NULL) {
     Node* node = link->getValue();
 
-    if (node != NULL && node->getPrev() != NULL) {
+    if (node != NULL && node->getPrev() != NULL && node != stop) {
       // Locate all unreachable nodes.
       Node* first = node;
 
       do {
@@ -292,8 +293,10 @@
       node = first;
       do {
         Node* next = node->getNext();
-        if (!node->isInformative() && node->getType() != kNodeTypeAlign)
+        if (!node->isInformative() && node->getType() != kNodeTypeAlign) {
+          ASMJIT_TLOG("[%05d] Unreachable\n", node->getFlowId());
           compiler->removeNode(node);
+        }
         node = next;
       } while (node != end);
     }
@@ -321,23 +324,27 @@ struct LivenessTarget {
 };
 
 Error Context::livenessAnalysis() {
-  FuncNode* func = getFunc();
-  JumpNode* from = NULL;
-
-  Node* node = func->getEnd();
-
   uint32_t bLen = static_cast<uint32_t>(
     ((_contextVd.getLength() + VarBits::kEntityBits - 1) / VarBits::kEntityBits));
 
-  LivenessTarget* ltCur = NULL;
-  LivenessTarget* ltUnused = NULL;
-
-  size_t varMapToVaListOffset = _varMapToVaListOffset;
-
   // No variables.
   if (bLen == 0)
     return kErrorOk;
 
+  FuncNode* func = getFunc();
+  JumpNode* from = NULL;
+
+  LivenessTarget* ltCur = NULL;
+  LivenessTarget* ltUnused = NULL;
+
+  PodList<Node*>::Link* retPtr = _returningList.getFirst();
+  ASMJIT_ASSERT(retPtr != NULL);
+
+  Node* node = retPtr->getValue();
+  size_t varMapToVaListOffset = _varMapToVaListOffset;
+
   VarBits* bCur = newBits(bLen);
   if (bCur == NULL)
     goto _NoMemory;
@@ -491,6 +498,13 @@ _OnDone:
     goto _OnJumpNext;
   }
 
+  retPtr = retPtr->getNext();
+  if (retPtr != NULL) {
+    node = retPtr->getValue();
+    goto _OnVisit;
+  }
+
   return kErrorOk;
 
 _NoMemory:
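
Why liveness is now seeded from every returning node: with multiple `ret` locations (see the MultiRet test above), code such as the block behind `L_Add` returns directly and never reaches the function's end node, so a single backward walk started at `func->getEnd()` would never visit it. Every `ret`, the end node, and every unfollowed jump is therefore recorded via addReturningNode() and used as an independent starting point. Conceptually (a hedged restatement of the `goto _OnVisit` loop above):

  for (PodList<Node*>::Link* p = _returningList.getFirst(); p != NULL; p = p->getNext()) {
    Node* exit = p->getValue(); // A ret node, the end node, or an unfollowed jump.
    // Run the backward liveness walk starting at `exit`.
  }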

View File

@@ -160,12 +160,53 @@ struct Context {
   virtual Error fetch() = 0;
 
   // --------------------------------------------------------------------------
-  // [RemoveUnreachableCode]
+  // [Unreachable Code]
   // --------------------------------------------------------------------------
 
+  //! Add unreachable-flow data to the unreachable flow list.
+  ASMJIT_INLINE Error addUnreachableNode(Node* node) {
+    PodList<Node*>::Link* link = _baseZone.allocT<PodList<Node*>::Link>();
+    if (link == NULL)
+      return setError(kErrorNoHeapMemory);
+
+    link->setValue(node);
+    _unreachableList.append(link);
+
+    return kErrorOk;
+  }
+
   //! Remove unreachable code.
   virtual Error removeUnreachableCode();
 
   // --------------------------------------------------------------------------
+  // [Code-Flow]
+  // --------------------------------------------------------------------------
+
+  //! Add returning node (i.e. node that returns and where liveness analysis
+  //! should start).
+  ASMJIT_INLINE Error addReturningNode(Node* node) {
+    PodList<Node*>::Link* link = _baseZone.allocT<PodList<Node*>::Link>();
+    if (link == NULL)
+      return setError(kErrorNoHeapMemory);
+
+    link->setValue(node);
+    _returningList.append(link);
+
+    return kErrorOk;
+  }
+
+  //! Add jump-flow data to the jcc flow list.
+  ASMJIT_INLINE Error addJccNode(Node* node) {
+    PodList<Node*>::Link* link = _baseZone.allocT<PodList<Node*>::Link>();
+    if (link == NULL)
+      return setError(kErrorNoHeapMemory);
+
+    link->setValue(node);
+    _jccList.append(link);
+
+    return kErrorOk;
+  }
+
+  // --------------------------------------------------------------------------
   // [Analyze]
   // --------------------------------------------------------------------------
@@ -251,6 +292,8 @@ struct Context {
   //! Unreachable nodes.
   PodList<Node*> _unreachableList;
+  //! Returning nodes.
+  PodList<Node*> _returningList;
   //! Jump nodes.
   PodList<Node*> _jccList;

View File

@@ -277,7 +277,7 @@
namespace asmjit { static inline int disabledTrace(...) { return 0; } }
# if defined(ASMJIT_TRACE)
# define ASMJIT_TSEC(_Section_) _Section_
# define ASMJIT_TLOG ::printf(__VA_ARGS__)
# define ASMJIT_TLOG ::printf
# else
# define ASMJIT_TSEC(_Section_) do {} while(0)
# define ASMJIT_TLOG 0 && ::asmjit::disabledTrace
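
This one-line change is the "Fixed tracing" part of the commit: `__VA_ARGS__` is only meaningful inside a function-like macro, and ASMJIT_TLOG is object-like, so the old definition could never forward its arguments. Aliasing `::printf` directly lets every call site expand naturally:

  #define ASMJIT_TLOG ::printf
  ASMJIT_TLOG("[%05d] Unreachable\n", node->getFlowId()); // Expands to ::printf("[%05d] Unreachable\n", ...).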

View File

@@ -5,8 +5,8 @@
 // Zlib - See LICENSE.md file in the package.
 
 // [Guard]
-#ifndef _ASMJIT_ARCH_H
-#define _ASMJIT_ARCH_H
+#ifndef _ASMJIT_HOST_H
+#define _ASMJIT_HOST_H
 
 // [Dependencies - Core]
 #include "base.h"
@@ -56,4 +56,4 @@ typedef X86YmmVar YmmVar;
 #endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
 
 // [Guard]
-#endif // _ASMJIT_ARCH_H
+#endif // _ASMJIT_HOST_H

View File

@@ -793,14 +793,24 @@ static ASMJIT_INLINE size_t X86Compiler_getInstSize(uint32_t code) {
 static InstNode* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) {
   if (IntUtil::inInterval<uint32_t>(code, _kX86InstIdJbegin, _kX86InstIdJend)) {
     JumpNode* node = new(p) JumpNode(self, code, options, opList, opCount);
-    TargetNode* jTarget = self->getTargetById(opList[0].getId());
+    TargetNode* jTarget = NULL;
+
+    if ((options & kInstOptionUnfollow) == 0) {
+      if (opList[0].isLabel())
+        jTarget = self->getTargetById(opList[0].getId());
+      else
+        options |= kInstOptionUnfollow;
+    }
 
     node->orFlags(code == kX86InstIdJmp ? kNodeFlagIsJmp | kNodeFlagIsTaken : kNodeFlagIsJcc);
     node->_target = jTarget;
-    node->_jumpNext = static_cast<JumpNode*>(jTarget->_from);
+    node->_jumpNext = NULL;
 
-    jTarget->_from = node;
-    jTarget->addNumRefs();
+    if (jTarget) {
+      node->_jumpNext = static_cast<JumpNode*>(jTarget->_from);
+      jTarget->_from = node;
+      jTarget->addNumRefs();
+    }
 
     // The 'jmp' is always taken, conditional jump can contain hint, we detect it.
     if (code == kX86InstIdJmp)

View File

@@ -2448,6 +2448,12 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler {
   ASMJIT_X86_EMIT_OPTIONS(X86Compiler)
 
+  //! Force the compiler to not follow the conditional or unconditional jump.
+  ASMJIT_INLINE X86Compiler& unfollow() {
+    _instOptions |= kInstOptionUnfollow;
+    return *this;
+  }
+
   // --------------------------------------------------------------------------
   // [Members]
   // --------------------------------------------------------------------------

View File

@@ -1895,35 +1895,6 @@ static void X86Context_prepareSingleVarInst(uint32_t instId, VarAttr* va) {
 // [asmjit::X86Context - Helpers]
 // ============================================================================
 
-//! \internal
-//!
-//! Add unreachable-flow data to the unreachable flow list.
-static ASMJIT_INLINE Error X86Context_addUnreachableNode(X86Context* self, Node* node) {
-  PodList<Node*>::Link* link = self->_baseZone.allocT<PodList<Node*>::Link>();
-  if (link == NULL)
-    return self->setError(kErrorNoHeapMemory);
-
-  link->setValue(node);
-  self->_unreachableList.append(link);
-
-  return kErrorOk;
-}
-
-//! \internal
-//!
-//! Add jump-flow data to the jcc flow list.
-static ASMJIT_INLINE Error X86Context_addJccNode(X86Context* self, Node* node) {
-  PodList<Node*>::Link* link = self->_baseZone.allocT<PodList<Node*>::Link>();
-  if (link == NULL)
-    ASMJIT_PROPAGATE_ERROR(self->setError(kErrorNoHeapMemory));
-
-  link->setValue(node);
-  self->_jccList.append(link);
-
-  return kErrorOk;
-}
-
 //! \internal
 //!
 //! Get mask of all registers actually used to pass function arguments.
@@ -2181,7 +2152,7 @@ Error X86Context::fetch() {
   // Function flags.
   func->clearFuncFlags(
-    kFuncFlagIsNaked |
+    kFuncFlagIsNaked    |
     kX86FuncFlagPushPop |
     kX86FuncFlagEmms    |
     kX86FuncFlagSFence  |
@@ -2660,8 +2631,6 @@ _NextGroup:
       // Handle conditional/unconditional jump.
       if (node->isJmpOrJcc()) {
         JumpNode* jNode = static_cast<JumpNode*>(node);
-
-        Node* jNext = jNode->getNext();
         TargetNode* jTarget = jNode->getTarget();
 
         // If this jump is unconditional we put next node to unreachable node
@@ -2671,13 +2640,23 @@
         // We also advance our node pointer to the target node to simulate
         // natural flow of the function.
         if (jNode->isJmp()) {
-          if (!jNext->isFetched())
-            ASMJIT_PROPAGATE_ERROR(X86Context_addUnreachableNode(this, jNext));
+          if (!next->isFetched())
+            ASMJIT_PROPAGATE_ERROR(addUnreachableNode(next));
+
+          // Jump not followed.
+          if (jTarget == NULL) {
+            ASMJIT_PROPAGATE_ERROR(addReturningNode(jNode));
+            goto _NextGroup;
+          }
 
           node_ = jTarget;
           goto _Do;
         }
         else {
+          // Jump not followed.
+          if (jTarget == NULL)
+            break;
+
           if (jTarget->isFetched()) {
             uint32_t jTargetFlowId = jTarget->getFlowId();
@@ -2688,13 +2667,12 @@
             jNode->orFlags(kNodeFlagIsTaken);
           }
         }
-        else if (jNext->isFetched()) {
+        else if (next->isFetched()) {
           node_ = jTarget;
           goto _Do;
         }
         else {
-          ASMJIT_PROPAGATE_ERROR(X86Context_addJccNode(this, jNode));
-
+          ASMJIT_PROPAGATE_ERROR(addJccNode(jNode));
           node_ = X86Context_getJccFlow(jNode);
           goto _Do;
         }
@@ -2759,6 +2737,7 @@ _NextGroup:
       // ----------------------------------------------------------------------
 
       case kNodeTypeEnd: {
+        ASMJIT_PROPAGATE_ERROR(addReturningNode(node_));
         goto _NextGroup;
       }
@@ -2768,8 +2747,9 @@ _NextGroup:
       case kNodeTypeRet: {
         RetNode* node = static_cast<RetNode*>(node_);
-        X86FuncDecl* decl = func->getDecl();
+        ASMJIT_PROPAGATE_ERROR(addReturningNode(node));
 
+        X86FuncDecl* decl = func->getDecl();
         if (decl->hasRet()) {
           const FuncInOut& ret = decl->getRet(0);
           uint32_t retClass = x86VarTypeToClass(ret.getVarType());
@@ -2802,7 +2782,10 @@
           }
 
           VI_END(node_);
         }
-        break;
+
+        if (!next->isFetched())
+          ASMJIT_PROPAGATE_ERROR(addUnreachableNode(next));
+
+        goto _NextGroup;
       }
 
       // ----------------------------------------------------------------------
@@ -2946,6 +2929,14 @@ _NextGroup:
   } while (node_ != stop);
 
 _Done:
+  // Mark exit label and end node as fetched, otherwise they can be removed by
+  // `removeUnreachableCode()`, which would lead to crash in some later step.
+  node_ = func->getEnd();
+  if (!node_->isFetched()) {
+    func->getExitNode()->setFlowId(++flowId);
+    node_->setFlowId(++flowId);
+  }
+
   ASMJIT_TLOG("[Fetch] === Done ===\n\n");
   return kErrorOk;
@@ -3771,8 +3762,12 @@ ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRe
       break;
 
     // Advance on non-conditional jump.
-    if (node->hasFlag(kNodeFlagIsJmp))
+    if (node->hasFlag(kNodeFlagIsJmp)) {
       node = static_cast<JumpNode*>(node)->getTarget();
+      // Stop on jump that is not followed.
+      if (node == NULL)
+        break;
+    }
 
     node = node->getNext();
     ASMJIT_ASSERT(node != NULL);
@@ -4378,8 +4373,12 @@ ASMJIT_INLINE uint32_t X86CallAlloc::guessAlloc(VarData* vd, uint32_t allocableR
      break;
 
    // Advance on non-conditional jump.
-    if (node->hasFlag(kNodeFlagIsJmp))
+    if (node->hasFlag(kNodeFlagIsJmp)) {
       node = static_cast<JumpNode*>(node)->getTarget();
+      // Stop on jump that is not followed.
+      if (node == NULL)
+        break;
+    }
 
     node = node->getNext();
     ASMJIT_ASSERT(node != NULL);
@@ -5317,6 +5316,14 @@ _NextGroup:
         JumpNode* node = static_cast<JumpNode*>(node_);
         TargetNode* jTarget = node->getTarget();
 
+        // Target not followed.
+        if (jTarget == NULL) {
+          if (node->isJmp())
+            goto _NextGroup;
+          else
+            break;
+        }
+
         if (node->isJmp()) {
           if (jTarget->hasState()) {
             compiler->_setCursor(node->getPrev());
@@ -5526,7 +5533,10 @@ _NextGroup:
       // If node is `jmp` we follow it as well.
       if (node_->isJmp()) {
         node_ = static_cast<JumpNode*>(node_)->getTarget();
-        goto _Advance;
+        if (node_ == NULL)
+          goto _NextGroup;
+        else
+          goto _Advance;
       }
 
       // Handle stop nodes.

View File

@@ -1164,12 +1164,6 @@
 //! X86/X64 instruction emit options, mainly for internal purposes.
 ASMJIT_ENUM(X86InstOptions) {
-  //! Emit instruction with LOCK prefix.
-  //!
-  //! If this option is used and instruction doesn't support LOCK prefix an
-  //! invalid instruction error is generated.
-  kX86InstOptionLock = 0x00000010,
-
   //! Force REX prefix (X64).
   //!
   //! This option should be used carefully as there are combinations of
@@ -1183,11 +1177,17 @@ ASMJIT_ENUM(X86InstOptions) {
   //! Reserved by `X86Assembler`, do not use!
   _kX86InstOptionNoRex = 0x00000080,
 
+  //! Emit instruction with LOCK prefix.
+  //!
+  //! If this option is used and instruction doesn't support LOCK prefix an
+  //! invalid instruction error is generated.
+  kX86InstOptionLock = 0x00000100,
+
   //! Force 3-byte VEX prefix even if the instruction is encodable by 2-byte
   //! VEX prefix (AVX).
   //!
   //! Ignored if the instruction is not AVX or `kX86InstOptionEVEX` is used.
-  kX86InstOptionVex3 = 0x00000100,
+  kX86InstOptionVex3 = 0x00001000,
 
   //! Force 4-byte EVEX prefix even if the instruction is encodable by using
   //! VEX prefix. Please note that all higher bits from `kX86InstOptionEvex`
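
The renumbering follows from the new base-level flag: kInstOptionUnfollow took 0x00000010, the value kX86InstOptionLock previously occupied, and because the base and X86-specific options end up in the same per-instruction options word, the X86 bits move up. The resulting layout, using only values visible in this diff:

  kInstOptionTaken     = 0x00000004 // base
  kInstOptionNotTaken  = 0x00000008 // base
  kInstOptionUnfollow  = 0x00000010 // base, new
  _kX86InstOptionNoRex = 0x00000080 // x86
  kX86InstOptionLock   = 0x00000100 // x86, was 0x00000010
  kX86InstOptionVex3   = 0x00001000 // x86, was 0x00000100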

View File

@@ -282,7 +282,7 @@ struct X86ZmmReg { Operand::VRegOp data; };
 //! 16 Xmm/Ymm/Zmm registers. AVX512 instruction set doubles the number of SIMD
 //! registers (Xmm/Ymm/Zmm) to 32, this mode has to be explicitly enabled to
 //! take effect as it changes some assumptions.
-//! 
+//!
 //! `X86RegCount` is also used extensively by `X86Compiler`'s register allocator
 //! and data structures. Fp registers were omitted as they are never mapped to
 //! variables, thus, not needed to be managed.