Added proof-of-concept constant pool implementation.

Fixed bug when c.alloc() is called with a specific register.
kobalicekp
2014-04-21 03:28:22 +02:00
parent eaba64c306
commit 0af60d6eb4
21 changed files with 1214 additions and 98 deletions

View File

@@ -8,8 +8,7 @@ CMake_Minimum_Required(VERSION 2.8.12)
# [AsmJit - Configuration] # [AsmJit - Configuration]
# ============================================================================= # =============================================================================
# Whether not to build anything (the source will be included by another library # Whether not to build anything (default FALSE).
# or executable).
# Set(ASMJIT_EMBED FALSE) # Set(ASMJIT_EMBED FALSE)
# Whether to build static library (default FALSE). # Whether to build static library (default FALSE).
@@ -114,6 +113,7 @@ If(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX)
If(WIN32) If(WIN32)
List(APPEND ASMJIT_CFLAGS -D_UNICODE) List(APPEND ASMJIT_CFLAGS -D_UNICODE)
EndIf() EndIf()
If(ASMJIT_STATIC) If(ASMJIT_STATIC)
List(APPEND ASMJIT_CFLAGS -DASMJIT_STATIC) List(APPEND ASMJIT_CFLAGS -DASMJIT_STATIC)
EndIf() EndIf()
@@ -214,6 +214,8 @@ AsmJit_AddSource(ASMJIT_SRC asmjit/base
codegen.h codegen.h
compiler.cpp compiler.cpp
compiler.h compiler.h
constpool.cpp
constpool.h
context.cpp context.cpp
context_p.h context_p.h
cpuinfo.cpp cpuinfo.cpp
@@ -310,6 +312,7 @@ If(ASMJIT_BUILD_SAMPLES)
testdummy testdummy
testmem testmem
testopcode testopcode
testpool
testsizeof testsizeof
testx86 testx86
) )

View File

@@ -65,7 +65,7 @@ int main(int argc, char* argv[]) {
size_t i; size_t i;
size_t count = 200000; size_t count = 200000;
printf("Memory alloc/free test - %d allocations\n\n", (int)count); printf("Memory alloc/free test - %d allocations.\n\n", (int)count);
void** a = (void**)::malloc(sizeof(void*) * count); void** a = (void**)::malloc(sizeof(void*) * count);
void** b = (void**)::malloc(sizeof(void*) * count); void** b = (void**)::malloc(sizeof(void*) * count);
@@ -83,7 +83,7 @@ int main(int argc, char* argv[]) {
::memset(a[i], 0, r); ::memset(a[i], 0, r);
} }
printf("done\n"); printf("Done.\n");
stats(memmgr); stats(memmgr);
printf("\n"); printf("\n");
@@ -91,16 +91,16 @@ int main(int argc, char* argv[]) {
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
if (memmgr->release(a[i]) != kErrorOk) { if (memmgr->release(a[i]) != kErrorOk) {
printf("Failed to free %p\n", b[i]); printf("Failed to free %p.\n", b[i]);
problems++; problems++;
} }
} }
printf("done\n"); printf("Done.\n");
stats(memmgr); stats(memmgr);
printf("\n"); printf("\n");
printf("Verified alloc/free test - %d allocations\n\n", (int)count); printf("Verified alloc/free test - %d allocations.\n\n", (int)count);
printf("Alloc..."); printf("Alloc...");
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
@@ -112,25 +112,25 @@ int main(int argc, char* argv[]) {
gen(a[i], b[i], r); gen(a[i], b[i], r);
} }
printf("done\n"); printf("Done.\n");
stats(memmgr); stats(memmgr);
printf("\n"); printf("\n");
printf("Shuffling..."); printf("Shuffling...");
shuffle(a, b, count); shuffle(a, b, count);
printf("done\n"); printf("Done.\n");
printf("\n"); printf("\n");
printf("Verify and free..."); printf("Verify and free...");
for (i = 0; i < count / 2; i++) { for (i = 0; i < count / 2; i++) {
verify(a[i], b[i]); verify(a[i], b[i]);
if (memmgr->release(a[i]) != kErrorOk) { if (memmgr->release(a[i]) != kErrorOk) {
printf("Failed to free %p\n", a[i]); printf("Failed to free %p.\n", a[i]);
problems++; problems++;
} }
free(b[i]); free(b[i]);
} }
printf("done\n"); printf("Done.\n");
stats(memmgr); stats(memmgr);
printf("\n"); printf("\n");
@@ -144,7 +144,7 @@ int main(int argc, char* argv[]) {
gen(a[i], b[i], r); gen(a[i], b[i], r);
} }
printf("done\n"); printf("Done.\n");
stats(memmgr); stats(memmgr);
printf("\n"); printf("\n");
@@ -152,12 +152,12 @@ int main(int argc, char* argv[]) {
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
verify(a[i], b[i]); verify(a[i], b[i]);
if (memmgr->release(a[i]) != kErrorOk) { if (memmgr->release(a[i]) != kErrorOk) {
printf("Failed to free %p\n", a[i]); printf("Failed to free %p.\n", a[i]);
problems++; problems++;
} }
free(b[i]); free(b[i]);
} }
printf("done\n"); printf("Done.\n");
stats(memmgr); stats(memmgr);
printf("\n"); printf("\n");

204
src/app/test/testpool.cpp Normal file
View File

@@ -0,0 +1,204 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Dependencies - AsmJit]
#include <asmjit/asmjit.h>
// [Dependencies - C]
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
using namespace asmjit;
// ============================================================================
// [EXPECT]
// ============================================================================
static void expectFailed(const char* msg) {
printf("Failure: %s\n", msg);
abort();
}
#define EXPECT(_Exp_, _Msg_) \
do { \
if (!(_Exp_)) { \
expectFailed(_Msg_); \
} \
} while(0)
// ============================================================================
// [Main]
// ============================================================================
int main(int argc, char* argv[]) {
Zone zone(16192);
ConstPool pool(&zone);
uint32_t i;
uint32_t kCount = 1000000;
printf("Adding %u constants to the pool.\n", kCount);
{
size_t prevOffset;
size_t curOffset;
uint64_t c = ASMJIT_UINT64_C(0x0101010101010101);
EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(prevOffset == 0,
"pool.add() - First constant should have zero offset.");
for (i = 1; i < kCount; i++) {
c++;
EXPECT(pool.add(&c, 8, curOffset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(prevOffset + 8 == curOffset,
"pool.add() - Returned incorrect curOffset.");
EXPECT(pool.getSize() == (i + 1) * 8,
"pool.getSize() - Reports incorrect size.");
prevOffset = curOffset;
}
EXPECT(pool.getAlignment() == 8,
"pool.getAlignment() - Expected 8-byte alignment.");
}
printf("Done.\n");
printf("Retrieving %u constants from the pool.\n", kCount);
{
uint64_t c = ASMJIT_UINT64_C(0x0101010101010101);
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT(pool.add(&c, 8, offset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(offset == i * 8,
"pool.add() - Should have reused constant.");
c++;
}
}
printf("Done.\n");
printf("Checking if the constants were split into 4-byte patterns.\n");
{
uint32_t c = 0x01010101;
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT(pool.add(&c, 4, offset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(offset == i * 8,
"pool.add() - Should reuse existing constant.");
c++;
}
}
printf("Done.\n");
printf("Adding 2 byte constant to misalign the current offset.\n");
{
uint16_t c = 0xFFFF;
size_t offset;
EXPECT(pool.add(&c, 2, offset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(offset == kCount * 8,
"pool.add() - Didn't return expected position.");
EXPECT(pool.getAlignment() == 8,
"pool.getAlignment() - Expected 8-byte alignment.");
}
printf("Done.\n");
printf("Adding 8 byte constant to check if pool gets aligned again.\n");
{
uint64_t c = ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF);
size_t offset;
EXPECT(pool.add(&c, 8, offset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(offset == kCount * 8 + 8,
"pool.add() - Didn't return aligned offset.");
}
printf("Done.\n");
printf("Adding 2 byte constant verify the gap is filled.\n");
{
uint16_t c = 0xFFFE;
size_t offset;
EXPECT(pool.add(&c, 2, offset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(offset == kCount * 8 + 2,
"pool.add() - Didn't fill the gap.");
EXPECT(pool.getAlignment() == 8,
"pool.getAlignment() - Expected 8-byte alignment.");
}
printf("Done.\n");
printf("Checking reset functionality.\n");
{
pool.reset();
EXPECT(pool.getSize() == 0,
"pool.getSize() - Expected pool size to be zero.");
EXPECT(pool.getAlignment() == 0,
"pool.getAlignment() - Expected pool alignment to be zero.");
}
printf("Done.\n");
printf("Checking pool alignment when combined constants are added.\n");
{
uint8_t bytes[32] = { 0 };
uint64_t c = 0;
size_t offset;
pool.add(bytes, 1, offset);
EXPECT(pool.getSize() == 1,
"pool.getSize() - Expected pool size to be 1 byte.");
EXPECT(pool.getAlignment() == 1,
"pool.getAlignment() - Expected pool alignment to be 1 byte.");
EXPECT(offset == 0,
"pool.add() - Expected offset returned to be zero.");
pool.add(bytes, 2, offset);
EXPECT(pool.getSize() == 4,
"pool.getSize() - Expected pool size to be 4 bytes.");
EXPECT(pool.getAlignment() == 2,
"pool.getAlignment() - Expected pool alignment to be 2 bytes.");
EXPECT(offset == 2,
"pool.add() - Expected offset returned to be 2.");
pool.add(bytes, 4, offset);
EXPECT(pool.getSize() == 8,
"pool.getSize() - Expected pool size to be 8 bytes.");
EXPECT(pool.getAlignment() == 4,
"pool.getAlignment() - Expected pool alignment to be 4 bytes.");
EXPECT(offset == 4,
"pool.add() - Expected offset returned to be 4.");
pool.add(bytes, 4, offset);
EXPECT(pool.getSize() == 8,
"pool.getSize() - Expected pool size to be 8 bytes.");
EXPECT(pool.getAlignment() == 4,
"pool.getAlignment() - Expected pool alignment to be 4 bytes.");
EXPECT(offset == 4,
"pool.add() - Expected offset returned to be 4.");
pool.add(bytes, 32, offset);
EXPECT(pool.getSize() == 64,
"pool.getSize() - Expected pool size to be 64 bytes.");
EXPECT(pool.getAlignment() == 32,
"pool.getAlignment() - Expected pool alignment to be 32 bytes.");
EXPECT(offset == 32,
"pool.add() - Expected offset returned to be 32.");
}
printf("Done.\n");
return 0;
}

View File

@@ -402,7 +402,7 @@ struct X86Test_AllocManual : public X86Test {
c.addFunc(kFuncConvHost, FuncBuilder0<int>()); c.addFunc(kFuncConvHost, FuncBuilder0<int>());
GpVar v0(c, kVarTypeInt32, "v0"); GpVar v0(c, kVarTypeInt32, "v0");
GpVar v1(c, kVarTypeInt32, "v0"); GpVar v1(c, kVarTypeInt32, "v1");
GpVar cnt(c, kVarTypeInt32, "cnt"); GpVar cnt(c, kVarTypeInt32, "cnt");
c.xor_(v0, v0); c.xor_(v0, v0);
@@ -2344,6 +2344,49 @@ struct X86Test_CallMisc1 : public X86Test {
} }
}; };
// ============================================================================
// [X86Test_ConstPoolBase]
// ============================================================================
struct X86Test_ConstPoolBase : public X86Test {
X86Test_ConstPoolBase() : X86Test("[ConstPool] Base") {}
static void add(PodVector<X86Test*>& tests) {
tests.append(new X86Test_ConstPoolBase());
}
virtual void compile(Compiler& c) {
c.addFunc(kFuncConvHost, FuncBuilder0<int>());
GpVar v0(c, kVarTypeInt32, "v0");
GpVar v1(c, kVarTypeInt32, "v1");
Mem c0(c.newConst4(kConstScopeLocal, 200));
Mem c1(c.newConst4(kConstScopeLocal, 33));
c.mov(v0, c0);
c.mov(v1, c1);
c.add(v0, v1);
c.ret(v0);
c.endFunc();
}
virtual bool run(void* _func, StringBuilder& result, StringBuilder& expect) {
typedef int (*Func)(void);
Func func = asmjit_cast<Func>(_func);
int resultRet = func();
int expectRet = 233;
result.setFormat("ret=%d", resultRet);
expect.setFormat("ret=%d", expectRet);
return resultRet == expectRet;
}
};
// ============================================================================ // ============================================================================
// [X86Test_Dummy] // [X86Test_Dummy]
// ============================================================================ // ============================================================================
@@ -2470,6 +2513,7 @@ X86TestSuite::X86TestSuite() :
ADD_TEST(X86Test_CallMultiple); ADD_TEST(X86Test_CallMultiple);
ADD_TEST(X86Test_CallRecursive); ADD_TEST(X86Test_CallRecursive);
ADD_TEST(X86Test_CallMisc1); ADD_TEST(X86Test_CallMisc1);
ADD_TEST(X86Test_ConstPoolBase);
// Dummy. // Dummy.
// ADD_TEST(X86Test_Dummy); // ADD_TEST(X86Test_Dummy);

View File

@@ -14,6 +14,7 @@
#include "base/assembler.h" #include "base/assembler.h"
#include "base/codegen.h" #include "base/codegen.h"
#include "base/compiler.h" #include "base/compiler.h"
#include "base/constpool.h"
#include "base/cpuinfo.h" #include "base/cpuinfo.h"
#include "base/cputicks.h" #include "base/cputicks.h"
#include "base/defs.h" #include "base/defs.h"

View File

@@ -48,7 +48,7 @@ void BaseAssembler::clear() {
void BaseAssembler::reset() { void BaseAssembler::reset() {
_purge(); _purge();
_zoneAllocator.reset(); _baseZone.reset();
if (_buffer != NULL) { if (_buffer != NULL) {
::free(_buffer); ::free(_buffer);
@@ -63,7 +63,7 @@ void BaseAssembler::reset() {
} }
void BaseAssembler::_purge() { void BaseAssembler::_purge() {
_zoneAllocator.clear(); _baseZone.clear();
_cursor = _buffer; _cursor = _buffer;
_options = 0; _options = 0;
@@ -188,7 +188,7 @@ LabelLink* BaseAssembler::_newLabelLink() {
_unusedLinks = link->prev; _unusedLinks = link->prev;
} }
else { else {
link = _zoneAllocator.allocT<LabelLink>(); link = _baseZone.allocT<LabelLink>();
if (link == NULL) if (link == NULL)
return NULL; return NULL;
} }

View File

@@ -29,7 +29,7 @@ CodeGen::CodeGen(BaseRuntime* runtime) :
_error(kErrorOk), _error(kErrorOk),
_features(IntUtil::mask(kCodeGenOptimizedAlign)), _features(IntUtil::mask(kCodeGenOptimizedAlign)),
_options(0), _options(0),
_zoneAllocator(16384 - sizeof(Zone::Chunk) - kMemAllocOverhead) {} _baseZone(16384 - sizeof(Zone::Chunk) - kMemAllocOverhead) {}
CodeGen::~CodeGen() { CodeGen::~CodeGen() {
if (_errorHandler != NULL) if (_errorHandler != NULL)

View File

@@ -189,8 +189,8 @@ struct CodeGen {
//! @brief Options for the next generated instruction (only 8-bits used). //! @brief Options for the next generated instruction (only 8-bits used).
uint32_t _options; uint32_t _options;
//! @brief Zone memory allocator. //! @brief Base zone.
Zone _zoneAllocator; Zone _baseZone;
}; };
//! @} //! @}

View File

@@ -44,8 +44,11 @@ BaseCompiler::BaseCompiler(BaseRuntime* runtime) :
_lastNode(NULL), _lastNode(NULL),
_cursor(NULL), _cursor(NULL),
_func(NULL), _func(NULL),
_varAllocator(4096 - kMemAllocOverhead), _varZone(4096 - kMemAllocOverhead),
_stringAllocator(4096 - kMemAllocOverhead) {} _stringZone(4096 - kMemAllocOverhead),
_localConstZone(4096 - kMemAllocOverhead),
_localConstPool(&_localConstZone),
_globalConstPool(&_baseZone) {}
BaseCompiler::~BaseCompiler() { BaseCompiler::~BaseCompiler() {
reset(); reset();
@@ -61,20 +64,24 @@ void BaseCompiler::clear() {
void BaseCompiler::reset() { void BaseCompiler::reset() {
_purge(); _purge();
_zoneAllocator.reset();
_varAllocator.reset(); _localConstPool.reset();
_stringAllocator.reset(); _globalConstPool.reset();
_targets.reset(); _targets.reset();
_vars.reset(); _vars.reset();
_baseZone.reset();
_varZone.reset();
_stringZone.reset();
_localConstZone.reset();
} }
void BaseCompiler::_purge() { void BaseCompiler::_purge() {
_zoneAllocator.clear(); _baseZone.clear();
_varAllocator.clear(); _varZone.clear();
_stringAllocator.clear(); _stringZone.clear();
_options = 0; _options = 0;
@@ -343,11 +350,12 @@ EmbedNode* BaseCompiler::newEmbed(const void* data, uint32_t size) {
EmbedNode* node; EmbedNode* node;
if (size > EmbedNode::kInlineBufferSize) { if (size > EmbedNode::kInlineBufferSize) {
void* clonedData = _stringAllocator.alloc(size); void* clonedData = _stringZone.alloc(size);
if (clonedData == NULL) if (clonedData == NULL)
goto _NoMemory; goto _NoMemory;
::memcpy(clonedData, data, size); if (data != NULL)
::memcpy(clonedData, data, size);
data = clonedData; data = clonedData;
} }
@@ -376,7 +384,7 @@ CommentNode* BaseCompiler::newComment(const char* str) {
CommentNode* node; CommentNode* node;
if (str != NULL && str[0]) { if (str != NULL && str[0]) {
str = _stringAllocator.sdup(str); str = _stringZone.sdup(str);
if (str == NULL) if (str == NULL)
goto _NoMemory; goto _NoMemory;
} }
@@ -452,7 +460,7 @@ HintNode* BaseCompiler::addHint(BaseVar& var, uint32_t hint, uint32_t value) {
// ============================================================================ // ============================================================================
VarData* BaseCompiler:: _newVd(uint32_t type, uint32_t size, uint32_t c, const char* name) { VarData* BaseCompiler:: _newVd(uint32_t type, uint32_t size, uint32_t c, const char* name) {
VarData* vd = reinterpret_cast<VarData*>(_varAllocator.alloc(sizeof(VarData))); VarData* vd = reinterpret_cast<VarData*>(_varZone.alloc(sizeof(VarData)));
if (vd == NULL) if (vd == NULL)
goto _NoMemory; goto _NoMemory;
@@ -461,7 +469,7 @@ VarData* BaseCompiler:: _newVd(uint32_t type, uint32_t size, uint32_t c, const c
vd->_contextId = kInvalidValue; vd->_contextId = kInvalidValue;
if (name != NULL && name[0] != '\0') { if (name != NULL && name[0] != '\0') {
vd->_name = _stringAllocator.sdup(name); vd->_name = _stringZone.sdup(name);
} }
vd->_type = static_cast<uint8_t>(type); vd->_type = static_cast<uint8_t>(type);
@@ -567,7 +575,7 @@ void BaseCompiler::rename(BaseVar& var, const char* name) {
vd->_name = noName; vd->_name = noName;
if (name != NULL && name[0] != '\0') { if (name != NULL && name[0] != '\0') {
vd->_name = _stringAllocator.sdup(name); vd->_name = _stringZone.sdup(name);
} }
} }

View File

@@ -11,6 +11,7 @@
// [Dependencies - AsmJit] // [Dependencies - AsmJit]
#include "../base/assembler.h" #include "../base/assembler.h"
#include "../base/codegen.h" #include "../base/codegen.h"
#include "../base/constpool.h"
#include "../base/defs.h" #include "../base/defs.h"
#include "../base/error.h" #include "../base/error.h"
#include "../base/func.h" #include "../base/func.h"
@@ -18,6 +19,7 @@
#include "../base/podlist.h" #include "../base/podlist.h"
#include "../base/podvector.h" #include "../base/podvector.h"
#include "../base/runtime.h" #include "../base/runtime.h"
#include "../base/zone.h"
// [Api-Begin] // [Api-Begin]
#include "../apibegin.h" #include "../apibegin.h"
@@ -40,6 +42,23 @@ struct EndNode;
struct InstNode; struct InstNode;
struct JumpNode; struct JumpNode;
// ============================================================================
// [asmjit::kConstScope]
// ============================================================================
//! @brief Scope of a constant in the constant pool.
ASMJIT_ENUM(kConstScope) {
//! @brief Local constant.
//!
//! Local constant is always embedded right after the current function.
kConstScopeLocal = 0,
//! @brief Global constant.
//!
//! Global constant is embedded at the end of the currently compiled code.
kConstScopeGlobal = 1
};
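// A quick sketch of the difference, assuming an x86/x64 Compiler `c` with an
// open function (as in X86Test_ConstPoolBase from this change):
//
//   Mem cl = c.newConst4(kConstScopeLocal, 200);   // Emitted by endFunc(), right after the function.
//   Mem cg = c.newConst4(kConstScopeGlobal, 33);   // Emitted once by make(), after all generated code.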
// ============================================================================ // ============================================================================
// [asmjit::kVarAttrFlags] // [asmjit::kVarAttrFlags]
// ============================================================================ // ============================================================================
@@ -486,7 +505,7 @@ struct VarData {
uint8_t _modified : 1; uint8_t _modified : 1;
//! @internal //! @internal
uint8_t _reserved0 : 3; uint8_t _reserved0 : 3;
//! @brief Varialbe natural alignment. //! @brief Variable natural alignment.
uint8_t _alignment; uint8_t _alignment;
//! @brief Variable size. //! @brief Variable size.
@@ -896,10 +915,13 @@ struct EmbedNode : public BaseNode {
//! @brief Create a new @ref EmbedNode instance. //! @brief Create a new @ref EmbedNode instance.
ASMJIT_INLINE EmbedNode(BaseCompiler* compiler, void* data, uint32_t size) : BaseNode(compiler, kNodeTypeEmbed) { ASMJIT_INLINE EmbedNode(BaseCompiler* compiler, void* data, uint32_t size) : BaseNode(compiler, kNodeTypeEmbed) {
_size = size; _size = size;
if (size <= kInlineBufferSize) if (size <= kInlineBufferSize) {
::memcpy(_data.buf, data, size); if (data != NULL)
else ::memcpy(_data.buf, data, size);
}
else {
_data.ptr = static_cast<uint8_t*>(data); _data.ptr = static_cast<uint8_t*>(data);
}
} }
//! @brief Destroy the @ref EmbedNode instance. //! @brief Destroy the @ref EmbedNode instance.
@@ -1100,48 +1122,74 @@ struct InstNode : public BaseNode {
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! @brief Get instruction code, see @c kInstCode. //! @brief Get instruction code, see @c kInstCode.
ASMJIT_INLINE uint32_t getCode() const ASMJIT_INLINE uint32_t getCode() const {
{ return _code; } return _code;
}
//! @brief Set instruction code to @a code. //! @brief Set instruction code to @a code.
//! //!
//! Please do not modify instruction code if you do not know what you are //! Please do not modify instruction code if you do not know what you are
//! doing. Incorrect instruction code or operands can raise assertion() at //! doing. Incorrect instruction code or operands can raise assertion() at
//! runtime. //! runtime.
ASMJIT_INLINE void setCode(uint32_t code) ASMJIT_INLINE void setCode(uint32_t code) {
{ _code = static_cast<uint16_t>(code); } _code = static_cast<uint16_t>(code);
}
//! @brief Whether the instruction is an unconditional jump or whether the //! @brief Whether the instruction is an unconditional jump or whether the
//! instruction is a conditional jump which is likely to be taken. //! instruction is a conditional jump which is likely to be taken.
ASMJIT_INLINE bool isTaken() const { return hasFlag(kNodeFlagIsTaken); } ASMJIT_INLINE bool isTaken() const {
return hasFlag(kNodeFlagIsTaken);
}
//! @brief Get emit options. //! @brief Get emit options.
ASMJIT_INLINE uint32_t getOptions() const { return _options; } ASMJIT_INLINE uint32_t getOptions() const {
return _options;
}
//! @brief Set emit options. //! @brief Set emit options.
ASMJIT_INLINE void setOptions(uint32_t options) { _options = static_cast<uint8_t>(options); } ASMJIT_INLINE void setOptions(uint32_t options) {
_options = static_cast<uint8_t>(options);
}
//! @brief Add emit options. //! @brief Add emit options.
ASMJIT_INLINE void addOptions(uint32_t options) { _options |= static_cast<uint8_t>(options); } ASMJIT_INLINE void addOptions(uint32_t options) {
_options |= static_cast<uint8_t>(options);
}
//! @brief Mask emit options. //! @brief Mask emit options.
ASMJIT_INLINE void andOptions(uint32_t options) { _options &= static_cast<uint8_t>(options); } ASMJIT_INLINE void andOptions(uint32_t options) {
_options &= static_cast<uint8_t>(options);
}
//! @brief Clear emit options. //! @brief Clear emit options.
ASMJIT_INLINE void delOptions(uint32_t options) { _options &= static_cast<uint8_t>(~options); } ASMJIT_INLINE void delOptions(uint32_t options) {
_options &= static_cast<uint8_t>(~options);
}
//! @brief Get operands list. //! @brief Get operands list.
ASMJIT_INLINE Operand* getOpList() { return _opList; } ASMJIT_INLINE Operand* getOpList() {
return _opList;
}
//! @overload //! @overload
ASMJIT_INLINE const Operand* getOpList() const { return _opList; } ASMJIT_INLINE const Operand* getOpList() const {
return _opList;
}
//! @brief Get operands count. //! @brief Get operands count.
ASMJIT_INLINE uint32_t getOpCount() const { return _opCount; } ASMJIT_INLINE uint32_t getOpCount() const {
return _opCount;
}
//! @brief Get whether the instruction contains a memory operand. //! @brief Get whether the instruction contains a memory operand.
ASMJIT_INLINE bool hasMemOp() const { return _memOpIndex != 0xFF; } ASMJIT_INLINE bool hasMemOp() const {
return _memOpIndex != 0xFF;
}
//! @brief Set memory operand index (in opList), 0xFF means that instruction //! @brief Set memory operand index (in opList), 0xFF means that instruction
//! doesn't have a memory operand. //! doesn't have a memory operand.
ASMJIT_INLINE void setMemOpIndex(uint32_t index) { _memOpIndex = static_cast<uint8_t>(index); } ASMJIT_INLINE void setMemOpIndex(uint32_t index) {
_memOpIndex = static_cast<uint8_t>(index);
}
//! @brief Reset memory operand index, setting it to 0xFF. //! @brief Reset memory operand index, setting it to 0xFF.
ASMJIT_INLINE void resetMemOpIndex() { _memOpIndex = 0xFF; } ASMJIT_INLINE void resetMemOpIndex() {
_memOpIndex = 0xFF;
}
//! @brief Get memory operand. //! @brief Get memory operand.
//! //!
@@ -1678,25 +1726,25 @@ struct BaseCompiler : public CodeGen {
template<typename T> template<typename T>
ASMJIT_INLINE T* newNode() { ASMJIT_INLINE T* newNode() {
void* p = _zoneAllocator.alloc(sizeof(T)); void* p = _baseZone.alloc(sizeof(T));
return new(p) T(this); return new(p) T(this);
} }
template<typename T, typename P0> template<typename T, typename P0>
ASMJIT_INLINE T* newNode(P0 p0) { ASMJIT_INLINE T* newNode(P0 p0) {
void* p = _zoneAllocator.alloc(sizeof(T)); void* p = _baseZone.alloc(sizeof(T));
return new(p) T(this, p0); return new(p) T(this, p0);
} }
template<typename T, typename P0, typename P1> template<typename T, typename P0, typename P1>
ASMJIT_INLINE T* newNode(P0 p0, P1 p1) { ASMJIT_INLINE T* newNode(P0 p0, P1 p1) {
void* p = _zoneAllocator.alloc(sizeof(T)); void* p = _baseZone.alloc(sizeof(T));
return new(p) T(this, p0, p1); return new(p) T(this, p0, p1);
} }
template<typename T, typename P0, typename P1, typename P2> template<typename T, typename P0, typename P1, typename P2>
ASMJIT_INLINE T* newNode(P0 p0, P1 p1, P2 p2) { ASMJIT_INLINE T* newNode(P0 p0, P1 p1, P2 p2) {
void* p = _zoneAllocator.alloc(sizeof(T)); void* p = _baseZone.alloc(sizeof(T));
return new(p) T(this, p0, p1, p2); return new(p) T(this, p0, p1, p2);
} }
@@ -1906,9 +1954,20 @@ struct BaseCompiler : public CodeGen {
// [Stack] // [Stack]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! @brief Create a new @ref BaseMem. //! @internal
//!
//! @brief Create a new memory chunk allocated on the current function's stack.
virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) = 0; virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) = 0;
// --------------------------------------------------------------------------
// [Const]
// --------------------------------------------------------------------------
//! @internal
//!
//! @brief Put data to a constant-pool and get a memory reference to it.
virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) = 0;
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Serialize] // [Serialize]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
@@ -1941,15 +2000,27 @@ struct BaseCompiler : public CodeGen {
//! @brief Current function. //! @brief Current function.
FuncNode* _func; FuncNode* _func;
//! @brief Variable allocator. //! @brief Variable zone.
Zone _varAllocator; Zone _varZone;
//! @brief String/data allocator. //! @brief String/data zone.
Zone _stringAllocator; Zone _stringZone;
//! @brief Local constant pool zone.
Zone _localConstZone;
//! @brief Targets. //! @brief Targets.
PodVector<TargetNode*> _targets; PodVector<TargetNode*> _targets;
//! @brief Variables. //! @brief Variables.
PodVector<VarData*> _vars; PodVector<VarData*> _vars;
//! @brief Local constant pool, flushed at the end of each function.
ConstPool _localConstPool;
//! @brief Global constant pool, flushed at the end of the compilation.
ConstPool _globalConstPool;
//! @brief Label to start of the local constant pool.
Label _localConstPoolLabel;
//! @brief Label to start of the global constant pool.
Label _globalConstPoolLabel;
}; };
// ============================================================================ // ============================================================================

View File

@@ -0,0 +1,369 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies - AsmJit]
#include "../base/constpool.h"
#include "../base/intutil.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// Binary tree code is based on Julienne Walker's "Andersson Binary Trees"
// article and implementation. However, only three operations are implemented -
// get, insert and traverse.
// ============================================================================
// [asmjit::ConstPoolTree - Ops]
// ============================================================================
const ConstPoolNode ConstPoolTree::_sentinel = { {
const_cast<ConstPoolNode*>(&ConstPoolTree::_sentinel),
const_cast<ConstPoolNode*>(&ConstPoolTree::_sentinel)
}, 0, 0, 0 };
//! @internal
//!
//! @brief Remove left horizontal links.
static ASMJIT_INLINE ConstPoolNode* ConstPoolTree_skewNode(ConstPoolNode* node) {
if (node->_link[0]->_level == node->_level && node->_level != 0 ) {
ConstPoolNode *save = node->_link[0];
node->_link[0] = save->_link[1];
save->_link[1] = node;
node = save;
}
return node;
}
//! @internal
//!
//! @brief Remove consecutive horizontal links.
static ASMJIT_INLINE ConstPoolNode* ConstPoolTree_splitNode(ConstPoolNode* node) {
if (node->_link[1]->_link[1]->_level == node->_level && node->_level != 0) {
ConstPoolNode *save = node->_link[1];
node->_link[1] = save->_link[0];
save->_link[0] = node;
node = save;
node->_level++;
}
return node;
}
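// A small illustration of the two steps (bracketed numbers are AA-tree levels,
// the sentinel has level 0):
//
//   skewNode - a left horizontal link is turned into a right one by a right
//   rotation:
//
//        T[n]                    L[n]
//       /    \                  /    \
//     L[n]    R      ==>       A     T[n]
//     /  \                           /  \
//    A    B                         B    R
//
//   splitNode - two consecutive right horizontal links are broken by a left
//   rotation that promotes the middle node one level up:
//
//     T[n] -- R[n] -- X[n]            R[n+1]
//     /       /                      /      \
//    A       B            ==>      T[n]     X[n]
//                                   /  \
//                                  A    B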
ConstPoolNode* ConstPoolTree::get(const void* data) {
ConstPoolNode* sentinel = const_cast<ConstPoolNode*>(&_sentinel);
ConstPoolNode* node = _root;
size_t dataSize = _dataSize;
while (node != sentinel) {
int c = ::memcmp(node->getData(), data, dataSize);
if (c == 0)
return node;
node = node->_link[c < 0];
}
return NULL;
}
void ConstPoolTree::put(ConstPoolNode* newNode) {
ConstPoolNode* sentinel = const_cast<ConstPoolNode*>(&_sentinel);
size_t dataSize = _dataSize;
_length++;
if (_root == sentinel) {
_root = newNode;
return;
}
ConstPoolNode* node = _root;
ConstPoolNode* stack[kHeightLimit];
unsigned int top = 0;
unsigned int dir;
// Find a spot and save the stack.
for (;;) {
stack[top++] = node;
dir = ::memcmp(node->getData(), newNode->getData(), dataSize) < 0;
if (node->_link[dir] == sentinel)
break;
node = node->_link[dir];
}
// Link and rebalance.
node->_link[dir] = newNode;
while (top > 0) {
// Which child?
node = stack[--top];
if (top != 0)
dir = stack[top - 1]->_link[1] == node;
node = ConstPoolTree_skewNode(node);
node = ConstPoolTree_splitNode(node);
// Fix the parent.
if (top != 0)
stack[top - 1]->_link[dir] = node;
else
_root = node;
}
}
// ============================================================================
// [asmjit::ConstPool - Construction / Destruction]
// ============================================================================
ConstPool::ConstPool(Zone* zone) {
_zone = zone;
size_t dataSize = 1;
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].setDataSize(dataSize);
_gaps[i] = NULL;
dataSize <<= 1;
}
_gapPool = NULL;
_size = 0;
_alignment = 0;
}
ConstPool::~ConstPool() {}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
void ConstPool::reset() {
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].reset();
_gaps[i] = NULL;
}
_gapPool = NULL;
_size = 0;
_alignment = 0;
}
// ============================================================================
// [asmjit::ConstPool - Ops]
// ============================================================================
ASMJIT_INLINE size_t ConstPool_getGapIndex(size_t size) {
if (size <= 1)
return ConstPool::kIndex1;
else if (size <= 3)
return ConstPool::kIndex2;
else if (size <= 7)
return ConstPool::kIndex4;
else if (size <= 15)
return ConstPool::kIndex8;
else
return ConstPool::kIndex16;
}
ASMJIT_INLINE ConstPoolGap* ConstPool_allocGap(ConstPool* self) {
ConstPoolGap* gap = self->_gapPool;
if (gap == NULL)
return self->_zone->allocT<ConstPoolGap>();
self->_gapPool = gap->_next;
return gap;
}
ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPoolGap* gap) {
gap->_next = self->_gapPool;
self->_gapPool = gap;
}
static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) {
ASMJIT_ASSERT(length > 0);
while (length > 0) {
size_t gapIndex;
size_t gapLength;
if (length >= 16 && IntUtil::isAligned<size_t>(offset, 16)) {
gapIndex = ConstPool::kIndex16;
gapLength = 16;
}
else if (length >= 8 && IntUtil::isAligned<size_t>(offset, 8)) {
gapIndex = ConstPool::kIndex8;
gapLength = 8;
}
else if (length >= 4 && IntUtil::isAligned<size_t>(offset, 4)) {
gapIndex = ConstPool::kIndex4;
gapLength = 4;
}
else if (length >= 2 && IntUtil::isAligned<size_t>(offset, 2)) {
gapIndex = ConstPool::kIndex2;
gapLength = 2;
}
else {
gapIndex = ConstPool::kIndex1;
gapLength = 1;
}
// We don't have to check for errors here; if the allocation failed, nothing
// really happened (the gap simply won't be recorded) and the next allocation
// that is checked will fail again and report kErrorNoHeapMemory.
ConstPoolGap* gap = ConstPool_allocGap(self);
if (gap == NULL)
return;
gap->_next = self->_gaps[gapIndex];
self->_gaps[gapIndex] = gap;
gap->_offset = offset;
gap->_length = gapLength;
offset += gapLength;
length -= gapLength;
}
}
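// Worked example: ConstPool_addGap(self, 1, 7) records three gaps, because
// each recorded chunk must be a power-of-two length aligned to its own size:
//
//   offset 1, length 1   (kIndex1)
//   offset 2, length 2   (kIndex2)
//   offset 4, length 4   (kIndex4)
//
// add() can later hand these offsets out to smaller constants instead of
// growing the pool.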
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) {
size_t treeIndex;
if (size == 32)
treeIndex = kIndex32;
else if (size == 16)
treeIndex = kIndex16;
else if (size == 8)
treeIndex = kIndex8;
else if (size == 4)
treeIndex = kIndex4;
else if (size == 2)
treeIndex = kIndex2;
else if (size == 1)
treeIndex = kIndex1;
else
return kErrorInvalidArgument;
ConstPoolNode* node = _tree[treeIndex].get(data);
if (node != NULL) {
dstOffset = node->_offset;
return kErrorOk;
}
// Before extending the pool, check whether there is already a gap that can be
// reused for the requested data.
size_t offset = ~static_cast<size_t>(0);
size_t gapIndex = treeIndex;
while (gapIndex != kIndexCount - 1) {
ConstPoolGap* gap = _gaps[gapIndex];
// Check if there is a gap of this (or a larger, and thus aligned) size class.
if (gap != NULL) {
size_t gapOffset = gap->_offset;
size_t gapLength = gap->_length;
// Destroy the gap for now.
_gaps[gapIndex] = gap->_next;
ConstPool_freeGap(this, gap);
offset = gapOffset;
ASMJIT_ASSERT(IntUtil::isAligned<size_t>(offset, size));
// Re-add the unused remainder, which starts right after the consumed bytes.
gapLength -= size;
if (gapLength > 0)
ConstPool_addGap(this, gapOffset + size, gapLength);
break;
}
gapIndex++;
}
if (offset == ~static_cast<size_t>(0)) {
// Get how many bytes have to be skipped so the offset is aligned to 'size'.
size_t deltaTo = IntUtil::deltaTo<size_t>(_size, size);
if (deltaTo != 0) {
ConstPool_addGap(this, _size, deltaTo);
_size += deltaTo;
}
offset = _size;
_size += size;
}
// Add the initial node to the right index.
node = ConstPoolTree::_newNode(_zone, data, size, offset, false);
if (node == NULL)
return kErrorNoHeapMemory;
_tree[treeIndex].put(node);
_alignment = IntUtil::iMax<size_t>(_alignment, size);
dstOffset = offset;
// Now create a bunch of shared constants that are based on the data pattern.
// We stop at size 4; it probably doesn't make sense to split constants down
// to 1 byte.
size_t pCount = 1;
while (size > 4) {
size >>= 1;
pCount <<= 1;
ASMJIT_ASSERT(treeIndex != 0);
treeIndex--;
const uint8_t* pData = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < pCount; i++, pData += size) {
node = _tree[treeIndex].get(pData);
if (node != NULL)
continue;
node = ConstPoolTree::_newNode(_zone, pData, size, offset + (i * size), true);
// The shared sub-constant is only a cache; skip it if the allocation failed.
if (node == NULL)
continue;
_tree[treeIndex].put(node);
}
}
return kErrorOk;
}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
struct ConstPoolFill {
ASMJIT_INLINE ConstPoolFill(uint8_t* dst, size_t dataSize) :
_dst(dst),
_dataSize(dataSize) {}
ASMJIT_INLINE void visit(const ConstPoolNode* node) {
if (!node->_shared)
::memcpy(_dst + node->_offset, node->getData(), _dataSize);
}
uint8_t* _dst;
size_t _dataSize;
};
void ConstPool::fill(void* dst) {
// Clear possible gaps; asmjit should never emit garbage to the output.
::memset(dst, 0, _size);
ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].iterate(filler);
filler._dataSize <<= 1;
}
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

295
src/asmjit/base/constpool.h Normal file
View File

@@ -0,0 +1,295 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CONSTPOOL_H
#define _ASMJIT_BASE_CONSTPOOL_H
// [Dependencies - AsmJit]
#include "../base/error.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::ConstPoolNode]
// ============================================================================
//! @internal
//!
//! @brief Zone-allocated constant-pool node.
struct ConstPoolNode {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
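//! @brief Get a pointer to the constant's data.
//!
//! The bytes are stored inline, immediately after the node header; see
//! ConstPoolTree::_newNode(), which allocates sizeof(ConstPoolNode) + size.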
ASMJIT_INLINE void* getData() const {
return static_cast<void*>(const_cast<ConstPoolNode*>(this) + 1);
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! @brief Left/Right nodes.
ConstPoolNode* _link[2];
//! @brief Horizontal level for balance.
uint32_t _level : 31;
//! @brief Whether this constant is shared with another.
uint32_t _shared : 1;
//! @brief Data offset from the beginning of the pool.
uint32_t _offset;
};
// ============================================================================
// [asmjit::ConstPoolTree]
// ============================================================================
//! @internal
//!
//! @brief Zone-allocated constant-pool tree.
struct ConstPoolTree {
enum {
//! @brief Maximum tree height == log2(1 << 64).
kHeightLimit = 64
};
ASMJIT_API static const ConstPoolNode _sentinel;
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE ConstPoolTree(size_t dataSize = 0) :
_root(const_cast<ConstPoolNode*>(&_sentinel)),
_length(0),
_dataSize(dataSize) {}
ASMJIT_INLINE ~ConstPoolTree() {}
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset() {
_root = const_cast<ConstPoolNode*>(&_sentinel);
_length = 0;
}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool isEmpty() const {
return _length == 0;
}
ASMJIT_INLINE size_t getLength() const {
return _length;
}
ASMJIT_INLINE void setDataSize(size_t dataSize) {
ASMJIT_ASSERT(isEmpty());
_dataSize = dataSize;
}
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
ASMJIT_API ConstPoolNode* get(const void* data);
ASMJIT_API void put(ConstPoolNode* node);
// --------------------------------------------------------------------------
// [Iterate]
// --------------------------------------------------------------------------
template<typename Visitor>
ASMJIT_INLINE void iterate(Visitor& visitor) const {
ConstPoolNode* node = const_cast<ConstPoolNode*>(_root);
ConstPoolNode* link;
ConstPoolNode* stack[kHeightLimit];
ConstPoolNode* sentinel = const_cast<ConstPoolNode*>(&_sentinel);
if (node == sentinel)
return;
size_t top = 0;
for (;;) {
// Descend to the left-most unvisited node, stacking parents on the way down.
link = node->_link[0];
if (link != sentinel) {
ASMJIT_ASSERT(top != kHeightLimit);
stack[top++] = node;
node = link;
continue;
}
for (;;) {
visitor.visit(node);
link = node->_link[1];
if (link != sentinel) {
node = link;
break;
}
if (top == 0)
return;
// The popped parent's left subtree is done, so visit the parent next.
node = stack[--top];
}
}
}
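// Any object exposing visit(const ConstPoolNode*) can act as a visitor; see
// ConstPoolFill in constpool.cpp. A tiny hypothetical example that just counts
// the visited nodes:
//
//   struct ConstPoolCounter {
//     ConstPoolCounter() : n(0) {}
//     void visit(const ConstPoolNode*) { n++; }
//     size_t n;
//   };
//
//   ConstPoolCounter counter;
//   tree.iterate(counter);   // Afterwards counter.n == tree.getLength().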
// --------------------------------------------------------------------------
// [Helpers]
// --------------------------------------------------------------------------
static ASMJIT_INLINE ConstPoolNode* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) {
ConstPoolNode* node = zone->allocT<ConstPoolNode>(sizeof(ConstPoolNode) + size);
if (node == NULL)
return NULL;
node->_link[0] = const_cast<ConstPoolNode*>(&_sentinel);
node->_link[1] = const_cast<ConstPoolNode*>(&_sentinel);
node->_level = 1;
node->_shared = shared;
node->_offset = static_cast<uint32_t>(offset);
::memcpy(node->getData(), data, size);
return node;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! @brief Root of the tree
ConstPoolNode* _root;
//! @brief Length of the tree (count of nodes).
size_t _length;
//! @brief Size of the data.
size_t _dataSize;
};
// ============================================================================
// [asmjit::ConstPoolGap]
// ============================================================================
//! @internal
//!
//! @brief Zone-allocated constant-pool gap.
struct ConstPoolGap {
//! @brief Link to the next gap
ConstPoolGap* _next;
//! @brief Offset of the gap.
size_t _offset;
//! @brief Remaining bytes of the gap (basically a gap size).
size_t _length;
};
// ============================================================================
// [asmjit::ConstPool]
// ============================================================================
struct ConstPool {
ASMJIT_NO_COPY(ConstPool)
enum {
kIndex1 = 0,
kIndex2 = 1,
kIndex4 = 2,
kIndex8 = 3,
kIndex16 = 4,
kIndex32 = 5,
kIndexCount = 6
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_API ConstPool(Zone* zone);
ASMJIT_API ~ConstPool();
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_API void reset();
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
//! @brief Get whether the constant-pool is empty.
ASMJIT_INLINE bool isEmpty() const {
return _size == 0;
}
//! @brief Get the size of the constant-pool in bytes.
ASMJIT_INLINE size_t getSize() const {
return _size;
}
ASMJIT_INLINE size_t getAlignment() const {
return _alignment;
}
//! @brief Add a constant to the constant pool.
//!
//! The constant must have a known size, which is 1, 2, 4, 8, 16 or 32 bytes.
//! The constant is added to the pool only if it doesn't exist there yet;
//! otherwise the offset of the cached value is returned.
//!
//! AsmJit is able to subdivide added constants, so for example if you add
//! 8-byte constant 0x1122334455667788 it will create the following slots:
//!
//! 8-byte: 0x1122334455667788
//! 4-byte: 0x11223344, 0x55667788
//!
//! The reason is that when combining MMX/SSE/AVX code some patterns are used
//! frequently. However, AsmJit is not able to reallocate a constant that has
//! already been added. For example, if you add a 4-byte constant and then an
//! 8-byte constant containing the same 4-byte pattern, two independent slots
//! will be generated by the pool.
ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset);
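// A short sketch of the behavior described above (little-endian byte order
// assumed for the way the 8-byte constant splits; offsets assume an empty pool):
//
//   size_t off;
//   uint64_t q = ASMJIT_UINT64_C(0x1122334455667788);
//   pool.add(&q, 8, off);      // off == 0, getSize() == 8.
//
//   uint32_t d = 0x11223344;
//   pool.add(&d, 4, off);      // off == 4 - reuses half of 'q', size stays 8.
//
//   pool.add(&q, 8, off);      // off == 0 - the cached offset is returned.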
// --------------------------------------------------------------------------
// [Fill]
// --------------------------------------------------------------------------
//! @brief Fill the destination with the constants from the pool.
ASMJIT_API void fill(void* dst);
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! @brief Zone allocator.
Zone* _zone;
//! @brief Tree per size.
ConstPoolTree _tree[kIndexCount];
//! @brief Gaps per size.
ConstPoolGap* _gaps[kIndexCount];
//! @brief Gap pool (freelist of released ConstPoolGap structures).
ConstPoolGap* _gapPool;
//! @brief Size of the pool (in bytes).
size_t _size;
//! @brief Alignment of the pool (in bytes).
size_t _alignment;
};
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CONSTPOOL_H

View File

@@ -22,7 +22,7 @@ namespace asmjit {
BaseContext::BaseContext(BaseCompiler* compiler) : BaseContext::BaseContext(BaseCompiler* compiler) :
_compiler(compiler), _compiler(compiler),
_zoneAllocator(8192 - sizeof(Zone::Chunk) - kMemAllocOverhead) { _baseZone(8192 - sizeof(Zone::Chunk) - kMemAllocOverhead) {
BaseContext::reset(); BaseContext::reset();
} }
@@ -34,7 +34,7 @@ BaseContext::~BaseContext() {}
// ============================================================================ // ============================================================================
void BaseContext::reset() { void BaseContext::reset() {
_zoneAllocator.clear(); _baseZone.clear();
_func = NULL; _func = NULL;
_start = NULL; _start = NULL;
@@ -101,7 +101,7 @@ MemCell* BaseContext::_newVarCell(VarData* vd) {
return NULL; return NULL;
} }
else { else {
cell = static_cast<MemCell*>(_zoneAllocator.alloc(sizeof(MemCell))); cell = static_cast<MemCell*>(_baseZone.alloc(sizeof(MemCell)));
if (cell == NULL) if (cell == NULL)
goto _NoMemory; goto _NoMemory;
@@ -136,7 +136,7 @@ _NoMemory:
} }
MemCell* BaseContext::_newStackCell(uint32_t size, uint32_t alignment) { MemCell* BaseContext::_newStackCell(uint32_t size, uint32_t alignment) {
MemCell* cell = static_cast<MemCell*>(_zoneAllocator.alloc(sizeof(MemCell))); MemCell* cell = static_cast<MemCell*>(_baseZone.alloc(sizeof(MemCell)));
if (cell == NULL) if (cell == NULL)
goto _NoMemory; goto _NoMemory;

View File

@@ -128,12 +128,12 @@ struct BaseContext {
ASMJIT_INLINE VarBits* newBits(uint32_t len) { ASMJIT_INLINE VarBits* newBits(uint32_t len) {
return static_cast<VarBits*>( return static_cast<VarBits*>(
_zoneAllocator.calloc(static_cast<size_t>(len) * VarBits::kEntitySize)); _baseZone.calloc(static_cast<size_t>(len) * VarBits::kEntitySize));
} }
ASMJIT_INLINE VarBits* copyBits(const VarBits* src, uint32_t len) { ASMJIT_INLINE VarBits* copyBits(const VarBits* src, uint32_t len) {
return static_cast<VarBits*>( return static_cast<VarBits*>(
_zoneAllocator.dup(src, static_cast<size_t>(len) * VarBits::kEntitySize)); _baseZone.dup(src, static_cast<size_t>(len) * VarBits::kEntitySize));
} }
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
@@ -211,7 +211,7 @@ struct BaseContext {
FuncNode* _func; FuncNode* _func;
//! @brief Zone allocator. //! @brief Zone allocator.
Zone _zoneAllocator; Zone _baseZone;
//! @brief Start of the current active scope. //! @brief Start of the current active scope.
BaseNode* _start; BaseNode* _start;

View File

@@ -1117,8 +1117,7 @@ struct Label : public Operand {
//! @brief Create new, unassociated label. //! @brief Create new, unassociated label.
ASMJIT_INLINE Label() : Operand(NoInit) { ASMJIT_INLINE Label() : Operand(NoInit) {
_init_packed_op_sz_b0_b1_id(kOperandTypeLabel, 0, 0, 0, kInvalidValue); reset();
_init_packed_d2_d3(0, 0);
} }
explicit ASMJIT_INLINE Label(uint32_t id) : Operand(NoInit) { explicit ASMJIT_INLINE Label(uint32_t id) : Operand(NoInit) {
@@ -1136,6 +1135,15 @@ struct Label : public Operand {
explicit ASMJIT_INLINE Label(const _NoInit&) : Operand(NoInit) {} explicit ASMJIT_INLINE Label(const _NoInit&) : Operand(NoInit) {}
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset() {
_init_packed_op_sz_b0_b1_id(kOperandTypeLabel, 0, 0, 0, kInvalidValue);
_init_packed_d2_d3(0, 0);
}
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Operator Overload] // [Operator Overload]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------

View File

@@ -8,6 +8,9 @@
#ifndef _ASMJIT_BASE_ZONE_H #ifndef _ASMJIT_BASE_ZONE_H
#define _ASMJIT_BASE_ZONE_H #define _ASMJIT_BASE_ZONE_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin] // [Api-Begin]
#include "../apibegin.h" #include "../apibegin.h"

View File

@@ -63,12 +63,18 @@
defined(__x86_64__) defined(__x86_64__)
# define ASMJIT_HOST_X64 # define ASMJIT_HOST_X64
# define ASMJIT_HOST_LE # define ASMJIT_HOST_LE
# define ASMJIT_HOST_UNALIGNED_16
# define ASMJIT_HOST_UNALIGNED_32
# define ASMJIT_HOST_UNALIGNED_64
#elif \ #elif \
defined(_M_IX86 ) || \ defined(_M_IX86 ) || \
defined(__INTEL__) || \ defined(__INTEL__) || \
defined(__i386__ ) defined(__i386__ )
# define ASMJIT_HOST_X86 # define ASMJIT_HOST_X86
# define ASMJIT_HOST_LE # define ASMJIT_HOST_LE
# define ASMJIT_HOST_UNALIGNED_16
# define ASMJIT_HOST_UNALIGNED_32
# define ASMJIT_HOST_UNALIGNED_64
#elif \ #elif \
defined(_ARM ) || \ defined(_ARM ) || \
defined(_M_ARM_FP ) || \ defined(_M_ARM_FP ) || \

View File

@@ -68,6 +68,30 @@ bool X86X64CallNode::_setRet(uint32_t i, const Operand& op) {
return true; return true;
} }
// ============================================================================
// [asmjit::x86x64::X86X64Compiler - Helpers (Private)]
// ============================================================================
static Error X86X64Compiler_emitConstPool(X86X64Compiler* self,
Label& label, ConstPool& pool) {
if (label.getId() == kInvalidValue)
return kErrorOk;
self->align(static_cast<uint32_t>(pool.getAlignment()));
self->bind(label);
EmbedNode* embedNode = self->embed(NULL, static_cast<uint32_t>(pool.getSize()));
if (embedNode == NULL)
return kErrorNoHeapMemory;
pool.fill(embedNode->getData());
pool.reset();
label.reset();
return kErrorOk;
}
// ============================================================================ // ============================================================================
// [asmjit::x86x64::X86X64Compiler - Construction / Destruction] // [asmjit::x86x64::X86X64Compiler - Construction / Destruction]
// ============================================================================ // ============================================================================
@@ -114,7 +138,7 @@ static InstNode* X86X64Compiler_newInst(X86X64Compiler* self, void* p, uint32_t
InstNode* X86X64Compiler::newInst(uint32_t code) { InstNode* X86X64Compiler::newInst(uint32_t code) {
size_t size = X86X64Compiler_getInstSize(code); size_t size = X86X64Compiler_getInstSize(code);
InstNode* inst = static_cast<InstNode*>(_zoneAllocator.alloc(size)); InstNode* inst = static_cast<InstNode*>(_baseZone.alloc(size));
if (inst == NULL) if (inst == NULL)
goto _NoMemory; goto _NoMemory;
@@ -128,7 +152,7 @@ _NoMemory:
InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0) { InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0) {
size_t size = X86X64Compiler_getInstSize(code); size_t size = X86X64Compiler_getInstSize(code);
InstNode* inst = static_cast<InstNode*>(_zoneAllocator.alloc(size + 1 * sizeof(Operand))); InstNode* inst = static_cast<InstNode*>(_baseZone.alloc(size + 1 * sizeof(Operand)));
if (inst == NULL) if (inst == NULL)
goto _NoMemory; goto _NoMemory;
@@ -147,7 +171,7 @@ _NoMemory:
InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1) { InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1) {
size_t size = X86X64Compiler_getInstSize(code); size_t size = X86X64Compiler_getInstSize(code);
InstNode* inst = static_cast<InstNode*>(_zoneAllocator.alloc(size + 2 * sizeof(Operand))); InstNode* inst = static_cast<InstNode*>(_baseZone.alloc(size + 2 * sizeof(Operand)));
if (inst == NULL) if (inst == NULL)
goto _NoMemory; goto _NoMemory;
@@ -168,7 +192,7 @@ _NoMemory:
InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) { InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) {
size_t size = X86X64Compiler_getInstSize(code); size_t size = X86X64Compiler_getInstSize(code);
InstNode* inst = static_cast<InstNode*>(_zoneAllocator.alloc(size + 3 * sizeof(Operand))); InstNode* inst = static_cast<InstNode*>(_baseZone.alloc(size + 3 * sizeof(Operand)));
if (inst == NULL) if (inst == NULL)
goto _NoMemory; goto _NoMemory;
@@ -191,7 +215,7 @@ _NoMemory:
InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) { InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) {
size_t size = X86X64Compiler_getInstSize(code); size_t size = X86X64Compiler_getInstSize(code);
InstNode* inst = static_cast<InstNode*>(_zoneAllocator.alloc(size + 4 * sizeof(Operand))); InstNode* inst = static_cast<InstNode*>(_baseZone.alloc(size + 4 * sizeof(Operand)));
if (inst == NULL) if (inst == NULL)
goto _NoMemory; goto _NoMemory;
@@ -216,7 +240,7 @@ _NoMemory:
InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) { InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) {
size_t size = X86X64Compiler_getInstSize(code); size_t size = X86X64Compiler_getInstSize(code);
InstNode* inst = static_cast<InstNode*>(_zoneAllocator.alloc(size + 5 * sizeof(Operand))); InstNode* inst = static_cast<InstNode*>(_baseZone.alloc(size + 5 * sizeof(Operand)));
if (inst == NULL) if (inst == NULL)
goto _NoMemory; goto _NoMemory;
@@ -348,7 +372,7 @@ X86X64FuncNode* X86X64Compiler::newFunc(uint32_t conv, const FuncPrototype& p) {
// Allocate space for function arguments. // Allocate space for function arguments.
func->_argList = NULL; func->_argList = NULL;
if (func->getArgCount() != 0) { if (func->getArgCount() != 0) {
func->_argList = _zoneAllocator.allocT<VarData*>(func->getArgCount() * sizeof(VarData*)); func->_argList = _baseZone.allocT<VarData*>(func->getArgCount() * sizeof(VarData*));
if (func->_argList == NULL) if (func->_argList == NULL)
goto _NoMemory; goto _NoMemory;
::memset(func->_argList, 0, func->getArgCount() * sizeof(VarData*)); ::memset(func->_argList, 0, func->getArgCount() * sizeof(VarData*));
@@ -382,9 +406,16 @@ EndNode* X86X64Compiler::endFunc() {
X86X64FuncNode* func = getFunc(); X86X64FuncNode* func = getFunc();
ASMJIT_ASSERT(func != NULL); ASMJIT_ASSERT(func != NULL);
// Add function exit / epilog marker.
addNode(func->getExitNode()); addNode(func->getExitNode());
// Add local constant pool at the end of the function (if exist).
X86X64Compiler_emitConstPool(this, _localConstPoolLabel, _localConstPool);
// Add function end marker.
addNode(func->getEnd()); addNode(func->getEnd());
// Finalize...
func->addFuncFlags(kFuncFlagIsFinished); func->addFuncFlags(kFuncFlagIsFinished);
_func = NULL; _func = NULL;
@@ -434,7 +465,7 @@ X86X64CallNode* X86X64Compiler::newCall(const Operand& o0, uint32_t conv, const
if ((nArgs = p.getArgCount()) == 0) if ((nArgs = p.getArgCount()) == 0)
return node; return node;
node->_args = static_cast<Operand*>(_zoneAllocator.alloc(nArgs * sizeof(Operand))); node->_args = static_cast<Operand*>(_baseZone.alloc(nArgs * sizeof(Operand)));
if (node->_args == NULL) if (node->_args == NULL)
goto _NoMemory; goto _NoMemory;
@@ -513,6 +544,47 @@ Error X86X64Compiler::_newStack(BaseMem* mem, uint32_t size, uint32_t alignment,
return kErrorOk; return kErrorOk;
} }
// ============================================================================
// [asmjit::x86x64::X86X64Compiler - Const]
// ============================================================================
Error X86X64Compiler::_newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) {
Error error = kErrorOk;
size_t offset;
Label* dstLabel;
ConstPool* dstPool;
if (scope == kConstScopeLocal) {
dstLabel = &_localConstPoolLabel;
dstPool = &_localConstPool;
}
else if (scope == kConstScopeGlobal) {
dstLabel = &_globalConstPoolLabel;
dstPool = &_globalConstPool;
}
else {
error = kErrorInvalidArgument;
goto _OnError;
}
error = dstPool->add(data, size, offset);
if (error != kErrorOk)
goto _OnError;
if (dstLabel->getId() == kInvalidValue) {
error = _newLabel(dstLabel);
if (error != kErrorOk)
goto _OnError;
}
*static_cast<Mem*>(mem) = ptr(*dstLabel, static_cast<int32_t>(offset), static_cast<uint32_t>(size));
return kErrorOk;
_OnError:
return error;
}
// ============================================================================ // ============================================================================
// [asmjit::x86x64::X86X64Compiler - Make] // [asmjit::x86x64::X86X64Compiler - Make]
// ============================================================================ // ============================================================================
@@ -550,6 +622,9 @@ static ASMJIT_INLINE void* X86X64Compiler_make(X86X64Compiler* self) {
} }
void* X86X64Compiler::make() { void* X86X64Compiler::make() {
// Flush the global constant pool.
X86X64Compiler_emitConstPool(this, _globalConstPoolLabel, _globalConstPool);
#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_BUILD_X64) #if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_BUILD_X64)
return X86X64Compiler_make<x86::Assembler>(this); return X86X64Compiler_make<x86::Assembler>(this);
#elif !defined(ASMJIT_BUILD_X86) && defined(ASMJIT_BUILD_X64) #elif !defined(ASMJIT_BUILD_X86) && defined(ASMJIT_BUILD_X64)
View File
@@ -10,6 +10,7 @@
// [Dependencies - AsmJit] // [Dependencies - AsmJit]
#include "../base/compiler.h" #include "../base/compiler.h"
#include "../base/vectypes.h"
#include "../x86/x86assembler.h" #include "../x86/x86assembler.h"
#include "../x86/x86defs.h" #include "../x86/x86defs.h"
#include "../x86/x86func.h" #include "../x86/x86func.h"
@@ -1455,13 +1456,44 @@ struct X86X64Compiler : public BaseCompiler {
//! @overridden //! @overridden
ASMJIT_API virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name); ASMJIT_API virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name);
//! @brief Create a new memory chunk allocated on the stack. //! @brief Create a new memory chunk allocated on the current function's stack.
ASMJIT_INLINE Mem newStack(uint32_t size, uint32_t alignment, const char* name = NULL) { ASMJIT_INLINE Mem newStack(uint32_t size, uint32_t alignment, const char* name = NULL) {
Mem m(NoInit); Mem m(NoInit);
_newStack(&m, size, alignment, name); _newStack(&m, size, alignment, name);
return m; return m;
} }
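A brief sketch of the newStack() helper above; the compiler instance c, GpVar, kVarTypeIntPtr and the lea form are taken from the surrounding API by assumption, not from this diff.
// Sketch only: reserve 32 bytes of 16-byte-aligned scratch space on the current
// function's stack and take its address into a GP variable.
Mem scratch = c.newStack(32, 16, "scratch");
GpVar tmp(c, kVarTypeIntPtr);
c.lea(tmp, scratch); // tmp now points at the reserved area.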
// --------------------------------------------------------------------------
// [Const]
// --------------------------------------------------------------------------
//! @overridden
ASMJIT_API virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size);
//! @brief Add data to a constant pool and get a memory reference to it.
ASMJIT_INLINE Mem newConst(uint32_t scope, const void* data, size_t size) {
Mem m(NoInit);
_newConst(&m, scope, data, size);
return m;
}
ASMJIT_INLINE Mem newConst1(uint32_t scope, uint8_t val) { return newConst(scope, &val, 1); }
ASMJIT_INLINE Mem newConst2(uint32_t scope, int16_t val) { return newConst(scope, &val, 2); }
ASMJIT_INLINE Mem newConst2(uint32_t scope, uint16_t val) { return newConst(scope, &val, 2); }
ASMJIT_INLINE Mem newConst4(uint32_t scope, int32_t val) { return newConst(scope, &val, 4); }
ASMJIT_INLINE Mem newConst4(uint32_t scope, uint32_t val) { return newConst(scope, &val, 4); }
ASMJIT_INLINE Mem newConst4(uint32_t scope, float val) { return newConst(scope, &val, 4); }
ASMJIT_INLINE Mem newConst8(uint32_t scope, int64_t val) { return newConst(scope, &val, 8); }
ASMJIT_INLINE Mem newConst8(uint32_t scope, uint64_t val) { return newConst(scope, &val, 8); }
ASMJIT_INLINE Mem newConst8(uint32_t scope, double val) { return newConst(scope, &val, 8); }
ASMJIT_INLINE Mem newConst8(uint32_t scope, const Vec64Data& val) { return newConst(scope, &val, 8); }
ASMJIT_INLINE Mem newConst16(uint32_t scope, const Vec128Data& val) { return newConst(scope, &val, 16); }
ASMJIT_INLINE Mem newConst32(uint32_t scope, const Vec256Data& val) { return newConst(scope, &val, 32); }
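The typed helpers above only fix the size argument of newConst(); a short sketch of the scope choice follows (the compiler instance c is assumed from context). Local-scope constants are emitted per function by endFunc(), global-scope constants once by make().
// Sketch only: pick the pool by lifetime and sharing.
float pi = 3.14159265f;
uint64_t signClear = 0x7FFFFFFFFFFFFFFFULL;
Mem cPi   = c.newConst4(kConstScopeGlobal, pi);       // Shared pool, emitted by make().
Mem cMask = c.newConst8(kConstScopeLocal, signClear); // Per-function pool, emitted by endFunc().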
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Embed] // [Embed]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
View File
@@ -452,7 +452,7 @@ void X86X64Context::emitLoad(VarData* vd, uint32_t regIndex, const char* reason)
return; return;
_Comment: _Comment:
node->setComment(compiler->_stringAllocator.sformat("[%s] %s", reason, vd->getName())); node->setComment(compiler->_stringZone.sformat("[%s] %s", reason, vd->getName()));
} }
// ============================================================================ // ============================================================================
@@ -533,7 +533,7 @@ void X86X64Context::emitSave(VarData* vd, uint32_t regIndex, const char* reason)
return; return;
_Comment: _Comment:
node->setComment(compiler->_stringAllocator.sformat("[%s] %s", reason, vd->getName())); node->setComment(compiler->_stringZone.sformat("[%s] %s", reason, vd->getName()));
} }
// ============================================================================ // ============================================================================
@@ -602,7 +602,7 @@ void X86X64Context::emitMove(VarData* vd, uint32_t toRegIndex, uint32_t fromRegI
return; return;
_Comment: _Comment:
node->setComment(compiler->_stringAllocator.sformat("[%s] %s", reason, vd->getName())); node->setComment(compiler->_stringZone.sformat("[%s] %s", reason, vd->getName()));
} }
// ============================================================================ // ============================================================================
@@ -633,7 +633,7 @@ void X86X64Context::emitSwapGp(VarData* aVd, VarData* bVd, uint32_t aIndex, uint
return; return;
_Comment: _Comment:
node->setComment(compiler->_stringAllocator.sformat("[%s] %s, %s", reason, aVd->getName(), bVd->getName())); node->setComment(compiler->_stringZone.sformat("[%s] %s, %s", reason, aVd->getName(), bVd->getName()));
} }
// ============================================================================ // ============================================================================
@@ -1305,7 +1305,7 @@ BaseVarState* X86X64Context::saveState() {
sizeof(VarState) + vdCount * sizeof(StateCell), sizeof(void*)); sizeof(VarState) + vdCount * sizeof(StateCell), sizeof(void*));
VarState* cur = getState(); VarState* cur = getState();
VarState* dst = _zoneAllocator.allocT<VarState>(size); VarState* dst = _baseZone.allocT<VarState>(size);
if (dst == NULL) if (dst == NULL)
return NULL; return NULL;
@@ -1559,7 +1559,7 @@ static void X86X64Context_prepareSingleVarInst(uint32_t code, VarAttr* va) {
//! //!
//! @brief Add unreachable-flow data to the unreachable flow list. //! @brief Add unreachable-flow data to the unreachable flow list.
static ASMJIT_INLINE Error X86X64Context_addUnreachableNode(X86X64Context* self, BaseNode* node) { static ASMJIT_INLINE Error X86X64Context_addUnreachableNode(X86X64Context* self, BaseNode* node) {
PodList<BaseNode*>::Link* link = self->_zoneAllocator.allocT<PodList<BaseNode*>::Link>(); PodList<BaseNode*>::Link* link = self->_baseZone.allocT<PodList<BaseNode*>::Link>();
if (link == NULL) if (link == NULL)
return self->setError(kErrorNoHeapMemory); return self->setError(kErrorNoHeapMemory);
@@ -1573,7 +1573,7 @@ static ASMJIT_INLINE Error X86X64Context_addUnreachableNode(X86X64Context* self,
//! //!
//! @brief Add jump-flow data to the jcc flow list. //! @brief Add jump-flow data to the jcc flow list.
static ASMJIT_INLINE Error X86X64Context_addJccNode(X86X64Context* self, BaseNode* node) { static ASMJIT_INLINE Error X86X64Context_addJccNode(X86X64Context* self, BaseNode* node) {
PodList<BaseNode*>::Link* link = self->_zoneAllocator.allocT<PodList<BaseNode*>::Link>(); PodList<BaseNode*>::Link* link = self->_baseZone.allocT<PodList<BaseNode*>::Link>();
if (link == NULL) if (link == NULL)
ASMJIT_PROPAGATE_ERROR(self->setError(kErrorNoHeapMemory)); ASMJIT_PROPAGATE_ERROR(self->setError(kErrorNoHeapMemory));
@@ -1998,16 +1998,13 @@ _NextGroup:
VI_BEGIN(); VI_BEGIN();
if (node->getHint() == kVarHintAlloc) { if (node->getHint() == kVarHintAlloc) {
HintNode* cur = node;
uint32_t remain[kRegClassCount]; uint32_t remain[kRegClassCount];
RegMask inRegs; HintNode* cur = node;
remain[kRegClassGp] = _baseRegsCount - 1 - func->hasFuncFlag(kFuncFlagIsNaked); remain[kRegClassGp] = _baseRegsCount - 1 - func->hasFuncFlag(kFuncFlagIsNaked);
remain[kRegClassFp] = kRegCountFp; remain[kRegClassFp] = kRegCountFp;
remain[kRegClassMm] = kRegCountMm; remain[kRegClassMm] = kRegCountMm;
remain[kRegClassXy] = _baseRegsCount; remain[kRegClassXy] = _baseRegsCount;
inRegs.reset();
// Merge as many alloc-hints as possible. // Merge as many alloc-hints as possible.
for (;;) { for (;;) {
@@ -2695,7 +2692,7 @@ _OnTarget:
ltUnused = ltUnused->prev; ltUnused = ltUnused->prev;
} }
else { else {
ltTmp = _zoneAllocator.allocT<LivenessTarget>( ltTmp = _baseZone.allocT<LivenessTarget>(
sizeof(LivenessTarget) - sizeof(VarBits) + bLen * sizeof(uintptr_t)); sizeof(LivenessTarget) - sizeof(VarBits) + bLen * sizeof(uintptr_t));
if (ltTmp == NULL) if (ltTmp == NULL)
@@ -2885,7 +2882,7 @@ Error X86X64Context::annotate() {
BaseNode* node_ = func; BaseNode* node_ = func;
BaseNode* end = func->getEnd(); BaseNode* end = func->getEnd();
Zone& sa = _compiler->_stringAllocator; Zone& sa = _compiler->_stringZone;
StringBuilderT<128> sb; StringBuilderT<128> sb;
uint32_t maxLen = 0; uint32_t maxLen = 0;
@@ -4947,8 +4944,8 @@ static void X86X64Context_translateJump(X86X64Context* self, JumpNode* jNode, Ta
compiler->_setCursor(extNode); compiler->_setCursor(extNode);
self->switchState(jTarget->getState()); self->switchState(jTarget->getState());
// If any instruction was added during switchState() we have to wrap the // If one or more instructions have been added during switchState() they will be
// generated code in a block. // moved to the end of the function body.
if (compiler->getCursor() != extNode) { if (compiler->getCursor() != extNode) {
TargetNode* jTrampolineTarget = compiler->newTarget(); TargetNode* jTrampolineTarget = compiler->newTarget();
View File
@@ -83,7 +83,7 @@ struct X86X64Context : public BaseContext {
ASMJIT_INLINE VarInst* newVarInst(uint32_t vaCount) { ASMJIT_INLINE VarInst* newVarInst(uint32_t vaCount) {
return static_cast<VarInst*>( return static_cast<VarInst*>(
_zoneAllocator.alloc(sizeof(VarInst) + vaCount * sizeof(VarAttr))); _baseZone.alloc(sizeof(VarInst) + vaCount * sizeof(VarAttr)));
} }
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------