diff --git a/CMakeLists.txt b/CMakeLists.txt index 1027959..fa061e8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,8 +8,7 @@ CMake_Minimum_Required(VERSION 2.8.12) # [AsmJit - Configuration] # ============================================================================= -# Whether not to build anything (the source will be included by another library -# or executable). +# Whether not to build anything (default FALSE). # Set(ASMJIT_EMBED FALSE) # Whether to build static library (default FALSE). @@ -114,6 +113,7 @@ If(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX) If(WIN32) List(APPEND ASMJIT_CFLAGS -D_UNICODE) EndIf() + If(ASMJIT_STATIC) List(APPEND ASMJIT_CFLAGS -DASMJIT_STATIC) EndIf() @@ -214,6 +214,8 @@ AsmJit_AddSource(ASMJIT_SRC asmjit/base codegen.h compiler.cpp compiler.h + constpool.cpp + constpool.h context.cpp context_p.h cpuinfo.cpp @@ -310,6 +312,7 @@ If(ASMJIT_BUILD_SAMPLES) testdummy testmem testopcode + testpool testsizeof testx86 ) diff --git a/src/app/test/testmem.cpp b/src/app/test/testmem.cpp index feccfab..f30ddec 100644 --- a/src/app/test/testmem.cpp +++ b/src/app/test/testmem.cpp @@ -65,7 +65,7 @@ int main(int argc, char* argv[]) { size_t i; size_t count = 200000; - printf("Memory alloc/free test - %d allocations\n\n", (int)count); + printf("Memory alloc/free test - %d allocations.\n\n", (int)count); void** a = (void**)::malloc(sizeof(void*) * count); void** b = (void**)::malloc(sizeof(void*) * count); @@ -83,7 +83,7 @@ int main(int argc, char* argv[]) { ::memset(a[i], 0, r); } - printf("done\n"); + printf("Done.\n"); stats(memmgr); printf("\n"); @@ -91,16 +91,16 @@ int main(int argc, char* argv[]) { for (i = 0; i < count; i++) { if (memmgr->release(a[i]) != kErrorOk) { - printf("Failed to free %p\n", b[i]); + printf("Failed to free %p.\n", b[i]); problems++; } } - printf("done\n"); + printf("Done.\n"); stats(memmgr); printf("\n"); - printf("Verified alloc/free test - %d allocations\n\n", (int)count); + printf("Verified alloc/free 
test - %d allocations.\n\n", (int)count); printf("Alloc..."); for (i = 0; i < count; i++) { @@ -112,25 +112,25 @@ int main(int argc, char* argv[]) { gen(a[i], b[i], r); } - printf("done\n"); + printf("Done.\n"); stats(memmgr); printf("\n"); printf("Shuffling..."); shuffle(a, b, count); - printf("done\n"); + printf("Done.\n"); printf("\n"); printf("Verify and free..."); for (i = 0; i < count / 2; i++) { verify(a[i], b[i]); if (memmgr->release(a[i]) != kErrorOk) { - printf("Failed to free %p\n", a[i]); + printf("Failed to free %p.\n", a[i]); problems++; } free(b[i]); } - printf("done\n"); + printf("Done.\n"); stats(memmgr); printf("\n"); @@ -144,7 +144,7 @@ int main(int argc, char* argv[]) { gen(a[i], b[i], r); } - printf("done\n"); + printf("Done.\n"); stats(memmgr); printf("\n"); @@ -152,12 +152,12 @@ int main(int argc, char* argv[]) { for (i = 0; i < count; i++) { verify(a[i], b[i]); if (memmgr->release(a[i]) != kErrorOk) { - printf("Failed to free %p\n", a[i]); + printf("Failed to free %p.\n", a[i]); problems++; } free(b[i]); } - printf("done\n"); + printf("Done.\n"); stats(memmgr); printf("\n"); diff --git a/src/app/test/testpool.cpp b/src/app/test/testpool.cpp new file mode 100644 index 0000000..4d612f6 --- /dev/null +++ b/src/app/test/testpool.cpp @@ -0,0 +1,204 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
+ +// [Dependencies - AsmJit] +#include + +// [Dependencies - C] +#include +#include +#include + +using namespace asmjit; + +// ============================================================================ +// [EXPECT] +// ============================================================================ + +static void expectFailed(const char* msg) { + printf("Failure: %s\n", msg); + abort(); +} + +#define EXPECT(_Exp_, _Msg_) \ + do { \ + if (!(_Exp_)) { \ + expectFailed(_Msg_); \ + } \ + } while(0) + +// ============================================================================ +// [Main] +// ============================================================================ + +int main(int argc, char* argv[]) { + Zone zone(16192); + ConstPool pool(&zone); + + uint32_t i; + uint32_t kCount = 1000000; + + printf("Adding %u constants to the pool.\n", kCount); + { + size_t prevOffset; + size_t curOffset; + uint64_t c = ASMJIT_UINT64_C(0x0101010101010101); + + EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk, + "pool.add() - Returned error."); + EXPECT(prevOffset == 0, + "pool.add() - First constant should have zero offset."); + + for (i = 1; i < kCount; i++) { + c++; + EXPECT(pool.add(&c, 8, curOffset) == kErrorOk, + "pool.add() - Returned error."); + EXPECT(prevOffset + 8 == curOffset, + "pool.add() - Returned incorrect curOffset."); + EXPECT(pool.getSize() == (i + 1) * 8, + "pool.getSize() - Reports incorrect size."); + prevOffset = curOffset; + } + + EXPECT(pool.getAlignment() == 8, + "pool.getAlignment() - Expected 8-byte alignment."); + } + printf("Done.\n"); + + printf("Retrieving %u constants from the pool.\n", kCount); + { + uint64_t c = ASMJIT_UINT64_C(0x0101010101010101); + + for (i = 0; i < kCount; i++) { + size_t offset; + EXPECT(pool.add(&c, 8, offset) == kErrorOk, + "pool.add() - Returned error."); + EXPECT(offset == i * 8, + "pool.add() - Should have reused constant."); + c++; + } + } + printf("Done.\n"); + + printf("Checking if the constants were split into 4-byte 
patterns.\n"); + { + uint32_t c = 0x01010101; + for (i = 0; i < kCount; i++) { + size_t offset; + EXPECT(pool.add(&c, 4, offset) == kErrorOk, + "pool.add() - Returned error."); + EXPECT(offset == i * 8, + "pool.add() - Should reuse existing constant."); + c++; + } + } + printf("Done.\n"); + + printf("Adding 2 byte constant to misalign the current offset.\n"); + { + uint16_t c = 0xFFFF; + size_t offset; + + EXPECT(pool.add(&c, 2, offset) == kErrorOk, + "pool.add() - Returned error."); + EXPECT(offset == kCount * 8, + "pool.add() - Didn't return expected position."); + EXPECT(pool.getAlignment() == 8, + "pool.getAlignment() - Expected 8-byte alignment."); + } + printf("Done.\n"); + + printf("Adding 8 byte constant to check if pool gets aligned again.\n"); + { + uint64_t c = ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF); + size_t offset; + + EXPECT(pool.add(&c, 8, offset) == kErrorOk, + "pool.add() - Returned error."); + EXPECT(offset == kCount * 8 + 8, + "pool.add() - Didn't return aligned offset."); + } + printf("Done.\n"); + + printf("Adding 2 byte constant verify the gap is filled.\n"); + { + uint16_t c = 0xFFFE; + size_t offset; + + EXPECT(pool.add(&c, 2, offset) == kErrorOk, + "pool.add() - Returned error."); + EXPECT(offset == kCount * 8 + 2, + "pool.add() - Didn't fill the gap."); + EXPECT(pool.getAlignment() == 8, + "pool.getAlignment() - Expected 8-byte alignment."); + } + printf("Done.\n"); + + printf("Checking reset functionality.\n"); + { + pool.reset(); + + EXPECT(pool.getSize() == 0, + "pool.getSize() - Expected pool size to be zero."); + EXPECT(pool.getAlignment() == 0, + "pool.getSize() - Expected pool alignment to be zero."); + } + printf("Done.\n"); + + printf("Checking pool alignment when combined constants are added.\n"); + { + uint8_t bytes[32] = { 0 }; + uint64_t c = 0; + size_t offset; + + pool.add(bytes, 1, offset); + + EXPECT(pool.getSize() == 1, + "pool.getSize() - Expected pool size to be 1 byte."); + EXPECT(pool.getAlignment() == 1, + 
"pool.getSize() - Expected pool alignment to be 1 byte."); + EXPECT(offset == 0, + "pool.getSize() - Expected offset returned to be zero."); + + pool.add(bytes, 2, offset); + + EXPECT(pool.getSize() == 4, + "pool.getSize() - Expected pool size to be 4 bytes."); + EXPECT(pool.getAlignment() == 2, + "pool.getSize() - Expected pool alignment to be 2 bytes."); + EXPECT(offset == 2, + "pool.getSize() - Expected offset returned to be 2."); + + pool.add(bytes, 4, offset); + + EXPECT(pool.getSize() == 8, + "pool.getSize() - Expected pool size to be 8 bytes."); + EXPECT(pool.getAlignment() == 4, + "pool.getSize() - Expected pool alignment to be 4 bytes."); + EXPECT(offset == 4, + "pool.getSize() - Expected offset returned to be 4."); + + pool.add(bytes, 4, offset); + + EXPECT(pool.getSize() == 8, + "pool.getSize() - Expected pool size to be 8 bytes."); + EXPECT(pool.getAlignment() == 4, + "pool.getSize() - Expected pool alignment to be 4 bytes."); + EXPECT(offset == 4, + "pool.getSize() - Expected offset returned to be 8."); + + pool.add(bytes, 32, offset); + EXPECT(pool.getSize() == 64, + "pool.getSize() - Expected pool size to be 64 bytes."); + EXPECT(pool.getAlignment() == 32, + "pool.getSize() - Expected pool alignment to be 32 bytes."); + EXPECT(offset == 32, + "pool.getSize() - Expected offset returned to be 32."); + } + printf("Done.\n"); + + return 0; +} diff --git a/src/app/test/testx86.cpp b/src/app/test/testx86.cpp index 318fe55..75321d4 100644 --- a/src/app/test/testx86.cpp +++ b/src/app/test/testx86.cpp @@ -402,7 +402,7 @@ struct X86Test_AllocManual : public X86Test { c.addFunc(kFuncConvHost, FuncBuilder0()); GpVar v0(c, kVarTypeInt32, "v0"); - GpVar v1(c, kVarTypeInt32, "v0"); + GpVar v1(c, kVarTypeInt32, "v1"); GpVar cnt(c, kVarTypeInt32, "cnt"); c.xor_(v0, v0); @@ -2344,6 +2344,49 @@ struct X86Test_CallMisc1 : public X86Test { } }; +// ============================================================================ +// [X86Test_ConstPoolBase] +// 
============================================================================ + +struct X86Test_ConstPoolBase : public X86Test { + X86Test_ConstPoolBase() : X86Test("[ConstPool] Base") {} + + static void add(PodVector& tests) { + tests.append(new X86Test_ConstPoolBase()); + } + + virtual void compile(Compiler& c) { + c.addFunc(kFuncConvHost, FuncBuilder0()); + + GpVar v0(c, kVarTypeInt32, "v0"); + GpVar v1(c, kVarTypeInt32, "v1"); + + Mem c0(c.newConst4(kConstScopeLocal, 200)); + Mem c1(c.newConst4(kConstScopeLocal, 33)); + + c.mov(v0, c0); + c.mov(v1, c1); + c.add(v0, v1); + + c.ret(v0); + c.endFunc(); + } + + virtual bool run(void* _func, StringBuilder& result, StringBuilder& expect) { + typedef int (*Func)(void); + Func func = asmjit_cast(_func); + + int resultRet = func(); + int expectRet = 233; + + result.setFormat("ret=%d", resultRet); + expect.setFormat("ret=%d", expectRet); + + return resultRet == expectRet; + } +}; + + // ============================================================================ // [X86Test_Dummy] // ============================================================================ @@ -2470,6 +2513,7 @@ X86TestSuite::X86TestSuite() : ADD_TEST(X86Test_CallMultiple); ADD_TEST(X86Test_CallRecursive); ADD_TEST(X86Test_CallMisc1); + ADD_TEST(X86Test_ConstPoolBase); // Dummy. 
// ADD_TEST(X86Test_Dummy); diff --git a/src/asmjit/base.h b/src/asmjit/base.h index 9d1049b..f1f576f 100644 --- a/src/asmjit/base.h +++ b/src/asmjit/base.h @@ -14,6 +14,7 @@ #include "base/assembler.h" #include "base/codegen.h" #include "base/compiler.h" +#include "base/constpool.h" #include "base/cpuinfo.h" #include "base/cputicks.h" #include "base/defs.h" diff --git a/src/asmjit/base/assembler.cpp b/src/asmjit/base/assembler.cpp index 5f31b4e..73b015c 100644 --- a/src/asmjit/base/assembler.cpp +++ b/src/asmjit/base/assembler.cpp @@ -48,7 +48,7 @@ void BaseAssembler::clear() { void BaseAssembler::reset() { _purge(); - _zoneAllocator.reset(); + _baseZone.reset(); if (_buffer != NULL) { ::free(_buffer); @@ -63,7 +63,7 @@ void BaseAssembler::reset() { } void BaseAssembler::_purge() { - _zoneAllocator.clear(); + _baseZone.clear(); _cursor = _buffer; _options = 0; @@ -188,7 +188,7 @@ LabelLink* BaseAssembler::_newLabelLink() { _unusedLinks = link->prev; } else { - link = _zoneAllocator.allocT(); + link = _baseZone.allocT(); if (link == NULL) return NULL; } diff --git a/src/asmjit/base/codegen.cpp b/src/asmjit/base/codegen.cpp index 6a85985..a92dfc3 100644 --- a/src/asmjit/base/codegen.cpp +++ b/src/asmjit/base/codegen.cpp @@ -29,7 +29,7 @@ CodeGen::CodeGen(BaseRuntime* runtime) : _error(kErrorOk), _features(IntUtil::mask(kCodeGenOptimizedAlign)), _options(0), - _zoneAllocator(16384 - sizeof(Zone::Chunk) - kMemAllocOverhead) {} + _baseZone(16384 - sizeof(Zone::Chunk) - kMemAllocOverhead) {} CodeGen::~CodeGen() { if (_errorHandler != NULL) diff --git a/src/asmjit/base/codegen.h b/src/asmjit/base/codegen.h index 6135474..561fa23 100644 --- a/src/asmjit/base/codegen.h +++ b/src/asmjit/base/codegen.h @@ -189,8 +189,8 @@ struct CodeGen { //! @brief Options for the next generated instruction (only 8-bits used). uint32_t _options; - //! @brief Zone memory allocator. - Zone _zoneAllocator; + //! @brief Base zone. + Zone _baseZone; }; //! 
@} diff --git a/src/asmjit/base/compiler.cpp b/src/asmjit/base/compiler.cpp index b2e4e5a..c279bde 100644 --- a/src/asmjit/base/compiler.cpp +++ b/src/asmjit/base/compiler.cpp @@ -44,8 +44,11 @@ BaseCompiler::BaseCompiler(BaseRuntime* runtime) : _lastNode(NULL), _cursor(NULL), _func(NULL), - _varAllocator(4096 - kMemAllocOverhead), - _stringAllocator(4096 - kMemAllocOverhead) {} + _varZone(4096 - kMemAllocOverhead), + _stringZone(4096 - kMemAllocOverhead), + _localConstZone(4096 - kMemAllocOverhead), + _localConstPool(&_localConstZone), + _globalConstPool(&_baseZone) {} BaseCompiler::~BaseCompiler() { reset(); @@ -61,20 +64,24 @@ void BaseCompiler::clear() { void BaseCompiler::reset() { _purge(); - _zoneAllocator.reset(); - _varAllocator.reset(); - _stringAllocator.reset(); + _localConstPool.reset(); + _globalConstPool.reset(); _targets.reset(); _vars.reset(); + + _baseZone.reset(); + _varZone.reset(); + _stringZone.reset(); + _localConstZone.reset(); } void BaseCompiler::_purge() { - _zoneAllocator.clear(); + _baseZone.clear(); - _varAllocator.clear(); - _stringAllocator.clear(); + _varZone.clear(); + _stringZone.clear(); _options = 0; @@ -343,11 +350,12 @@ EmbedNode* BaseCompiler::newEmbed(const void* data, uint32_t size) { EmbedNode* node; if (size > EmbedNode::kInlineBufferSize) { - void* clonedData = _stringAllocator.alloc(size); + void* clonedData = _stringZone.alloc(size); if (clonedData == NULL) goto _NoMemory; - ::memcpy(clonedData, data, size); + if (data != NULL) + ::memcpy(clonedData, data, size); data = clonedData; } @@ -376,7 +384,7 @@ CommentNode* BaseCompiler::newComment(const char* str) { CommentNode* node; if (str != NULL && str[0]) { - str = _stringAllocator.sdup(str); + str = _stringZone.sdup(str); if (str == NULL) goto _NoMemory; } @@ -452,7 +460,7 @@ HintNode* BaseCompiler::addHint(BaseVar& var, uint32_t hint, uint32_t value) { // ============================================================================ VarData* BaseCompiler:: 
_newVd(uint32_t type, uint32_t size, uint32_t c, const char* name) { - VarData* vd = reinterpret_cast(_varAllocator.alloc(sizeof(VarData))); + VarData* vd = reinterpret_cast(_varZone.alloc(sizeof(VarData))); if (vd == NULL) goto _NoMemory; @@ -461,7 +469,7 @@ VarData* BaseCompiler:: _newVd(uint32_t type, uint32_t size, uint32_t c, const c vd->_contextId = kInvalidValue; if (name != NULL && name[0] != '\0') { - vd->_name = _stringAllocator.sdup(name); + vd->_name = _stringZone.sdup(name); } vd->_type = static_cast(type); @@ -567,7 +575,7 @@ void BaseCompiler::rename(BaseVar& var, const char* name) { vd->_name = noName; if (name != NULL && name[0] != '\0') { - vd->_name = _stringAllocator.sdup(name); + vd->_name = _stringZone.sdup(name); } } diff --git a/src/asmjit/base/compiler.h b/src/asmjit/base/compiler.h index d867718..0083623 100644 --- a/src/asmjit/base/compiler.h +++ b/src/asmjit/base/compiler.h @@ -11,6 +11,7 @@ // [Dependencies - AsmJit] #include "../base/assembler.h" #include "../base/codegen.h" +#include "../base/constpool.h" #include "../base/defs.h" #include "../base/error.h" #include "../base/func.h" @@ -18,6 +19,7 @@ #include "../base/podlist.h" #include "../base/podvector.h" #include "../base/runtime.h" +#include "../base/zone.h" // [Api-Begin] #include "../apibegin.h" @@ -40,6 +42,23 @@ struct EndNode; struct InstNode; struct JumpNode; +// ============================================================================ +// [asmjit::kConstScope] +// ============================================================================ + +//! @brief Type of constant in constant pool +ASMJIT_ENUM(kConstScope) { + //! @brief Local constant. + //! + //! Local constant is always embedded right after the current function. + kConstScopeLocal = 0, + + //! @brief Global constant. + //! + //! Global constant is embedded at the end of the currently compiled code. 
+ kConstScopeGlobal = 1 +}; + // ============================================================================ // [asmjit::kVarAttrFlags] // ============================================================================ @@ -486,7 +505,7 @@ struct VarData { uint8_t _modified : 1; //! @internal uint8_t _reserved0 : 3; - //! @brief Varialbe natural alignment. + //! @brief Variable natural alignment. uint8_t _alignment; //! @brief Variable size. @@ -896,10 +915,13 @@ struct EmbedNode : public BaseNode { //! @brief Create a new @ref EmbedNode instance. ASMJIT_INLINE EmbedNode(BaseCompiler* compiler, void* data, uint32_t size) : BaseNode(compiler, kNodeTypeEmbed) { _size = size; - if (size <= kInlineBufferSize) - ::memcpy(_data.buf, data, size); - else + if (size <= kInlineBufferSize) { + if (data != NULL) + ::memcpy(_data.buf, data, size); + } + else { _data.ptr = static_cast(data); + } } //! @brief Destroy the @ref EmbedNode instance. @@ -1100,48 +1122,74 @@ struct InstNode : public BaseNode { // -------------------------------------------------------------------------- //! @brief Get instruction code, see @c kInstCode. - ASMJIT_INLINE uint32_t getCode() const - { return _code; } + ASMJIT_INLINE uint32_t getCode() const { + return _code; + } //! @brief Set instruction code to @a code. //! //! Please do not modify instruction code if you are not know what you are //! doing. Incorrect instruction code or operands can raise assertion() at //! runtime. - ASMJIT_INLINE void setCode(uint32_t code) - { _code = static_cast(code); } + ASMJIT_INLINE void setCode(uint32_t code) { + _code = static_cast(code); + } //! @brief Whether the instruction is an unconditional jump or whether the //! instruction is a conditional jump which is likely to be taken. - ASMJIT_INLINE bool isTaken() const { return hasFlag(kNodeFlagIsTaken); } + ASMJIT_INLINE bool isTaken() const { + return hasFlag(kNodeFlagIsTaken); + } //! @brief Get emit options. 
- ASMJIT_INLINE uint32_t getOptions() const { return _options; } + ASMJIT_INLINE uint32_t getOptions() const { + return _options; + } //! @brief Set emit options. - ASMJIT_INLINE void setOptions(uint32_t options) { _options = static_cast(options); } + ASMJIT_INLINE void setOptions(uint32_t options) { + _options = static_cast(options); + } //! @brief Add emit options. - ASMJIT_INLINE void addOptions(uint32_t options) { _options |= static_cast(options); } + ASMJIT_INLINE void addOptions(uint32_t options) { + _options |= static_cast(options); + } //! @brief Mask emit options. - ASMJIT_INLINE void andOptions(uint32_t options) { _options &= static_cast(options); } + ASMJIT_INLINE void andOptions(uint32_t options) { + _options &= static_cast(options); + } //! @brief Clear emit options. - ASMJIT_INLINE void delOptions(uint32_t options) { _options &= static_cast(~options); } + ASMJIT_INLINE void delOptions(uint32_t options) { + _options &= static_cast(~options); + } //! @brief Get operands list. - ASMJIT_INLINE Operand* getOpList() { return _opList; } + ASMJIT_INLINE Operand* getOpList() { + return _opList; + } //! @overload - ASMJIT_INLINE const Operand* getOpList() const { return _opList; } + ASMJIT_INLINE const Operand* getOpList() const { + return _opList; + } //! @brief Get operands count. - ASMJIT_INLINE uint32_t getOpCount() const { return _opCount; } + ASMJIT_INLINE uint32_t getOpCount() const { + return _opCount; + } //! @brief Get whether the instruction contains a memory operand. - ASMJIT_INLINE bool hasMemOp() const { return _memOpIndex != 0xFF; } + ASMJIT_INLINE bool hasMemOp() const { + return _memOpIndex != 0xFF; + } //! @brief Set memory operand index (in opList), 0xFF means that instruction //! doesn't have a memory operand. - ASMJIT_INLINE void setMemOpIndex(uint32_t index) { _memOpIndex = static_cast(index); } + ASMJIT_INLINE void setMemOpIndex(uint32_t index) { + _memOpIndex = static_cast(index); + } //! 
@brief Reset memory operand index, setting it to 0xFF. - ASMJIT_INLINE void resetMemOpIndex() { _memOpIndex = 0xFF; } + ASMJIT_INLINE void resetMemOpIndex() { + _memOpIndex = 0xFF; + } //! @brief Get memory operand. //! @@ -1678,25 +1726,25 @@ struct BaseCompiler : public CodeGen { template ASMJIT_INLINE T* newNode() { - void* p = _zoneAllocator.alloc(sizeof(T)); + void* p = _baseZone.alloc(sizeof(T)); return new(p) T(this); } template ASMJIT_INLINE T* newNode(P0 p0) { - void* p = _zoneAllocator.alloc(sizeof(T)); + void* p = _baseZone.alloc(sizeof(T)); return new(p) T(this, p0); } template ASMJIT_INLINE T* newNode(P0 p0, P1 p1) { - void* p = _zoneAllocator.alloc(sizeof(T)); + void* p = _baseZone.alloc(sizeof(T)); return new(p) T(this, p0, p1); } template ASMJIT_INLINE T* newNode(P0 p0, P1 p1, P2 p2) { - void* p = _zoneAllocator.alloc(sizeof(T)); + void* p = _baseZone.alloc(sizeof(T)); return new(p) T(this, p0, p1, p2); } @@ -1906,9 +1954,20 @@ struct BaseCompiler : public CodeGen { // [Stack] // -------------------------------------------------------------------------- - //! @brief Create a new @ref BaseMem. + //! @internal + //! + //! @brief Create a new memory chunk allocated on the current function's stack. virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) = 0; + // -------------------------------------------------------------------------- + // [Const] + // -------------------------------------------------------------------------- + + //! @internal + //! + //! @brief Put data to a constant-pool and get a memory reference to it. + virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) = 0; + // -------------------------------------------------------------------------- // [Serialize] // -------------------------------------------------------------------------- @@ -1941,15 +2000,27 @@ struct BaseCompiler : public CodeGen { //! @brief Current function. FuncNode* _func; - //! 
@brief Variable allocator. - Zone _varAllocator; - //! @brief String/data allocator. - Zone _stringAllocator; + //! @brief Variable zone. + Zone _varZone; + //! @brief String/data zone. + Zone _stringZone; + //! @brief Local constant pool zone. + Zone _localConstZone; //! @brief Targets. PodVector _targets; //! @brief Variables. PodVector _vars; + + //! @brief Local constant pool, flushed at the end of each function. + ConstPool _localConstPool; + //! @brief Global constant pool, flushed at the end of the compilation. + ConstPool _globalConstPool; + + //! @brief Label to start of the local constant pool. + Label _localConstPoolLabel; + //! @brief Label to start of the global constant pool. + Label _globalConstPoolLabel; }; // ============================================================================ diff --git a/src/asmjit/base/constpool.cpp b/src/asmjit/base/constpool.cpp new file mode 100644 index 0000000..4a07712 --- /dev/null +++ b/src/asmjit/base/constpool.cpp @@ -0,0 +1,369 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +// [Export] +#define ASMJIT_EXPORTS + +// [Dependencies - AsmJit] +#include "../base/constpool.h" +#include "../base/intutil.h" + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { + +// Binary tree code is based on Julienne Walker's "Andersson Binary Trees" +// article and implementation. However, only three operations are implemented - +// get, insert and traverse. + +// ============================================================================ +// [asmjit::ConstPoolTree - Ops] +// ============================================================================ + +const ConstPoolNode ConstPoolTree::_sentinel = { { + const_cast(&ConstPoolTree::_sentinel), + const_cast(&ConstPoolTree::_sentinel) +}, 0, 0, 0 }; + +//! @internal +//! +//! @brief Remove left horizontal links. 
+static ASMJIT_INLINE ConstPoolNode* ConstPoolTree_skewNode(ConstPoolNode* node) { + if (node->_link[0]->_level == node->_level && node->_level != 0 ) { + ConstPoolNode *save = node->_link[0]; + node->_link[0] = save->_link[1]; + save->_link[1] = node; + node = save; + } + + return node; +} + +//! @internal +//! +//! @brief Remove consecutive horizontal links. +static ASMJIT_INLINE ConstPoolNode* ConstPoolTree_splitNode(ConstPoolNode* node) { + if (node->_link[1]->_link[1]->_level == node->_level && node->_level != 0) { + ConstPoolNode *save = node->_link[1]; + node->_link[1] = save->_link[0]; + save->_link[0] = node; + node = save; + node->_level++; + } + + return node; +} + +ConstPoolNode* ConstPoolTree::get(const void* data) { + ConstPoolNode* sentinel = const_cast(&_sentinel); + ConstPoolNode* node = _root; + size_t dataSize = _dataSize; + + while (node != sentinel) { + int c = ::memcmp(node->getData(), data, dataSize); + if (c == 0) + return node; + node = node->_link[c < 0]; + } + + return NULL; +} + +void ConstPoolTree::put(ConstPoolNode* newNode) { + ConstPoolNode* sentinel = const_cast(&_sentinel); + size_t dataSize = _dataSize; + + _length++; + if (_root == sentinel) { + _root = newNode; + return; + } + + ConstPoolNode* node = _root; + ConstPoolNode* stack[kHeightLimit]; + + unsigned int top = 0; + unsigned int dir; + + // Find a spot and save the stack. + for (;;) { + stack[top++] = node; + dir = ::memcmp(node->getData(), newNode->getData(), dataSize) < 0; + if (node->_link[dir] == sentinel) + break; + node = node->_link[dir]; + } + + // Link and rebalance. + node->_link[dir] = newNode; + + while (top > 0) { + // Which child? + node = stack[--top]; + + if (top != 0) + dir = stack[top - 1]->_link[1] == node; + + node = ConstPoolTree_skewNode(node); + node = ConstPoolTree_splitNode(node); + + // Fix the parent. 
+ if (top != 0) + stack[top - 1]->_link[dir] = node; + else + _root = node; + } +} + +// ============================================================================ +// [asmjit::ConstPool - Construction / Destruction] +// ============================================================================ + +ConstPool::ConstPool(Zone* zone) { + _zone = zone; + + size_t dataSize = 1; + for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) { + _tree[i].setDataSize(dataSize); + _gaps[i] = NULL; + dataSize <<= 1; + } + + _gapPool = NULL; + _size = 0; + _alignment = 0; +} + +ConstPool::~ConstPool() {} + +// ============================================================================ +// [asmjit::ConstPool - Reset] +// ============================================================================ + +void ConstPool::reset() { + for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) { + _tree[i].reset(); + _gaps[i] = NULL; + } + + _gapPool = NULL; + _size = 0; + _alignment = 0; +} + +// ============================================================================ +// [asmjit::ConstPool - Ops] +// ============================================================================ + +ASMJIT_INLINE size_t ConstPool_getGapIndex(size_t size) { + if (size <= 1) + return ConstPool::kIndex1; + else if (size <= 3) + return ConstPool::kIndex2; + else if (size <= 7) + return ConstPool::kIndex4; + else if (size <= 15) + return ConstPool::kIndex8; + else + return ConstPool::kIndex16; +} + +ASMJIT_INLINE ConstPoolGap* ConstPool_allocGap(ConstPool* self) { + ConstPoolGap* gap = self->_gapPool; + if (gap == NULL) + return self->_zone->allocT(); + + self->_gapPool = gap->_next; + return gap; +} + +ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPoolGap* gap) { + gap->_next = self->_gapPool; + self->_gapPool = gap; +} + +static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) { + ASMJIT_ASSERT(length > 0); + + while (length > 0) { + size_t gapIndex; + size_t gapLength; + + if 
(length >= 16 && IntUtil::isAligned(offset, 16)) { + gapIndex = ConstPool::kIndex16; + gapLength = 16; + } + else if (length >= 8 && IntUtil::isAligned(offset, 8)) { + gapIndex = ConstPool::kIndex8; + gapLength = 8; + } + else if (length >= 4 && IntUtil::isAligned(offset, 4)) { + gapIndex = ConstPool::kIndex4; + gapLength = 4; + } + else if (length >= 2 && IntUtil::isAligned(offset, 2)) { + gapIndex = ConstPool::kIndex2; + gapLength = 2; + } + else { + gapIndex = ConstPool::kIndex1; + gapLength = 1; + } + + // We don't have to check for errors here, if this failed nothing really + // happened (just the gap won't be visible) and it will fail again at + // place where checking will cause kErrorNoHeapMemory. + ConstPoolGap* gap = ConstPool_allocGap(self); + if (gap == NULL) + return; + + gap->_next = self->_gaps[gapIndex]; + self->_gaps[gapIndex] = gap; + + gap->_offset = offset; + gap->_length = gapLength; + + offset += gapLength; + length -= gapLength; + } +} + +Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) { + size_t treeIndex; + + if (size == 32) + treeIndex = kIndex32; + else if (size == 16) + treeIndex = kIndex16; + else if (size == 8) + treeIndex = kIndex8; + else if (size == 4) + treeIndex = kIndex4; + else if (size == 2) + treeIndex = kIndex2; + else if (size == 1) + treeIndex = kIndex1; + else + return kErrorInvalidArgument; + + ConstPoolNode* node = _tree[treeIndex].get(data); + if (node != NULL) { + dstOffset = node->_offset; + return kErrorOk; + } + + // Before incrementing the current offset try if there is a gap that can + // be used for the requested data. + size_t offset = ~static_cast(0); + size_t gapIndex = treeIndex; + + while (gapIndex != kIndexCount - 1) { + ConstPoolGap* gap = _gaps[treeIndex]; + + // Check if there is a gap. + if (gap != NULL) { + size_t gapOffset = gap->_offset; + size_t gapLength = gap->_length; + + // Destroy the gap for now. 
+ _gaps[treeIndex] = gap->_next; + ConstPool_freeGap(this, gap); + + offset = gapOffset; + ASMJIT_ASSERT(IntUtil::isAligned(offset, size)); + + gapLength -= size; + if (gapLength > 0) + ConstPool_addGap(this, gapOffset, gapLength); + } + + gapIndex++; + } + + if (offset == ~static_cast(0)) { + // Get how many bytes have to be skipped so the address is aligned accordingly + // to the 'size'. + size_t deltaTo = IntUtil::deltaTo(_size, size); + + if (deltaTo != 0) { + ConstPool_addGap(this, _size, deltaTo); + _size += deltaTo; + } + + offset = _size; + _size += size; + } + + // Add the initial node to the right index. + node = ConstPoolTree::_newNode(_zone, data, size, offset, false); + if (node == NULL) + return kErrorNoHeapMemory; + + _tree[treeIndex].put(node); + _alignment = IntUtil::iMax(_alignment, size); + + dstOffset = offset; + + // Now create a bunch of shared constants that are based on the data pattern. + // We stop at size 4, it probably doesn't make sense to split constants down + // to 1 byte. 
+ size_t pCount = 1; + while (size > 4) { + size >>= 1; + pCount <<= 1; + + ASMJIT_ASSERT(treeIndex != 0); + treeIndex--; + + const uint8_t* pData = static_cast(data); + for (size_t i = 0; i < pCount; i++, pData += size) { + node = _tree[treeIndex].get(pData); + + if (node != NULL) + continue; + + node = ConstPoolTree::_newNode(_zone, pData, size, offset + (i * size), true); + _tree[treeIndex].put(node); + } + } + + return kErrorOk; +} + +// ============================================================================ +// [asmjit::ConstPool - Reset] +// ============================================================================ + +struct ConstPoolFill { + ASMJIT_INLINE ConstPoolFill(uint8_t* dst, size_t dataSize) : + _dst(dst), + _dataSize(dataSize) {} + + ASMJIT_INLINE void visit(const ConstPoolNode* node) { + if (!node->_shared) + ::memcpy(_dst + node->_offset, node->getData(), _dataSize); + } + + uint8_t* _dst; + size_t _dataSize; +}; + +void ConstPool::fill(void* dst) { + // Clears possible gaps, asmjit should never emit garbage to the output. + ::memset(dst, 0, _size); + + ConstPoolFill filler(static_cast(dst), 1); + for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) { + _tree[i].iterate(filler); + filler._dataSize <<= 1; + } +} + +} // asmjit namespace + +// [Api-End] +#include "../apiend.h" diff --git a/src/asmjit/base/constpool.h b/src/asmjit/base/constpool.h new file mode 100644 index 0000000..dc67da0 --- /dev/null +++ b/src/asmjit/base/constpool.h @@ -0,0 +1,295 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
+ +// [Guard] +#ifndef _ASMJIT_BASE_CONSTPOOL_H +#define _ASMJIT_BASE_CONSTPOOL_H + +// [Dependencies - AsmJit] +#include "../base/error.h" +#include "../base/zone.h" + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { + +// ============================================================================ +// [asmjit::ConstPoolNode] +// ============================================================================ + +//! @internal +//! +//! @brief Zone-allocated constant-pool node. +struct ConstPoolNode { + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void* getData() const { + return static_cast(const_cast(this) + 1); + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! @brief Left/Right nodes. + ConstPoolNode* _link[2]; + //! @brief Horizontal level for balance. + uint32_t _level : 31; + //! @brief Whether this constant is shared with another. + uint32_t _shared : 1; + //! @brief Data offset from the beginning of the pool. + uint32_t _offset; +}; + +// ============================================================================ +// [asmjit::ConstPoolTree] +// ============================================================================ + +//! @internal +//! +//! @brief Zone-allocated constant-pool tree. +struct ConstPoolTree { + enum { + //! @brief Maximum tree height == log2(1 << 64). 
+ kHeightLimit = 64 + }; + + ASMJIT_API static const ConstPoolNode _sentinel; + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE ConstPoolTree(size_t dataSize = 0) : + _root(const_cast(&_sentinel)), + _length(0), + _dataSize(dataSize) {} + ASMJIT_INLINE ~ConstPoolTree() {} + + // -------------------------------------------------------------------------- + // [Reset] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void reset() { + _root = const_cast(&_sentinel); + _length = 0; + } + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE bool isEmpty() const { + return _length == 0; + } + + ASMJIT_INLINE size_t getLength() const { + return _length; + } + + ASMJIT_INLINE void setDataSize(size_t dataSize) { + ASMJIT_ASSERT(isEmpty()); + _dataSize = dataSize; + } + + // -------------------------------------------------------------------------- + // [Ops] + // -------------------------------------------------------------------------- + + ASMJIT_API ConstPoolNode* get(const void* data); + ASMJIT_API void put(ConstPoolNode* node); + + // -------------------------------------------------------------------------- + // [Iterate] + // -------------------------------------------------------------------------- + + template + ASMJIT_INLINE void iterate(Visitor& visitor) const { + ConstPoolNode* node = const_cast(_root); + ConstPoolNode* link; + + ConstPoolNode* stack[kHeightLimit]; + ConstPoolNode* sentinel = const_cast(&_sentinel); + + if (node == sentinel) + return; + + size_t top = 0; + + for (;;) { + link = node->_link[0]; + + if (link != sentinel) { + ASMJIT_ASSERT(top != kHeightLimit); + stack[top++] = node; + continue; + } + + 
visitor.visit(node); + link = node->_link[1]; + + if (link != sentinel) { + node = link; + continue; + } + + if (top == 0) + break; + + node = stack[--top]; + } + } + + // -------------------------------------------------------------------------- + // [Helpers] + // -------------------------------------------------------------------------- + + static ASMJIT_INLINE ConstPoolNode* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) { + ConstPoolNode* node = zone->allocT(sizeof(ConstPoolNode) + size); + if (node == NULL) + return NULL; + + node->_link[0] = const_cast(&_sentinel); + node->_link[1] = const_cast(&_sentinel); + node->_level = 1; + node->_shared = shared; + node->_offset = static_cast(offset); + + ::memcpy(node->getData(), data, size); + return node; + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! @brief Root of the tree + ConstPoolNode* _root; + //! @brief Length of the tree (count of nodes). + size_t _length; + //! @brief Size of the data. + size_t _dataSize; +}; + +// ============================================================================ +// [asmjit::ConstPoolGap] +// ============================================================================ + +//! @internal +//! +//! @brief Zone-allocated constant-pool gap. +struct ConstPoolGap { + //! @brief Link to the next gap + ConstPoolGap* _next; + //! @brief Offset of the gap. + size_t _offset; + //! @brief Remaining bytes of the gap (basically a gap size). 
+ size_t _length;
+};
+
+// ============================================================================
+// [asmjit::ConstPool]
+// ============================================================================
+
+struct ConstPool {
+ ASMJIT_NO_COPY(ConstPool)
+
+ enum {
+ kIndex1 = 0,
+ kIndex2 = 1,
+ kIndex4 = 2,
+ kIndex8 = 3,
+ kIndex16 = 4,
+ kIndex32 = 5,
+ kIndexCount = 6
+ };
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API ConstPool(Zone* zone);
+ ASMJIT_API ~ConstPool();
+
+ // --------------------------------------------------------------------------
+ // [Reset]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API void reset();
+
+ // --------------------------------------------------------------------------
+ // [Ops]
+ // --------------------------------------------------------------------------
+
+ //! @brief Get whether the constant-pool is empty.
+ ASMJIT_INLINE bool isEmpty() const {
+ return _size == 0;
+ }
+
+ //! @brief Get the size of the constant-pool in bytes.
+ ASMJIT_INLINE size_t getSize() const {
+ return _size;
+ }
+
+ ASMJIT_INLINE size_t getAlignment() const {
+ return _alignment;
+ }
+
+ //! @brief Add a constant to the constant pool.
+ //!
+ //! The constant must have known size, which is 1, 2, 4, 8, 16 or 32 bytes.
+ //! The constant is added to the pool only if it doesn't exist, otherwise
+ //! the cached value is returned.
+ //!
+ //! AsmJit is able to subdivide added constants, so for example if you add
+ //! 8-byte constant 0x1122334455667788 it will create the following slots:
+ //!
+ //! 8-byte: 0x1122334455667788
+ //! 4-byte: 0x11223344, 0x55667788
+ //!
+ //! The reason is that when combining MMX/SSE/AVX code some patterns are used
+ //! frequently. However, AsmJit is not able to reallocate a constant that has
+ //! 
been already added. For example if you try to add 4-byte constant and then + //! 8-byte constant having the same 4-byte pattern as the previous one, two + //! independent slots will be generated by the pool. + ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset); + + // -------------------------------------------------------------------------- + // [Fill] + // -------------------------------------------------------------------------- + + //! @brief Fill the destination with the constants from the pool. + ASMJIT_API void fill(void* dst); + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! @brief Zone allocator. + Zone* _zone; + //! @brief Tree per size. + ConstPoolTree _tree[kIndexCount]; + //! @brief Gaps per size. + ConstPoolGap* _gaps[kIndexCount]; + //! @brief Gaps pool. + ConstPoolGap* _gapPool; + + //! @brief Size of the pool (in bytes). + size_t _size; + //! @brief Alignment. 
+ size_t _alignment; +}; + +} // asmjit namespace + +// [Api-End] +#include "../apiend.h" + +// [Guard] +#endif // _ASMJIT_BASE_CONSTPOOL_H diff --git a/src/asmjit/base/context.cpp b/src/asmjit/base/context.cpp index b2f7845..59be4e7 100644 --- a/src/asmjit/base/context.cpp +++ b/src/asmjit/base/context.cpp @@ -22,7 +22,7 @@ namespace asmjit { BaseContext::BaseContext(BaseCompiler* compiler) : _compiler(compiler), - _zoneAllocator(8192 - sizeof(Zone::Chunk) - kMemAllocOverhead) { + _baseZone(8192 - sizeof(Zone::Chunk) - kMemAllocOverhead) { BaseContext::reset(); } @@ -34,7 +34,7 @@ BaseContext::~BaseContext() {} // ============================================================================ void BaseContext::reset() { - _zoneAllocator.clear(); + _baseZone.clear(); _func = NULL; _start = NULL; @@ -101,7 +101,7 @@ MemCell* BaseContext::_newVarCell(VarData* vd) { return NULL; } else { - cell = static_cast(_zoneAllocator.alloc(sizeof(MemCell))); + cell = static_cast(_baseZone.alloc(sizeof(MemCell))); if (cell == NULL) goto _NoMemory; @@ -136,7 +136,7 @@ _NoMemory: } MemCell* BaseContext::_newStackCell(uint32_t size, uint32_t alignment) { - MemCell* cell = static_cast(_zoneAllocator.alloc(sizeof(MemCell))); + MemCell* cell = static_cast(_baseZone.alloc(sizeof(MemCell))); if (cell == NULL) goto _NoMemory; diff --git a/src/asmjit/base/context_p.h b/src/asmjit/base/context_p.h index d5334c7..b870a35 100644 --- a/src/asmjit/base/context_p.h +++ b/src/asmjit/base/context_p.h @@ -128,12 +128,12 @@ struct BaseContext { ASMJIT_INLINE VarBits* newBits(uint32_t len) { return static_cast( - _zoneAllocator.calloc(static_cast(len) * VarBits::kEntitySize)); + _baseZone.calloc(static_cast(len) * VarBits::kEntitySize)); } ASMJIT_INLINE VarBits* copyBits(const VarBits* src, uint32_t len) { return static_cast( - _zoneAllocator.dup(src, static_cast(len) * VarBits::kEntitySize)); + _baseZone.dup(src, static_cast(len) * VarBits::kEntitySize)); } // 
-------------------------------------------------------------------------- @@ -211,7 +211,7 @@ struct BaseContext { FuncNode* _func; //! @brief Zone allocator. - Zone _zoneAllocator; + Zone _baseZone; //! @brief Start of the current active scope. BaseNode* _start; diff --git a/src/asmjit/base/defs.h b/src/asmjit/base/defs.h index b400754..ca61f09 100644 --- a/src/asmjit/base/defs.h +++ b/src/asmjit/base/defs.h @@ -1117,8 +1117,7 @@ struct Label : public Operand { //! @brief Create new, unassociated label. ASMJIT_INLINE Label() : Operand(NoInit) { - _init_packed_op_sz_b0_b1_id(kOperandTypeLabel, 0, 0, 0, kInvalidValue); - _init_packed_d2_d3(0, 0); + reset(); } explicit ASMJIT_INLINE Label(uint32_t id) : Operand(NoInit) { @@ -1136,6 +1135,15 @@ struct Label : public Operand { explicit ASMJIT_INLINE Label(const _NoInit&) : Operand(NoInit) {} + // -------------------------------------------------------------------------- + // [Reset] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void reset() { + _init_packed_op_sz_b0_b1_id(kOperandTypeLabel, 0, 0, 0, kInvalidValue); + _init_packed_d2_d3(0, 0); + } + // -------------------------------------------------------------------------- // [Operator Overload] // -------------------------------------------------------------------------- diff --git a/src/asmjit/base/zone.h b/src/asmjit/base/zone.h index b85f586..312836f 100644 --- a/src/asmjit/base/zone.h +++ b/src/asmjit/base/zone.h @@ -8,6 +8,9 @@ #ifndef _ASMJIT_BASE_ZONE_H #define _ASMJIT_BASE_ZONE_H +// [Dependencies] +#include "../base/globals.h" + // [Api-Begin] #include "../apibegin.h" diff --git a/src/asmjit/build.h b/src/asmjit/build.h index 2b9539f..fdf2322 100644 --- a/src/asmjit/build.h +++ b/src/asmjit/build.h @@ -63,12 +63,18 @@ defined(__x86_64__) # define ASMJIT_HOST_X64 # define ASMJIT_HOST_LE +# define ASMJIT_HOST_UNALIGNED_16 +# define ASMJIT_HOST_UNALIGNED_32 +# define ASMJIT_HOST_UNALIGNED_64 #elif \ 
defined(_M_IX86 ) || \ defined(__INTEL__) || \ defined(__i386__ ) # define ASMJIT_HOST_X86 # define ASMJIT_HOST_LE +# define ASMJIT_HOST_UNALIGNED_16 +# define ASMJIT_HOST_UNALIGNED_32 +# define ASMJIT_HOST_UNALIGNED_64 #elif \ defined(_ARM ) || \ defined(_M_ARM_FP ) || \ diff --git a/src/asmjit/x86/x86compiler.cpp b/src/asmjit/x86/x86compiler.cpp index d272fd1..c70f3e5 100644 --- a/src/asmjit/x86/x86compiler.cpp +++ b/src/asmjit/x86/x86compiler.cpp @@ -68,6 +68,30 @@ bool X86X64CallNode::_setRet(uint32_t i, const Operand& op) { return true; } +// ============================================================================ +// [asmjit::x86x64::X86X64Compiler - Helpers (Private)] +// ============================================================================ + +static Error X86X64Compiler_emitConstPool(X86X64Compiler* self, + Label& label, ConstPool& pool) { + + if (label.getId() == kInvalidValue) + return kErrorOk; + + self->align(static_cast(pool.getAlignment())); + self->bind(label); + + EmbedNode* embedNode = self->embed(NULL, static_cast(pool.getSize())); + if (embedNode == NULL) + return kErrorNoHeapMemory; + + pool.fill(embedNode->getData()); + pool.reset(); + label.reset(); + + return kErrorOk; +} + // ============================================================================ // [asmjit::x86x64::X86X64Compiler - Construction / Destruction] // ============================================================================ @@ -114,7 +138,7 @@ static InstNode* X86X64Compiler_newInst(X86X64Compiler* self, void* p, uint32_t InstNode* X86X64Compiler::newInst(uint32_t code) { size_t size = X86X64Compiler_getInstSize(code); - InstNode* inst = static_cast(_zoneAllocator.alloc(size)); + InstNode* inst = static_cast(_baseZone.alloc(size)); if (inst == NULL) goto _NoMemory; @@ -128,7 +152,7 @@ _NoMemory: InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0) { size_t size = X86X64Compiler_getInstSize(code); - InstNode* inst = 
static_cast(_zoneAllocator.alloc(size + 1 * sizeof(Operand))); + InstNode* inst = static_cast(_baseZone.alloc(size + 1 * sizeof(Operand))); if (inst == NULL) goto _NoMemory; @@ -147,7 +171,7 @@ _NoMemory: InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1) { size_t size = X86X64Compiler_getInstSize(code); - InstNode* inst = static_cast(_zoneAllocator.alloc(size + 2 * sizeof(Operand))); + InstNode* inst = static_cast(_baseZone.alloc(size + 2 * sizeof(Operand))); if (inst == NULL) goto _NoMemory; @@ -168,7 +192,7 @@ _NoMemory: InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) { size_t size = X86X64Compiler_getInstSize(code); - InstNode* inst = static_cast(_zoneAllocator.alloc(size + 3 * sizeof(Operand))); + InstNode* inst = static_cast(_baseZone.alloc(size + 3 * sizeof(Operand))); if (inst == NULL) goto _NoMemory; @@ -191,7 +215,7 @@ _NoMemory: InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) { size_t size = X86X64Compiler_getInstSize(code); - InstNode* inst = static_cast(_zoneAllocator.alloc(size + 4 * sizeof(Operand))); + InstNode* inst = static_cast(_baseZone.alloc(size + 4 * sizeof(Operand))); if (inst == NULL) goto _NoMemory; @@ -216,7 +240,7 @@ _NoMemory: InstNode* X86X64Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) { size_t size = X86X64Compiler_getInstSize(code); - InstNode* inst = static_cast(_zoneAllocator.alloc(size + 5 * sizeof(Operand))); + InstNode* inst = static_cast(_baseZone.alloc(size + 5 * sizeof(Operand))); if (inst == NULL) goto _NoMemory; @@ -348,7 +372,7 @@ X86X64FuncNode* X86X64Compiler::newFunc(uint32_t conv, const FuncPrototype& p) { // Allocate space for function arguments. 
func->_argList = NULL; if (func->getArgCount() != 0) { - func->_argList = _zoneAllocator.allocT(func->getArgCount() * sizeof(VarData*)); + func->_argList = _baseZone.allocT(func->getArgCount() * sizeof(VarData*)); if (func->_argList == NULL) goto _NoMemory; ::memset(func->_argList, 0, func->getArgCount() * sizeof(VarData*)); @@ -382,9 +406,16 @@ EndNode* X86X64Compiler::endFunc() { X86X64FuncNode* func = getFunc(); ASMJIT_ASSERT(func != NULL); + // Add function exit / epilog marker. addNode(func->getExitNode()); + + // Add local constant pool at the end of the function (if it exists). + X86X64Compiler_emitConstPool(this, _localConstPoolLabel, _localConstPool); + + // Add function end marker. addNode(func->getEnd()); + // Finalize... func->addFuncFlags(kFuncFlagIsFinished); _func = NULL; @@ -434,7 +465,7 @@ X86X64CallNode* X86X64Compiler::newCall(const Operand& o0, uint32_t conv, const if ((nArgs = p.getArgCount()) == 0) return node; - node->_args = static_cast(_zoneAllocator.alloc(nArgs * sizeof(Operand))); + node->_args = static_cast(_baseZone.alloc(nArgs * sizeof(Operand))); if (node->_args == NULL) goto _NoMemory; @@ -513,6 +544,47 @@ Error X86X64Compiler::_newStack(BaseMem* mem, uint32_t size, uint32_t alignment, return kErrorOk; } +// ============================================================================ +// [asmjit::x86x64::X86X64Compiler - Const] +// ============================================================================ + +Error X86X64Compiler::_newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) { + Error error = kErrorOk; + size_t offset; + + Label* dstLabel; + ConstPool* dstPool; + + if (scope == kConstScopeLocal) { + dstLabel = &_localConstPoolLabel; + dstPool = &_localConstPool; + } + else if (scope == kConstScopeGlobal) { + dstLabel = &_globalConstPoolLabel; + dstPool = &_globalConstPool; + } + else { + error = kErrorInvalidArgument; + goto _OnError; + } + + error = dstPool->add(data, size, offset); + if (error != kErrorOk) + 
goto _OnError; + + if (dstLabel->getId() == kInvalidValue) { + error = _newLabel(dstLabel); + if (error != kErrorOk) + goto _OnError; + } + + *static_cast(mem) = ptr(*dstLabel, static_cast(offset), static_cast(size)); + return kErrorOk; + +_OnError: + return error; +} + // ============================================================================ // [asmjit::x86x64::X86X64Compiler - Make] // ============================================================================ @@ -550,6 +622,9 @@ static ASMJIT_INLINE void* X86X64Compiler_make(X86X64Compiler* self) { } void* X86X64Compiler::make() { + // Flush global constant pool + X86X64Compiler_emitConstPool(this, _globalConstPoolLabel, _globalConstPool); + #if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_BUILD_X64) return X86X64Compiler_make(this); #elif !defined(ASMJIT_BUILD_X86) && defined(ASMJIT_BUILD_X64) diff --git a/src/asmjit/x86/x86compiler.h b/src/asmjit/x86/x86compiler.h index fc4274f..be0b680 100644 --- a/src/asmjit/x86/x86compiler.h +++ b/src/asmjit/x86/x86compiler.h @@ -10,6 +10,7 @@ // [Dependencies - AsmJit] #include "../base/compiler.h" +#include "../base/vectypes.h" #include "../x86/x86assembler.h" #include "../x86/x86defs.h" #include "../x86/x86func.h" @@ -1455,13 +1456,44 @@ struct X86X64Compiler : public BaseCompiler { //! @overridden ASMJIT_API virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name); - //! @brief Create a new memory chunk allocated on the stack. + //! @brief Create a new memory chunk allocated on the current function's stack. ASMJIT_INLINE Mem newStack(uint32_t size, uint32_t alignment, const char* name = NULL) { Mem m(NoInit); _newStack(&m, size, alignment, name); return m; } + // -------------------------------------------------------------------------- + // [Const] + // -------------------------------------------------------------------------- + + //! 
@overridden + ASMJIT_API virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size); + + //! @brief Put data to a constant-pool and get a memory reference to it. + ASMJIT_INLINE Mem newConst(uint32_t scope, const void* data, size_t size) { + Mem m(NoInit); + _newConst(&m, scope, data, size); + return m; + } + + ASMJIT_INLINE Mem newConst1(uint32_t scope, uint8_t val) { return newConst(scope, &val, 1); } + + ASMJIT_INLINE Mem newConst2(uint32_t scope, int16_t val) { return newConst(scope, &val, 2); } + ASMJIT_INLINE Mem newConst2(uint32_t scope, uint16_t val) { return newConst(scope, &val, 2); } + + ASMJIT_INLINE Mem newConst4(uint32_t scope, int32_t val) { return newConst(scope, &val, 4); } + ASMJIT_INLINE Mem newConst4(uint32_t scope, uint32_t val) { return newConst(scope, &val, 4); } + ASMJIT_INLINE Mem newConst4(uint32_t scope, float val) { return newConst(scope, &val, 4); } + + ASMJIT_INLINE Mem newConst8(uint32_t scope, int64_t val) { return newConst(scope, &val, 8); } + ASMJIT_INLINE Mem newConst8(uint32_t scope, uint64_t val) { return newConst(scope, &val, 8); } + ASMJIT_INLINE Mem newConst8(uint32_t scope, double val) { return newConst(scope, &val, 8); } + ASMJIT_INLINE Mem newConst8(uint32_t scope, const Vec64Data& val) { return newConst(scope, &val, 8); } + + ASMJIT_INLINE Mem newConst16(uint32_t scope, const Vec128Data& val) { return newConst(scope, &val, 16); } + ASMJIT_INLINE Mem newConst32(uint32_t scope, const Vec256Data& val) { return newConst(scope, &val, 32); } + // -------------------------------------------------------------------------- // [Embed] // -------------------------------------------------------------------------- diff --git a/src/asmjit/x86/x86context.cpp b/src/asmjit/x86/x86context.cpp index 8932704..2c4d2c7 100644 --- a/src/asmjit/x86/x86context.cpp +++ b/src/asmjit/x86/x86context.cpp @@ -452,7 +452,7 @@ void X86X64Context::emitLoad(VarData* vd, uint32_t regIndex, const char* reason) return; _Comment: - 
node->setComment(compiler->_stringAllocator.sformat("[%s] %s", reason, vd->getName())); + node->setComment(compiler->_stringZone.sformat("[%s] %s", reason, vd->getName())); } // ============================================================================ @@ -533,7 +533,7 @@ void X86X64Context::emitSave(VarData* vd, uint32_t regIndex, const char* reason) return; _Comment: - node->setComment(compiler->_stringAllocator.sformat("[%s] %s", reason, vd->getName())); + node->setComment(compiler->_stringZone.sformat("[%s] %s", reason, vd->getName())); } // ============================================================================ @@ -602,7 +602,7 @@ void X86X64Context::emitMove(VarData* vd, uint32_t toRegIndex, uint32_t fromRegI return; _Comment: - node->setComment(compiler->_stringAllocator.sformat("[%s] %s", reason, vd->getName())); + node->setComment(compiler->_stringZone.sformat("[%s] %s", reason, vd->getName())); } // ============================================================================ @@ -633,7 +633,7 @@ void X86X64Context::emitSwapGp(VarData* aVd, VarData* bVd, uint32_t aIndex, uint return; _Comment: - node->setComment(compiler->_stringAllocator.sformat("[%s] %s, %s", reason, aVd->getName(), bVd->getName())); + node->setComment(compiler->_stringZone.sformat("[%s] %s, %s", reason, aVd->getName(), bVd->getName())); } // ============================================================================ @@ -1305,7 +1305,7 @@ BaseVarState* X86X64Context::saveState() { sizeof(VarState) + vdCount * sizeof(StateCell), sizeof(void*)); VarState* cur = getState(); - VarState* dst = _zoneAllocator.allocT(size); + VarState* dst = _baseZone.allocT(size); if (dst == NULL) return NULL; @@ -1559,7 +1559,7 @@ static void X86X64Context_prepareSingleVarInst(uint32_t code, VarAttr* va) { //! //! @brief Add unreachable-flow data to the unreachable flow list. 
static ASMJIT_INLINE Error X86X64Context_addUnreachableNode(X86X64Context* self, BaseNode* node) { - PodList::Link* link = self->_zoneAllocator.allocT::Link>(); + PodList::Link* link = self->_baseZone.allocT::Link>(); if (link == NULL) return self->setError(kErrorNoHeapMemory); @@ -1573,7 +1573,7 @@ static ASMJIT_INLINE Error X86X64Context_addUnreachableNode(X86X64Context* self, //! //! @brief Add jump-flow data to the jcc flow list. static ASMJIT_INLINE Error X86X64Context_addJccNode(X86X64Context* self, BaseNode* node) { - PodList::Link* link = self->_zoneAllocator.allocT::Link>(); + PodList::Link* link = self->_baseZone.allocT::Link>(); if (link == NULL) ASMJIT_PROPAGATE_ERROR(self->setError(kErrorNoHeapMemory)); @@ -1998,16 +1998,13 @@ _NextGroup: VI_BEGIN(); if (node->getHint() == kVarHintAlloc) { - HintNode* cur = node; - uint32_t remain[kRegClassCount]; - RegMask inRegs; + HintNode* cur = node; remain[kRegClassGp] = _baseRegsCount - 1 - func->hasFuncFlag(kFuncFlagIsNaked); remain[kRegClassFp] = kRegCountFp; remain[kRegClassMm] = kRegCountMm; remain[kRegClassXy] = _baseRegsCount; - inRegs.reset(); // Merge as many alloc-hints as possible. for (;;) { @@ -2695,7 +2692,7 @@ _OnTarget: ltUnused = ltUnused->prev; } else { - ltTmp = _zoneAllocator.allocT( + ltTmp = _baseZone.allocT( sizeof(LivenessTarget) - sizeof(VarBits) + bLen * sizeof(uintptr_t)); if (ltTmp == NULL) @@ -2885,7 +2882,7 @@ Error X86X64Context::annotate() { BaseNode* node_ = func; BaseNode* end = func->getEnd(); - Zone& sa = _compiler->_stringAllocator; + Zone& sa = _compiler->_stringZone; StringBuilderT<128> sb; uint32_t maxLen = 0; @@ -4947,8 +4944,8 @@ static void X86X64Context_translateJump(X86X64Context* self, JumpNode* jNode, Ta compiler->_setCursor(extNode); self->switchState(jTarget->getState()); - // If any instruction was added during switchState() we have to wrap the - // generated code in a block. 
+ // If one or more instruction has been added during switchState() it will be + // moved at the end of the function body. if (compiler->getCursor() != extNode) { TargetNode* jTrampolineTarget = compiler->newTarget(); diff --git a/src/asmjit/x86/x86context_p.h b/src/asmjit/x86/x86context_p.h index 9168ad6..768150f 100644 --- a/src/asmjit/x86/x86context_p.h +++ b/src/asmjit/x86/x86context_p.h @@ -83,7 +83,7 @@ struct X86X64Context : public BaseContext { ASMJIT_INLINE VarInst* newVarInst(uint32_t vaCount) { return static_cast( - _zoneAllocator.alloc(sizeof(VarInst) + vaCount * sizeof(VarAttr))); + _baseZone.alloc(sizeof(VarInst) + vaCount * sizeof(VarAttr))); } // --------------------------------------------------------------------------