From 5d40561d14f93dc45613bfa03155d1dfb4f5825a Mon Sep 17 00:00:00 2001
From: Petr Kobalicek
Date: Tue, 16 Jul 2019 01:24:22 +0200
Subject: [PATCH] Refactored register allocator and Compiler. (#249)

Refactored build system macros (ASMJIT_BUILD_STATIC -> ASMJIT_STATIC).
Refactored AVX512 broadcast {1toN} - moved to operand from instruction.
Refactored naming - renamed getters to not use the get prefix.
Refactored code structure - moved arch-specific stuff into the x86 namespace.
Refactored some compiler/arch-specific macros, respect rel/abs option in mov REG, [ADDR].
Refactored StringBuilder (renamed to String, added small string optimization).
Refactored LabelId<->LabelEntry mapping, force label offset to 64 bits on all archs.
Renamed Runtime to Target (JitRuntime kept for now).
Renamed VirtMemManager to JitAllocator.
Renamed VirtMem to JitUtils.
Renamed FuncSignatureX to FuncSignatureBuilder.
Fixed xchg [mem], rex-lo, refactored RelocEntry.
Fixed Logger to always show abs|rel when formatting a memory operand.
Fixed Logger to prefix HEX numbers with the 0x prefix.
Fixed Support::ctzGeneric to always return uint32_t, T doesn't matter.
Fixed LightCall to not save MMX and K registers.
Fixed CpuInfo constructor to propagate NoInit (#243).
Added VAES, AVX512_VBMI2, AVX512_VNNI, and AVX512_BITALG cpu-features and instructions.
Added emscripten support (asmjit can now be compiled by emscripten).
Added asmjit.natvis for a better MSVC debugging experience.
Added x86::ptr_abs|ptr_rel.
Added support for multibyte nop r/m (#135).
Added support for 32-bit to 64-bit zero-extended addresses, improved validation of memory addresses, and removed the wrt address type as this will be reworked.
Added support for multiple sections, reworked address table support (previously known as trampolines).
Added the following x86 modifiers to the x86::Emitter - xacquire(), xrelease(), and k(kreg).
Added a possibility to use the REP prefix with the RET instruction.
Added a possibility to relocate [rel addr] during relocate().
Added variadic function-call support (Compiler), argument duplication (Compiler), and better /dev/shm vs /tmp shared memory handling (VirtMem).
Removed imm_u and imm_ptr helpers; imm() can now accept any integer and pointer.
Changed the default behavior of optimizing instructions to disabled, with a possibility to enable that feature through kOptionOptimizedForSize.
Use default copy construction / assignment to prevent a new kind of warning introduced by GCC 9.
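To make a few of the renames above concrete, here is a minimal sketch of the updated usage. It is derived from the README example further down in this patch; the commented xacquire()/lock() and k() lines only illustrate the modifier names listed above and are not taken from code in this patch, so treat their exact spelling as an assumption.

```c++
// Minimal sketch of the renamed API (based on the updated README example in this patch).
#include <asmjit/asmjit.h>

using namespace asmjit;

typedef int (*Func)(void);

int runExample() {                  // Hypothetical helper, not part of this patch.
  JitRuntime rt;                    // Runtime specialized for JIT code execution.
  CodeHolder code;                  // Holds code and relocation information.
  code.init(rt.codeInfo());         // Was rt.getCodeInfo(); getters lost the 'get' prefix.

  x86::Assembler a(&code);          // Was X86Assembler; arch-specific types now live in x86::.
  a.mov(x86::eax, 1);               // Move one to 'eax' register.
  a.ret();                          // Return from function.

  // New x86::Emitter modifiers named above (illustrative usage, assumed spelling):
  // a.xacquire().lock().add(x86::dword_ptr(x86::eax), 1);
  // a.k(x86::k1).vaddpd(x86::zmm0, x86::zmm1, x86::zmm2);

  Func fn;
  Error err = rt.add(&fn, &code);   // Add the generated code to the runtime.
  if (err) return -1;
  return fn();                      // A real program would also release fn from the runtime when done.
}
```

The FuncSignatureX to FuncSignatureBuilder change is described above as a rename only, so signature-building call sites should mostly just need the new name.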
---
 .appveyor.yml | 54 -
 .gitignore | 1 +
 .travis.yml | 277 +-
 BREAKING.md | 75 -
 CMakeLists.txt | 451 +-
 CxxProject.cmake | 335 -
 LICENSE.md | 2 +-
 README.md | 1363 +-
 asmjit.natvis | 54 +
 cxxconfig.js | 16 -
 src/asmjit/arm.h | 21 -
 src/asmjit/asmjit.h | 50 +-
 src/asmjit/asmjit_apibegin.h | 124 -
 src/asmjit/asmjit_apiend.h | 74 -
 src/asmjit/asmjit_build.h | 949 --
 src/asmjit/base.h | 34 -
 src/asmjit/base/arch.cpp | 161 -
 src/asmjit/base/arch.h | 199 -
 src/asmjit/base/assembler.cpp | 447 -
 src/asmjit/base/assembler.h | 154 -
 src/asmjit/base/codebuilder.cpp | 584 -
 src/asmjit/base/codebuilder.h | 915 --
 src/asmjit/base/codecompiler.cpp | 573 -
 src/asmjit/base/codecompiler.h | 738 -
 src/asmjit/base/codeemitter.cpp | 236 -
 src/asmjit/base/codeemitter.h | 499 -
 src/asmjit/base/codeholder.cpp | 697 -
 src/asmjit/base/codeholder.h | 748 -
 src/asmjit/base/constpool.cpp | 511 -
 src/asmjit/base/constpool.h | 257 -
 src/asmjit/base/cpuinfo.cpp | 674 -
 src/asmjit/base/cpuinfo.h | 373 -
 src/asmjit/base/func.cpp | 186 -
 src/asmjit/base/func.h | 1296 --
 src/asmjit/base/globals.cpp | 118 -
 src/asmjit/base/globals.h | 341 -
 src/asmjit/base/inst.cpp | 77 -
 src/asmjit/base/inst.h | 108 -
 src/asmjit/base/logging.cpp | 497 -
 src/asmjit/base/logging.h | 288 -
 src/asmjit/base/misc_p.h | 74 -
 src/asmjit/base/operand.cpp | 209 -
 src/asmjit/base/operand.h | 1570 ---
 src/asmjit/base/osutils.cpp | 228 -
 src/asmjit/base/osutils.h | 178 -
 src/asmjit/base/regalloc.cpp | 594 -
 src/asmjit/base/regalloc_p.h | 568 -
 src/asmjit/base/runtime.cpp | 147 -
 src/asmjit/base/runtime.h | 198 -
 src/asmjit/base/string.cpp | 353 -
 src/asmjit/base/string.h | 289 -
 src/asmjit/base/utils.cpp | 176 -
 src/asmjit/base/utils.h | 1358 --
 src/asmjit/base/vmem.cpp | 1077 --
 src/asmjit/base/vmem.h | 154 -
 src/asmjit/base/zone.cpp | 833 --
 src/asmjit/base/zone.h | 1128 --
 src/asmjit/core.h | 85 +
 src/asmjit/core/arch.cpp | 160 +
 src/asmjit/core/arch.h | 187 +
 src/asmjit/core/assembler.cpp | 491 +
 src/asmjit/core/assembler.h | 165 +
 src/asmjit/core/build.h | 573 +
 src/asmjit/core/builder.cpp | 995 ++
 src/asmjit/core/builder.h | 1278 ++
 src/asmjit/core/callconv.cpp | 43 +
 src/asmjit/core/callconv.h | 394 +
 src/asmjit/core/codebufferwriter_p.h | 171 +
 src/asmjit/core/codeholder.cpp | 1109 ++
 src/asmjit/core/codeholder.h | 887 ++
 src/asmjit/core/compiler.cpp | 556 +
 src/asmjit/core/compiler.h | 563 +
 src/asmjit/core/constpool.cpp | 359 +
 src/asmjit/core/constpool.h | 240 +
 src/asmjit/core/cpuinfo.cpp | 81 +
 src/asmjit/core/cpuinfo.h | 135 +
 .../{base/simdtypes.h => core/datatypes.h} | 1931 ++-
 src/asmjit/core/emitter.cpp | 257 +
 src/asmjit/core/emitter.h | 532 +
 src/asmjit/core/features.h | 145 +
 src/asmjit/core/func.cpp | 128 +
 src/asmjit/core/func.h | 949 ++
 src/asmjit/core/globals.cpp | 115 +
 src/asmjit/core/globals.h | 404 +
 src/asmjit/core/inst.cpp | 124 +
 src/asmjit/core/inst.h | 448 +
 src/asmjit/core/jitallocator.cpp | 1137 ++
 src/asmjit/core/jitallocator.h | 261 +
 src/asmjit/core/jitruntime.cpp | 142 +
 src/asmjit/core/jitruntime.h | 109 +
 src/asmjit/core/logging.cpp | 515 +
 src/asmjit/core/logging.h | 338 +
 src/asmjit/core/misc_p.h | 32 +
 src/asmjit/core/operand.cpp | 113 +
 src/asmjit/core/operand.h | 1316 ++
 src/asmjit/core/osutils.cpp | 90 +
 src/asmjit/core/osutils.h | 96 +
 src/asmjit/core/raassignment_p.h | 384 +
 src/asmjit/core/rabuilders_p.h | 420 +
 src/asmjit/core/radefs_p.h | 1075 ++
 src/asmjit/core/ralocal.cpp | 973 ++
 src/asmjit/core/ralocal_p.h | 257 +
 src/asmjit/core/rapass.cpp | 1831 +++
 src/asmjit/core/rapass_p.h | 1066 ++
 src/asmjit/core/rastack.cpp | 192 +
 src/asmjit/core/rastack_p.h | 166 +
 src/asmjit/core/string.cpp | 529 +
 src/asmjit/core/string.h | 335 +
 src/asmjit/core/support.cpp | 483 +
 src/asmjit/core/support.h | 1390 ++
 src/asmjit/core/target.cpp | 22 +
 src/asmjit/core/target.h | 193 +
 src/asmjit/core/type.cpp | 28 +
 src/asmjit/core/type.h | 377 +
 src/asmjit/core/virtmem.cpp | 496 +
 src/asmjit/core/virtmem.h | 121 +
 src/asmjit/core/zone.cpp | 366 +
 src/asmjit/core/zone.h | 625 +
 src/asmjit/core/zonehash.cpp | 176 +
 src/asmjit/core/zonehash.h | 184 +
 src/asmjit/core/zonelist.cpp | 166 +
 src/asmjit/core/zonelist.h | 186 +
 src/asmjit/core/zonestack.cpp | 181 +
 src/asmjit/core/zonestack.h | 217 +
 src/asmjit/core/zonestring.h | 108 +
 src/asmjit/core/zonetree.cpp | 102 +
 src/asmjit/core/zonetree.h | 368 +
 src/asmjit/core/zonevector.cpp | 359 +
 src/asmjit/core/zonevector.h | 682 +
 src/asmjit/x86.h | 16 +-
 src/asmjit/x86/x86assembler.cpp | 4009 +++---
 src/asmjit/x86/x86assembler.h | 103 +-
 src/asmjit/x86/x86builder.cpp | 62 +-
 src/asmjit/x86/x86builder.h | 82 +-
 src/asmjit/x86/x86callconv.cpp | 148 +
 src/asmjit/x86/x86callconv_p.h | 33 +
 src/asmjit/x86/x86compiler.cpp | 378 +-
 src/asmjit/x86/x86compiler.h | 364 +-
 src/asmjit/x86/x86emitter.h | 10311 +++++++-------
 src/asmjit/x86/x86features.cpp | 378 +
 src/asmjit/x86/x86features.h | 269 +
 src/asmjit/x86/x86globals.h | 2434 +++-
 src/asmjit/x86/x86inst.cpp | 3727 -----
 src/asmjit/x86/x86inst.h | 2518 ----
 src/asmjit/x86/x86instapi.cpp | 1507 ++
 src/asmjit/x86/x86instapi_p.h | 42 +
 src/asmjit/x86/x86instdb.cpp | 3943 ++++++
 src/asmjit/x86/x86instdb.h | 454 +
 src/asmjit/x86/x86instdb_p.h | 300 +
 src/asmjit/x86/x86instimpl.cpp | 731 -
 src/asmjit/x86/x86instimpl_p.h | 45 -
 src/asmjit/x86/x86internal.cpp | 1997 +--
 src/asmjit/x86/x86internal_p.h | 53 +-
 src/asmjit/x86/x86logging.cpp | 874 +-
 src/asmjit/x86/x86logging_p.h | 62 +-
 src/asmjit/x86/x86misc.h | 388 -
 src/asmjit/x86/x86opcode_p.h | 435 +
 src/asmjit/x86/x86operand.cpp | 342 +-
 src/asmjit/x86/x86operand.h | 1877 ++-
 src/asmjit/x86/x86operand_regs.cpp | 122 -
 src/asmjit/x86/x86rapass.cpp | 1154 ++
 src/asmjit/x86/x86rapass_p.h | 101 +
 src/asmjit/x86/x86regalloc.cpp | 4062 ------
 src/asmjit/x86/x86regalloc_p.h | 705 -
 test/asmjit_bench_x86.cpp | 205 +-
 test/asmjit_test_misc.h | 159 +-
 test/asmjit_test_opcode.cpp | 56 +-
 test/asmjit_test_opcode.h | 11679 ++++++++--------
 test/asmjit_test_unit.cpp | 458 +-
 test/asmjit_test_x86_asm.cpp | 156 +-
 test/asmjit_test_x86_cc.cpp | 3166 +++--
 test/asmjit_test_x86_sections.cpp | 156 +
 test/broken.cpp | 172 +-
 test/broken.h | 100 +-
 tools/configure-mac-xcode.sh | 9 -
 tools/configure-makefiles.sh | 17 +
 tools/configure-ninja.sh | 17 +
 tools/configure-unix-makefiles-dbg.sh | 9 -
 tools/configure-unix-makefiles-rel.sh | 9 -
 tools/configure-vs-x64.bat | 9 +
 tools/configure-vs-x86.bat | 9 +
 tools/configure-win-mingw-dbg.bat | 9 -
 tools/configure-win-mingw-rel.bat | 9 -
 tools/configure-win-vs2010-x64.bat | 9 -
 tools/configure-win-vs2010-x86.bat | 9 -
 tools/configure-win-vs2013-x64.bat | 9 -
 tools/configure-win-vs2013-x86.bat | 9 -
 tools/configure-win-vs2015-x64.bat | 9 -
 tools/configure-win-vs2015-x86.bat | 9 -
 tools/configure-xcode.sh | 9 +
 tools/generate-arm.js | 207 -
 tools/generate-base.js | 537 -
 tools/generate-x86.js | 1829 ---
tools/tablegen-x86.js | 2412 ++++ tools/tablegen.js | 917 ++ tools/tablegen.sh | 3 + 196 files changed, 65058 insertions(+), 56743 deletions(-) delete mode 100644 .appveyor.yml delete mode 100644 BREAKING.md delete mode 100644 CxxProject.cmake create mode 100644 asmjit.natvis delete mode 100644 cxxconfig.js delete mode 100644 src/asmjit/arm.h delete mode 100644 src/asmjit/asmjit_apibegin.h delete mode 100644 src/asmjit/asmjit_apiend.h delete mode 100644 src/asmjit/asmjit_build.h delete mode 100644 src/asmjit/base.h delete mode 100644 src/asmjit/base/arch.cpp delete mode 100644 src/asmjit/base/arch.h delete mode 100644 src/asmjit/base/assembler.cpp delete mode 100644 src/asmjit/base/assembler.h delete mode 100644 src/asmjit/base/codebuilder.cpp delete mode 100644 src/asmjit/base/codebuilder.h delete mode 100644 src/asmjit/base/codecompiler.cpp delete mode 100644 src/asmjit/base/codecompiler.h delete mode 100644 src/asmjit/base/codeemitter.cpp delete mode 100644 src/asmjit/base/codeemitter.h delete mode 100644 src/asmjit/base/codeholder.cpp delete mode 100644 src/asmjit/base/codeholder.h delete mode 100644 src/asmjit/base/constpool.cpp delete mode 100644 src/asmjit/base/constpool.h delete mode 100644 src/asmjit/base/cpuinfo.cpp delete mode 100644 src/asmjit/base/cpuinfo.h delete mode 100644 src/asmjit/base/func.cpp delete mode 100644 src/asmjit/base/func.h delete mode 100644 src/asmjit/base/globals.cpp delete mode 100644 src/asmjit/base/globals.h delete mode 100644 src/asmjit/base/inst.cpp delete mode 100644 src/asmjit/base/inst.h delete mode 100644 src/asmjit/base/logging.cpp delete mode 100644 src/asmjit/base/logging.h delete mode 100644 src/asmjit/base/misc_p.h delete mode 100644 src/asmjit/base/operand.cpp delete mode 100644 src/asmjit/base/operand.h delete mode 100644 src/asmjit/base/osutils.cpp delete mode 100644 src/asmjit/base/osutils.h delete mode 100644 src/asmjit/base/regalloc.cpp delete mode 100644 src/asmjit/base/regalloc_p.h delete mode 100644 src/asmjit/base/runtime.cpp delete mode 100644 src/asmjit/base/runtime.h delete mode 100644 src/asmjit/base/string.cpp delete mode 100644 src/asmjit/base/string.h delete mode 100644 src/asmjit/base/utils.cpp delete mode 100644 src/asmjit/base/utils.h delete mode 100644 src/asmjit/base/vmem.cpp delete mode 100644 src/asmjit/base/vmem.h delete mode 100644 src/asmjit/base/zone.cpp delete mode 100644 src/asmjit/base/zone.h create mode 100644 src/asmjit/core.h create mode 100644 src/asmjit/core/arch.cpp create mode 100644 src/asmjit/core/arch.h create mode 100644 src/asmjit/core/assembler.cpp create mode 100644 src/asmjit/core/assembler.h create mode 100644 src/asmjit/core/build.h create mode 100644 src/asmjit/core/builder.cpp create mode 100644 src/asmjit/core/builder.h create mode 100644 src/asmjit/core/callconv.cpp create mode 100644 src/asmjit/core/callconv.h create mode 100644 src/asmjit/core/codebufferwriter_p.h create mode 100644 src/asmjit/core/codeholder.cpp create mode 100644 src/asmjit/core/codeholder.h create mode 100644 src/asmjit/core/compiler.cpp create mode 100644 src/asmjit/core/compiler.h create mode 100644 src/asmjit/core/constpool.cpp create mode 100644 src/asmjit/core/constpool.h create mode 100644 src/asmjit/core/cpuinfo.cpp create mode 100644 src/asmjit/core/cpuinfo.h rename src/asmjit/{base/simdtypes.h => core/datatypes.h} (54%) create mode 100644 src/asmjit/core/emitter.cpp create mode 100644 src/asmjit/core/emitter.h create mode 100644 src/asmjit/core/features.h create mode 100644 src/asmjit/core/func.cpp create mode 
100644 src/asmjit/core/func.h create mode 100644 src/asmjit/core/globals.cpp create mode 100644 src/asmjit/core/globals.h create mode 100644 src/asmjit/core/inst.cpp create mode 100644 src/asmjit/core/inst.h create mode 100644 src/asmjit/core/jitallocator.cpp create mode 100644 src/asmjit/core/jitallocator.h create mode 100644 src/asmjit/core/jitruntime.cpp create mode 100644 src/asmjit/core/jitruntime.h create mode 100644 src/asmjit/core/logging.cpp create mode 100644 src/asmjit/core/logging.h create mode 100644 src/asmjit/core/misc_p.h create mode 100644 src/asmjit/core/operand.cpp create mode 100644 src/asmjit/core/operand.h create mode 100644 src/asmjit/core/osutils.cpp create mode 100644 src/asmjit/core/osutils.h create mode 100644 src/asmjit/core/raassignment_p.h create mode 100644 src/asmjit/core/rabuilders_p.h create mode 100644 src/asmjit/core/radefs_p.h create mode 100644 src/asmjit/core/ralocal.cpp create mode 100644 src/asmjit/core/ralocal_p.h create mode 100644 src/asmjit/core/rapass.cpp create mode 100644 src/asmjit/core/rapass_p.h create mode 100644 src/asmjit/core/rastack.cpp create mode 100644 src/asmjit/core/rastack_p.h create mode 100644 src/asmjit/core/string.cpp create mode 100644 src/asmjit/core/string.h create mode 100644 src/asmjit/core/support.cpp create mode 100644 src/asmjit/core/support.h create mode 100644 src/asmjit/core/target.cpp create mode 100644 src/asmjit/core/target.h create mode 100644 src/asmjit/core/type.cpp create mode 100644 src/asmjit/core/type.h create mode 100644 src/asmjit/core/virtmem.cpp create mode 100644 src/asmjit/core/virtmem.h create mode 100644 src/asmjit/core/zone.cpp create mode 100644 src/asmjit/core/zone.h create mode 100644 src/asmjit/core/zonehash.cpp create mode 100644 src/asmjit/core/zonehash.h create mode 100644 src/asmjit/core/zonelist.cpp create mode 100644 src/asmjit/core/zonelist.h create mode 100644 src/asmjit/core/zonestack.cpp create mode 100644 src/asmjit/core/zonestack.h create mode 100644 src/asmjit/core/zonestring.h create mode 100644 src/asmjit/core/zonetree.cpp create mode 100644 src/asmjit/core/zonetree.h create mode 100644 src/asmjit/core/zonevector.cpp create mode 100644 src/asmjit/core/zonevector.h create mode 100644 src/asmjit/x86/x86callconv.cpp create mode 100644 src/asmjit/x86/x86callconv_p.h create mode 100644 src/asmjit/x86/x86features.cpp create mode 100644 src/asmjit/x86/x86features.h delete mode 100644 src/asmjit/x86/x86inst.cpp delete mode 100644 src/asmjit/x86/x86inst.h create mode 100644 src/asmjit/x86/x86instapi.cpp create mode 100644 src/asmjit/x86/x86instapi_p.h create mode 100644 src/asmjit/x86/x86instdb.cpp create mode 100644 src/asmjit/x86/x86instdb.h create mode 100644 src/asmjit/x86/x86instdb_p.h delete mode 100644 src/asmjit/x86/x86instimpl.cpp delete mode 100644 src/asmjit/x86/x86instimpl_p.h delete mode 100644 src/asmjit/x86/x86misc.h create mode 100644 src/asmjit/x86/x86opcode_p.h delete mode 100644 src/asmjit/x86/x86operand_regs.cpp create mode 100644 src/asmjit/x86/x86rapass.cpp create mode 100644 src/asmjit/x86/x86rapass_p.h delete mode 100644 src/asmjit/x86/x86regalloc.cpp delete mode 100644 src/asmjit/x86/x86regalloc_p.h create mode 100644 test/asmjit_test_x86_sections.cpp delete mode 100755 tools/configure-mac-xcode.sh create mode 100755 tools/configure-makefiles.sh create mode 100755 tools/configure-ninja.sh delete mode 100755 tools/configure-unix-makefiles-dbg.sh delete mode 100755 tools/configure-unix-makefiles-rel.sh create mode 100644 tools/configure-vs-x64.bat create mode 
100644 tools/configure-vs-x86.bat delete mode 100644 tools/configure-win-mingw-dbg.bat delete mode 100644 tools/configure-win-mingw-rel.bat delete mode 100644 tools/configure-win-vs2010-x64.bat delete mode 100644 tools/configure-win-vs2010-x86.bat delete mode 100644 tools/configure-win-vs2013-x64.bat delete mode 100644 tools/configure-win-vs2013-x86.bat delete mode 100644 tools/configure-win-vs2015-x64.bat delete mode 100644 tools/configure-win-vs2015-x86.bat create mode 100755 tools/configure-xcode.sh delete mode 100644 tools/generate-arm.js delete mode 100644 tools/generate-base.js delete mode 100644 tools/generate-x86.js create mode 100644 tools/tablegen-x86.js create mode 100644 tools/tablegen.js create mode 100755 tools/tablegen.sh diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index 12f42d4..0000000 --- a/.appveyor.yml +++ /dev/null @@ -1,54 +0,0 @@ -version: "{build}" - -image: Visual Studio 2017 -clone_folder: c:\dev\asmjit - -environment: - matrix: - - BUILD_TYPE: Debug - MINGW_PATH: C:\msys64\mingw64 - TOOLCHAIN: "MinGW Makefiles" - - - BUILD_TYPE: Release - MINGW_PATH: C:\msys64\mingw64 - TOOLCHAIN: "MinGW Makefiles" - - - BUILD_TYPE: Debug - TOOLCHAIN: "Visual Studio 15 2017" - - - BUILD_TYPE: Release - TOOLCHAIN: "Visual Studio 15 2017" - - - BUILD_TYPE: Debug - TOOLCHAIN: "Visual Studio 15 2017 Win64" - - - BUILD_TYPE: Release - TOOLCHAIN: "Visual Studio 15 2017 Win64" - -install: - - if "%TOOLCHAIN%"=="MinGW Makefiles" set PATH=%PATH:C:\Program Files\Git\usr\bin;=% - - if "%TOOLCHAIN%"=="MinGW Makefiles" set PATH=%MINGW_PATH%\bin;%PATH% - -build_script: - - cd c:\dev\asmjit - - md build - - cd build - - if "%TOOLCHAIN%"=="MinGW Makefiles" ( - cmake .. -G"%TOOLCHAIN%" -DCMAKE_PREFIX_PATH="%MINGW_PATH%" -DCMAKE_BUILD_TYPE="%BUILD_TYPE%" -DASMJIT_BUILD_TEST=1 && - mingw32-make - ) - else ( - cmake .. -G"%TOOLCHAIN%" -DASMJIT_BUILD_TEST=1 && - msbuild /m /nologo /v:quiet /p:Configuration=%BUILD_TYPE% asmjit.sln - ) - -test_script: - - if "%TOOLCHAIN%"=="MinGW Makefiles" ( - cd c:\dev\asmjit\build - ) - else ( - cd c:\dev\asmjit\build\%BUILD_TYPE% - ) - - asmjit_test_unit.exe - - asmjit_test_x86_asm.exe - - asmjit_test_x86_cc.exe diff --git a/.gitignore b/.gitignore index 71badb6..34bbad2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.vscode .kdev4 *.kdev4 build diff --git a/.travis.yml b/.travis.yml index dd4fbda..e3b7fda 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,57 +1,254 @@ language: cpp -os: [linux, osx] -compiler: [gcc, clang] - -addons: - apt: - packages: [cmake, gcc-multilib, g++-multilib, valgrind] - sources: [ubuntu-toolchain-r-test] +git: + depth: false env: - matrix: - - BUILD_TYPE=Debug CFLAGS=-m32 CXXFLAGS=-m32 - - BUILD_TYPE=Debug CFLAGS=-m64 CXXFLAGS=-m64 - - BUILD_TYPE=Release CFLAGS=-m32 CXXFLAGS=-m32 - - BUILD_TYPE=Release CFLAGS=-m64 CXXFLAGS=-m64 + global: + - BUILD_TOOLCHAIN="Unix Makefiles" + - MAKEFLAGS="-j2" + +dist: xenial matrix: - exclude: - - os: osx - compiler: gcc - - os: linux - compiler: clang # Clang requires standard library used by GCC 4.9+, which fails on Travis. 
+ include: + - name: "Linux GCC 4.8 [32-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-4.8, g++-4.8-multilib, "linux-libc-dev:i386"] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-4.8 && CXX=g++-4.8" CXXFLAGS=-m32 LDFLAGS=-m32 -install: - - | - if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then - CMAKE_PACKAGE="https://cmake.org/files/v3.6/cmake-3.6.2-Linux-x86_64.tar.gz" - mkdir -p deps/cmake - wget --no-check-certificate --quiet -O - ${CMAKE_PACKAGE} | tar --strip-components=1 -xz -C deps/cmake - export PATH=${TRAVIS_BUILD_DIR}/deps/cmake/bin:${PATH} - else - brew update - brew outdated cmake || brew upgrade cmake - fi + - name: "Linux GCC 4.8 [64-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-4.8] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-4.8 && CXX=g++-4.8" + + - name: "Linux GCC 4.9 [32-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-4.9, g++-4.9-multilib, "linux-libc-dev:i386"] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-4.9 && CXX=g++-4.9" CXXFLAGS=-m32 LDFLAGS=-m32 + + - name: "Linux GCC 4.9 [64-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-4.9] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-4.9 && CXX=g++-4.9" + + - name: "Linux GCC 5.X [32-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-5, g++-5-multilib, "linux-libc-dev:i386"] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-5 && CXX=g++-5" CXXFLAGS=-m32 LDFLAGS=-m32 + + - name: "Linux GCC 5.X [64-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-5] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-5 && CXX=g++-5" + + - name: "Linux GCC 6.X [32-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-6, g++-6-multilib, "linux-libc-dev:i386"] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-6 && CXX=g++-6" CXXFLAGS=-m32 LDFLAGS=-m32 + + - name: "Linux GCC 6.X [64-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-6] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-6 && CXX=g++-6" + + - name: "Linux GCC 7.X [32-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-7, g++-7-multilib, "linux-libc-dev:i386"] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-7 && CXX=g++-7" CXXFLAGS=-m32 LDFLAGS=-m32 + + - name: "Linux GCC 7.X [64-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-7] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-7 && CXX=g++-7" + + - name: "Linux GCC 8.X [32-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-8, g++-8-multilib, "linux-libc-dev:i386"] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-8 && CXX=g++-8" CXXFLAGS=-m32 LDFLAGS=-m32 + + - name: "Linux GCC 8.X [32-bit] [REL]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-8, g++-8-multilib, "linux-libc-dev:i386"] + env: BUILD_MATRIX="BUILD_TYPE=Release && CC=gcc-8 && CXX=g++-8" CXXFLAGS=-m32 LDFLAGS=-m32 + + - name: "Linux GCC 8.X [64-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-8] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-8 && CXX=g++-8" + + - name: "Linux GCC 8.X [64-bit] [REL]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-8] + env: 
BUILD_MATRIX="BUILD_TYPE=Release && CC=gcc-8 && CXX=g++-8" + + - name: "Linux GCC 9.X [32-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-9, g++-9-multilib, "linux-libc-dev:i386"] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-9 && CXX=g++-9" CXXFLAGS=-m32 LDFLAGS=-m32 + + - name: "Linux GCC 9.X [32-bit] [REL]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-9, g++-9-multilib, "linux-libc-dev:i386"] + env: BUILD_MATRIX="BUILD_TYPE=Release && CC=gcc-9 && CXX=g++-9" CXXFLAGS=-m32 LDFLAGS=-m32 + + - name: "Linux GCC 9.X [64-bit] [DBG]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-9] + env: BUILD_MATRIX="BUILD_TYPE=Debug && CC=gcc-9 && CXX=g++-9" + + - name: "Linux GCC 9.X [64-bit] [REL]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [g++-9] + env: BUILD_MATRIX="BUILD_TYPE=Release && CC=gcc-9 && CXX=g++-9" + + - name: "Linux GCC Default [64-bit] [DBG + Valgrind]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [valgrind] + env: BUILD_MATRIX="BUILD_TYPE=Debug" USE_VALGRIND=1 + + - name: "Linux GCC Default [64-bit] [REL + Valgrind]" + os: linux + addons: + apt: + sources: [ubuntu-toolchain-r-test] + packages: [valgrind] + env: BUILD_MATRIX="BUILD_TYPE=Release" USE_VALGRIND=1 + + - name: "OSX Clang XCode 9.4 [32-bit] [DBG]" + os: osx + osx_image: xcode9.4 + env: BUILD_MATRIX="BUILD_TYPE=Debug" CXXFLAGS=-m32 && LDFLAGS=-m32 + + - name: "OSX Clang XCode 9.4 [32-bit] [REL]" + os: osx + osx_image: xcode9.4 + env: BUILD_MATRIX="BUILD_TYPE=Release" CXXFLAGS=-m32 && LDFLAGS=-m32 + + - name: "OSX Clang XCode 9.4 [64-bit] [DBG]" + os: osx + osx_image: xcode9.4 + env: BUILD_MATRIX="BUILD_TYPE=Debug" + + - name: "OSX Clang XCode 9.4 [64-bit] [REL]" + os: osx + osx_image: xcode9.4 + env: BUILD_MATRIX="BUILD_TYPE=Release" + + - name: "OSX Clang XCode 10.2 [64-bit] [DBG]" + os: osx + osx_image: xcode10.2 + env: BUILD_MATRIX="BUILD_TYPE=Debug" + + - name: "OSX Clang XCode 10.2 [64-bit] [REL]" + os: osx + osx_image: xcode10.2 + env: BUILD_MATRIX="BUILD_TYPE=Release" + + - name: "Windows VS2017 [32-bit] [DBG]" + os: windows + env: BUILD_MATRIX="BUILD_TYPE=Debug" BUILD_TOOLCHAIN="Visual Studio 15 2017" + + - name: "Windows VS2017 [32-bit] [REL]" + os: windows + env: BUILD_MATRIX="BUILD_TYPE=Release" BUILD_TOOLCHAIN="Visual Studio 15 2017" + + - name: "Windows VS2017 [64-bit] [DBG]" + os: windows + env: BUILD_MATRIX="BUILD_TYPE=Debug" BUILD_TOOLCHAIN="Visual Studio 15 2017 Win64" + + - name: "Windows VS2017 [64-bit] [REL]" + os: windows + env: BUILD_MATRIX="BUILD_TYPE=Release" BUILD_TOOLCHAIN="Visual Studio 15 2017 Win64" + +before_install: + - eval "$BUILD_MATRIX" before_script: - mkdir build - cd build - - cmake --version - - cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE="$BUILD_TYPE" -DASMJIT_BUILD_TEST=1 + - | + if [[ "$BUILD_TOOLCHAIN" =~ ^Visual\ Studio ]]; then + cmake .. -G"${BUILD_TOOLCHAIN}" -DASMJIT_TEST=1 + else + cmake .. -G"${BUILD_TOOLCHAIN}" -DASMJIT_TEST=1 -DCMAKE_PREFIX_PATH="$MINGW_PATH" -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" + fi - cd .. script: - cd build - - make - - cd .. + - | + if [[ "$BUILD_TOOLCHAIN" =~ ^Visual\ Studio ]]; then + cmake --build . --config ${BUILD_TYPE} -- -nologo -v:quiet + cd ${BUILD_TYPE} + else + cmake --build . 
+ fi - - ./build/asmjit_test_unit - - ./build/asmjit_test_opcode > /dev/null - - ./build/asmjit_test_x86_asm - - ./build/asmjit_test_x86_cc + - | + if [ "$USE_VALGRIND" = "1" ]; then + RUN_CMD="valgrind --leak-check=full --show-reachable=yes --track-origins=yes" + fi -after_success: - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then valgrind --leak-check=full --show-reachable=yes ./build/asmjit_test_unit; fi; - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then valgrind --leak-check=full --show-reachable=yes ./build/asmjit_test_x86_asm; fi; - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then valgrind --leak-check=full --show-reachable=yes ./build/asmjit_test_x86_cc; fi; + - eval "$RUN_CMD ./asmjit_test_unit --quick" + - eval "$RUN_CMD ./asmjit_test_opcode > /dev/null" + - eval "$RUN_CMD ./asmjit_test_x86_asm" + - eval "$RUN_CMD ./asmjit_test_x86_cc" + - eval "$RUN_CMD ./asmjit_test_x86_sections" diff --git a/BREAKING.md b/BREAKING.md deleted file mode 100644 index 2ff5710..0000000 --- a/BREAKING.md +++ /dev/null @@ -1,75 +0,0 @@ -2016-07-20 ----------- - - * Global `asmjit_cast<>` removed and introduced a more type-safe `asmjit::ptr_cast<>`, which can cast a function to `void*` (and vice-versa), but will refuse to cast a function to `void**`, for example. Just change `asmjit_cast` to `asmjit::ptr_cast` and everything should work as usual. As a consequence, the Runtime now contains a typesafe (templated) `add()` and `remove()` methods that accept a function type directly, no need to cast manually to `void*` and `void**`. If you use your own runtime rename your virtual methods from `add` to `_add` and from `release` to `_release` and enjoy the type-safe wrappers. - * Removed `Logger::Style` and `uint32_t style` parameter in Logging API. It was never used for anything so it was removed. - * There is a new `CodeEmitter` base class that defines assembler building blocks that are implemented by `Assembler` and `CodeBuilder`. `CodeCompiler` is now based on `CodeBuilder` and shares its instruction storage functionality. Most API haven't changed, just base classes and new functionality has been added. It's now possible to serialize code for further processing by using `CodeBuilder`. - * Renamed compile-time macro `ASMJIT_DISABLE_LOGGER` to `ASMJIT_DISABLE_LOGGING`. There is a new `Formatter` class which is also disabled with this option. - - * Operand API is mostly intact, omitting Var/Reg should fix most compile-time errors. There is now no difference between a register index and register id internally. If you ever used `reg.getRegIndex()` then use `reg.getId()` instead. Also renamed `isInitialized()` to `isValid()`. - * There are much more changes, but they are mostly internal and keeping most operand methods compatible. - * Added new functionality into `asmjit::x86` namespace related to operands. - * X86Xmm/X86Ymm/X86Zmm register operands now inherit from X86Vec. - * Register kind (was register class) is now part of `Reg` operand, you can get it by using `reg.getRegKind()`. - * Register class enum moved to `X86Reg`, `kX86RegClassGp` is now `X86Reg::kKindGp`. - * Register type enum moved to `X86Reg`, `kX86RegTypeXmm` is now `X86Reg::kRegXmm`. - * Register index enum moved to `X86Gp`, `kX86RegIndexAx` is now `X86Gp::kIdAx`. - * Segment index enum moved to `X86Seg`, `kX86SegFs` is now `X86Seg::kIdFs`. - * If you used `asmjit::noOperand` for any reason, change it to `Operand()`. 
- - * CodeBuilder and CodeCompiler now contain different prefix of their nodes to distinguish between them: - - * Rename `HLNode` to `CBNode` (CodeBuilder node). - * Rename all other `HL` to `CB`. - * Rename `X86FuncNode` to `CCFunc` (CodeCompiler function), no more arch specific prefixes here. - * Rename `X86CallNode` to `CCFuncCall` (CodeCompiler function-call), also, no more X86 prefix. - - * AsmJit now uses CodeHolder to hold code. You don't need `Runtime` anymore if you don't plan to execute the code or if you plan to relocate it yourself: - -```c++ -CodeHolder code; // Create CodeHolder (holds the code). -code.init(CodeInfo(ArchInfo::kIdX64)); // Initialize CodeHolder to hold X64 code. - -// Everything else as usual: -X86Assembler a(&code); // Create the emitter (Assembler, CodeBuilder, CodeCompiler). -``` - - * Initializing with JitRuntime involves using CodeHolder: - -```c++ -JitRuntime rt; // Create JitRuntime. - -CodeHolder code; // Create CodeHolder. -code.init(rt.getCodeInfo()); // Initialize CodeHolder to match the JitRuntime. - -X86Assembler a(&code); // Create the emitter (Assembler, CodeBuilder, CodeCompiler). -... // Generate some code. - -typedef void (*SomeFunc)(void); // Prototype of the function you generated. - -SomeFunc func; // Function pointer. -Error err = rt.add(&func, &code); // Add the generated function to the runtime. - -rt.remove(func); // Remove the generated function from the runtime. -``` - - * Merged virtual registers (known as variables or Vars) into registers themselves, making the interface simpler: - -```c++ -X86GpReg/X86GpVar merged to X86Gp -X86MmReg/X86MmVar merged to X86Mm -X86XmmReg/X86XmmVar merged to X86Xmm -X86YmmReg/X86YmmVar merged to X86Ymm -``` - - * Refactored instruction database, moved many enums related to instructions into `X86Inst`. Also some instructions were wrong (having wrong signature in Assembler and Compiler) and were fixed. - -```c++ -X86InstInfo renamed to X86Inst -kX86InstIdSomething renamed to X86Inst::kIdSomething -kX86InstOptionSomething renamed to X86Inst::kOptionSomething -kX86CondSomething renamed to X86Inst::kCondSomething -kX86CmpSomething renamed to X86Inst::kCmpSomething -kX86VCmpSomething renamed to X86Inst::kVCmpSomething -kX86PrefetchSomething renamed to X86Inst::kPrefetchSomething -``` diff --git a/CMakeLists.txt b/CMakeLists.txt index a3ab038..eb1c7eb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,11 +1,34 @@ -cmake_minimum_required(VERSION 3.1) +cmake_minimum_required(VERSION 3.8 FATAL_ERROR) + +cmake_policy(PUSH) +cmake_policy(SET CMP0063 NEW) # Honor visibility properties. + +include(CheckCXXCompilerFlag) # Don't create a project if it was already created by another CMakeLists.txt. -# This allows one library to embed another library without a project collision. -if(NOT CMAKE_PROJECT_NAME OR "${CMAKE_PROJECT_NAME}" STREQUAL "asmjit") - project(asmjit C CXX) +# This allows one library to embed another library without making a collision. 
+if (NOT CMAKE_PROJECT_NAME OR "${CMAKE_PROJECT_NAME}" STREQUAL "asmjit") + project(asmjit CXX) endif() +# ============================================================================= +# [AsmJit - Deprecated] +# ============================================================================= + +if (DEFINED ASMJIT_BUILD_EMBED) + message(DEPRECATION "ASMJIT_BUILD_EMBED is deprecated, use ASMJIT_EMBED") + set(ASMJIT_EMBED "${ASMJIT_BUILD_EMBED}") +endif() + +if (DEFINED ASMJIT_BUILD_STATIC) + message(DEPRECATION "ASMJIT_BUILD_STATIC is deprecated, use ASMJIT_STATIC") + set(ASMJIT_STATIC "${ASMJIT_BUILD_STATIC}") +endif() + +# ============================================================================= +# [AsmJit - Configuration] +# ============================================================================= + if (NOT DEFINED ASMJIT_EMBED) set(ASMJIT_EMBED FALSE) endif() @@ -22,224 +45,334 @@ if (NOT DEFINED ASMJIT_BUILD_X86) set(ASMJIT_BUILD_X86 FALSE) endif() -if (NOT DEFINED ASMJIT_BUILD_TEST) - set(ASMJIT_BUILD_TEST FALSE) +if (NOT DEFINED ASMJIT_TEST) + set(ASMJIT_TEST FALSE) endif() -# ============================================================================= -# [AsmJit - Configuration] -# ============================================================================= +# EMBED implies STATIC. +if (ASMJIT_EMBED AND NOT ASMJIT_STATIC) + set(ASMJIT_STATIC TRUE) +endif() set(ASMJIT_DIR "${CMAKE_CURRENT_LIST_DIR}" CACHE PATH "Location of 'asmjit'") +set(ASMJIT_TEST ${ASMJIT_TEST} CACHE BOOL "Build 'asmjit' test applications") set(ASMJIT_EMBED ${ASMJIT_EMBED} CACHE BOOL "Embed 'asmjit' library (no targets)") set(ASMJIT_STATIC ${ASMJIT_STATIC} CACHE BOOL "Build 'asmjit' library as static") -set(ASMJIT_BUILD_ARM ${ASMJIT_BUILD_ARM} CACHE BOOL "Build ARM32/ARM64 backends") -set(ASMJIT_BUILD_X86 ${ASMJIT_BUILD_X86} CACHE BOOL "Build X86/X64 backends") -set(ASMJIT_BUILD_TEST ${ASMJIT_BUILD_TEST} CACHE BOOL "Build 'asmjit_test' applications") +set(ASMJIT_SANITIZE ${ASMJIT_SANITIZE} CACHE BOOL "Build with C/C++ sanitizers enabled") +set(ASMJIT_BUILD_X86 ${ASMJIT_BUILD_X86} CACHE BOOL "Build X86 backends (X86 and X86_64)") +set(ASMJIT_BUILD_ARM ${ASMJIT_BUILD_ARM} CACHE BOOL "Build ARM backends") + +# ============================================================================= +# [AsmJit - Utilities] +# ============================================================================= + +function(asmjit_detect_cflags out) + set(out_array ${${out}}) + foreach(flag ${ARGN}) + string(REGEX REPLACE "[+]" "x" flag_signature "${flag}") + string(REGEX REPLACE "[-=:;/.\]" "_" flag_signature "${flag_signature}") + check_cxx_compiler_flag(${flag} "__CxxFlag_${flag_signature}") + if (${__CxxFlag_${flag_signature}}) + list(APPEND out_array "${flag}") + endif() + endforeach() + set(${out} "${out_array}" PARENT_SCOPE) +endfunction() + +function(asmjit_add_target target target_type src deps cflags cflags_dbg cflags_rel) + if ("${target_type}" STREQUAL "EXECUTABLE") + add_executable(${target} ${src}) + else() + add_library(${target} ${target_type} ${src}) + endif() + + target_link_libraries(${target} PRIVATE ${deps}) + + # target_link_options was added in cmake 3.13, which doesn't work for us. 
+ # target_link_options(${target} PRIVATE ${ASMJIT_SANITIZE_FLAGS}) + foreach(link_flag ${ASMJIT_SANITIZE_FLAGS}) + set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS " ${link_flag}") + endforeach() + + set_property(TARGET ${target} PROPERTY CXX_EXTENSIONS NO) + set_property(TARGET ${target} PROPERTY CXX_VISIBILITY_PRESET hidden) + target_compile_options(${target} PRIVATE ${cflags} ${ASMJIT_SANITIZE_FLAGS} $<$:${cflags_dbg}> $<$>:${cflags_rel}>) + target_compile_features(${target} PUBLIC cxx_std_11) +endfunction() # ============================================================================= # [AsmJit - Project] # ============================================================================= -include("${ASMJIT_DIR}/CxxProject.cmake") +set(ASMJIT_INCLUDE_DIRS "${ASMJIT_DIR}/src") # Include directory is the same as source dir. +set(ASMJIT_DEPS "") # AsmJit dependencies (libraries) for the linker. +set(ASMJIT_LIBS "") # Dependencies of libs/apps that want to use AsmJit. +set(ASMJIT_CFLAGS "") # Public compiler flags. +set(ASMJIT_PRIVATE_CFLAGS "") # Private compiler flags independent of build type. +set(ASMJIT_PRIVATE_CFLAGS_DBG "") # Private compiler flags used by debug builds. +set(ASMJIT_PRIVATE_CFLAGS_REL "") # Private compiler flags used by release builds. +set(ASMJIT_SANITIZE_FLAGS "") # Sanitizer flags used as compile and link flags. -cxx_project(asmjit) -cxx_detect_standard(ASMJIT_PRIVATE_CFLAGS) +# TODO: Backward compatibility. +set(ASMJIT_INCLUDE_DIR "${ASMJIT_INCLUDE_DIRS}") -if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") - set(ASMJIT_PRIVATE_LFLAGS "/OPT:REF /OPT:ICF") +if (NOT ASMJIT_NO_CUSTOM_FLAGS) + if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC" OR "x${CMAKE_CXX_SIMULATE_ID}" STREQUAL "xMSVC") + list(APPEND ASMJIT_PRIVATE_CFLAGS + -MP # [+] Multi-Process Compilation. + -GR- # [-] Runtime type information. + -GF # [+] Eliminate duplicate strings. + -Zc:inline # [+] Remove unreferenced COMDAT. + -Zc:strictStrings # [+] Strict const qualification of string literals. + -Zc:threadSafeInit- # [-] Thread-safe statics. + -W4) # [+] Warning level 4. - list(APPEND ASMJIT_PRIVATE_CFLAGS /GF) - list(APPEND ASMJIT_PRIVATE_CFLAGS_DBG /GS /GR-) - list(APPEND ASMJIT_PRIVATE_CFLAGS_REL /Oi /Oy /GS- /GR-) + list(APPEND ASMJIT_PRIVATE_CFLAGS_DBG + -GS) # [+] Buffer security-check. - # Enable multi-process compilation. - if(NOT MSVC60 AND NOT MSVC70 AND NOT MSVC71) - list(APPEND ASMJIT_PRIVATE_CFLAGS /MP) + list(APPEND ASMJIT_PRIVATE_CFLAGS_REL + -GS- # [-] Buffer security-check. + -O2 # [+] Favor speed over size. + -Oi) # [+] Generate intrinsic functions. + elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(GNU|Clang|AppleClang)$") + list(APPEND ASMJIT_PRIVATE_CFLAGS -Wall -Wextra) + list(APPEND ASMJIT_PRIVATE_CFLAGS -fno-math-errno -fno-threadsafe-statics) + list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -O2) + asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS_REL -fmerge-all-constants) endif() endif() -if("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(GNU|Clang)$") - cxx_detect_cflags(ASMJIT_PRIVATE_CFLAGS - "-fno-tree-vectorize" - "-fvisibility=hidden" - "-Winconsistent-missing-override") - cxx_detect_cflags(ASMJIT_PRIVATE_CFLAGS_REL - "-O2" # CMake by default uses -O3, which does nothing useful. 
- "-fno-keep-static-consts" - "-fmerge-all-constants") +if (ASMJIT_SANITIZE AND "${CMAKE_CXX_COMPILER_ID}" MATCHES "^(GNU|Clang|AppleClang)$") + asmjit_detect_cflags(ASMJIT_SANITIZE_FLAGS "-fsanitize=undefined") endif() -if(WIN32) - list(APPEND ASMJIT_PRIVATE_CFLAGS "${CXX_DEFINE}_UNICODE") -else() +if (NOT WIN32) list(APPEND ASMJIT_DEPS pthread) endif() -if("${CMAKE_SYSTEM_NAME}" MATCHES "Linux") +if ("${CMAKE_SYSTEM_NAME}" MATCHES "Linux") list(APPEND ASMJIT_DEPS rt) endif() set(ASMJIT_LIBS ${ASMJIT_DEPS}) -if(NOT ASMJIT_EMBED) +if (NOT ASMJIT_EMBED) list(INSERT ASMJIT_LIBS 0 asmjit) endif() -foreach(BUILD_OPTION - ASMJIT_BUILD_ARM - ASMJIT_BUILD_X86 - ASMJIT_DISABLE_BUILDER - ASMJIT_DISABLE_COMPILER - ASMJIT_DISABLE_TEXT - ASMJIT_DISABLE_LOGGING - ASMJIT_DISABLE_VALIDATION) - if(${BUILD_OPTION}) - List(APPEND ASMJIT_CFLAGS "${CXX_DEFINE}${BUILD_OPTION}") - List(APPEND ASMJIT_PRIVATE_CFLAGS "${CXX_DEFINE}${BUILD_OPTION}") +if (ASMJIT_EMBED) + set(ASMJIT_TARGET_TYPE "EMBED") +elseif (ASMJIT_STATIC) + set(ASMJIT_TARGET_TYPE "STATIC") +else() + set(ASMJIT_TARGET_TYPE "SHARED") +endif() + +foreach(build_option ASMJIT_STATIC + ASMJIT_BUILD_X86 + #ASMJIT_BUILD_ARM + ASMJIT_BUILD_A64 + ASMJIT_NO_JIT + ASMJIT_NO_LOGGING + ASMJIT_NO_BUILDER + ASMJIT_NO_COMPILER + ASMJIT_NO_TEXT + ASMJIT_NO_VALIDATION + ASMJIT_NO_INTROSPECTION) + if (${build_option}) + List(APPEND ASMJIT_CFLAGS "-D${build_option}") + List(APPEND ASMJIT_PRIVATE_CFLAGS "-D${build_option}") endif() endforeach() -cxx_project_info(asmjit) - # ============================================================================= # [AsmJit - Source] # ============================================================================= +set(ASMJIT_SRC_LIST + asmjit/asmjit.h + + asmjit/core.h + asmjit/core/build.h + asmjit/core/arch.cpp + asmjit/core/arch.h + asmjit/core/assembler.cpp + asmjit/core/assembler.h + asmjit/core/builder.cpp + asmjit/core/builder.h + asmjit/core/callconv.cpp + asmjit/core/callconv.h + asmjit/core/codebufferwriter_p.h + asmjit/core/codeholder.cpp + asmjit/core/codeholder.h + asmjit/core/compiler.cpp + asmjit/core/compiler.h + asmjit/core/constpool.cpp + asmjit/core/constpool.h + asmjit/core/cpuinfo.cpp + asmjit/core/cpuinfo.h + asmjit/core/datatypes.h + asmjit/core/emitter.cpp + asmjit/core/emitter.h + asmjit/core/features.h + asmjit/core/func.cpp + asmjit/core/func.h + asmjit/core/globals.cpp + asmjit/core/globals.h + asmjit/core/inst.cpp + asmjit/core/inst.h + asmjit/core/jitallocator.cpp + asmjit/core/jitallocator.h + asmjit/core/jitruntime.cpp + asmjit/core/jitruntime.h + asmjit/core/logging.cpp + asmjit/core/logging.h + asmjit/core/misc_p.h + asmjit/core/operand.cpp + asmjit/core/operand.h + asmjit/core/osutils.cpp + asmjit/core/osutils.h + asmjit/core/raassignment_p.h + asmjit/core/rabuilders_p.h + asmjit/core/radefs_p.h + asmjit/core/ralocal.cpp + asmjit/core/ralocal_p.h + asmjit/core/rapass.cpp + asmjit/core/rapass_p.h + asmjit/core/rastack.cpp + asmjit/core/rastack_p.h + asmjit/core/string.cpp + asmjit/core/string.h + asmjit/core/support.cpp + asmjit/core/support.h + asmjit/core/target.cpp + asmjit/core/target.h + asmjit/core/type.cpp + asmjit/core/type.h + asmjit/core/virtmem.cpp + asmjit/core/virtmem.h + asmjit/core/zone.cpp + asmjit/core/zone.h + asmjit/core/zonehash.cpp + asmjit/core/zonehash.h + asmjit/core/zonelist.cpp + asmjit/core/zonelist.h + asmjit/core/zonestack.cpp + asmjit/core/zonestack.h + asmjit/core/zonestring.h + asmjit/core/zonetree.cpp + asmjit/core/zonetree.h + asmjit/core/zonevector.cpp + 
asmjit/core/zonevector.h + + asmjit/x86.h + asmjit/x86/x86assembler.cpp + asmjit/x86/x86assembler.h + asmjit/x86/x86builder.cpp + asmjit/x86/x86builder.h + asmjit/x86/x86callconv.cpp + asmjit/x86/x86callconv_p.h + asmjit/x86/x86compiler.cpp + asmjit/x86/x86compiler.h + asmjit/x86/x86emitter.h + asmjit/x86/x86features.cpp + asmjit/x86/x86features.h + asmjit/x86/x86globals.h + asmjit/x86/x86internal.cpp + asmjit/x86/x86internal_p.h + asmjit/x86/x86instdb.cpp + asmjit/x86/x86instdb.h + asmjit/x86/x86instdb_p.h + asmjit/x86/x86instapi.cpp + asmjit/x86/x86instapi_p.h + asmjit/x86/x86logging.cpp + asmjit/x86/x86logging_p.h + asmjit/x86/x86operand.cpp + asmjit/x86/x86operand.h + asmjit/x86/x86rapass.cpp + asmjit/x86/x86rapass_p.h +) + +#if (MSVC) +# list(APPEND ASMJIT_SRC_LIST asmjit.natvis) +#endif() + set(ASMJIT_SRC "") +foreach(_src_file ${ASMJIT_SRC_LIST}) + list(APPEND ASMJIT_SRC "${ASMJIT_DIR}/src/${_src_file}") +endforeach() +source_group(TREE "${ASMJIT_DIR}" FILES ${ASMJIT_SRC}) -cxx_add_source(asmjit ASMJIT_SRC asmjit - asmjit.h - asmjit_apibegin.h - asmjit_apiend.h - asmjit_build.h - base.h - arm.h - x86.h -) +# ============================================================================= +# [AsmJit - Summary] +# ============================================================================= -cxx_add_source(asmjit ASMJIT_SRC asmjit/base - arch.cpp - arch.h - assembler.cpp - assembler.h - codebuilder.cpp - codebuilder.h - codecompiler.cpp - codecompiler.h - codeemitter.cpp - codeemitter.h - codeholder.cpp - codeholder.h - constpool.cpp - constpool.h - cpuinfo.cpp - cpuinfo.h - func.cpp - func.h - globals.cpp - globals.h - inst.cpp - inst.h - logging.cpp - logging.h - misc_p.h - operand.cpp - operand.h - osutils.cpp - osutils.h - regalloc.cpp - regalloc_p.h - runtime.cpp - runtime.h - simdtypes.h - string.cpp - string.h - utils.cpp - utils.h - vmem.cpp - vmem.h - zone.cpp - zone.h -) - -if(0) -cxx_add_source(asmjit ASMJIT_SRC asmjit/arm - armassembler.cpp - armassembler.h - arminst.cpp - arminst.h - armoperand.cpp - armoperand_regs.cpp - armoperand.h -) -endif() - -cxx_add_source(asmjit ASMJIT_SRC asmjit/x86 - x86assembler.cpp - x86assembler.h - x86builder.cpp - x86builder.h - x86compiler.cpp - x86compiler.h - x86emitter.h - x86globals.h - x86internal.cpp - x86internal_p.h - x86inst.cpp - x86inst.h - x86instimpl.cpp - x86instimpl_p.h - x86logging.cpp - x86logging_p.h - x86misc.h - x86operand.cpp - x86operand_regs.cpp - x86operand.h - x86regalloc.cpp - x86regalloc_p.h -) +message("** AsmJit Summary **") +message(" ASMJIT_DIR=${ASMJIT_DIR}") +message(" ASMJIT_TEST=${ASMJIT_TEST}") +message(" ASMJIT_TARGET_TYPE=${ASMJIT_TARGET_TYPE}") +message(" ASMJIT_DEPS=${ASMJIT_DEPS}") +message(" ASMJIT_LIBS=${ASMJIT_LIBS}") +message(" ASMJIT_CFLAGS=${ASMJIT_CFLAGS}") +message(" ASMJIT_PRIVATE_CFLAGS=${ASMJIT_PRIVATE_CFLAGS}") +message(" ASMJIT_PRIVATE_CFLAGS_DBG=${ASMJIT_PRIVATE_CFLAGS_DBG}") +message(" ASMJIT_PRIVATE_CFLAGS_REL=${ASMJIT_PRIVATE_CFLAGS_REL}") # ============================================================================= # [AsmJit - Targets] # ============================================================================= -if(NOT ASMJIT_EMBED) - # Add `asmjit` library. - cxx_add_library(asmjit asmjit +if (NOT ASMJIT_EMBED) + # Add 'asmjit' target. 
+ asmjit_add_target(asmjit "${ASMJIT_TARGET_TYPE}" "${ASMJIT_SRC}" "${ASMJIT_DEPS}" "${ASMJIT_PRIVATE_CFLAGS}" "${ASMJIT_PRIVATE_CFLAGS_DBG}" "${ASMJIT_PRIVATE_CFLAGS_REL}") + target_include_directories(asmjit BEFORE INTERFACE ${ASMJIT_INCLUDE_DIRS}) + target_compile_options(asmjit INTERFACE ${ASMJIT_CFLAGS}) - foreach(_src_file ${ASMJIT_SRC}) - get_filename_component(_src_dir ${_src_file} PATH) - get_filename_component(_src_name ${_src_file} NAME) - string(REGEX REPLACE "^${ASMJIT_SOURCE_DIR}/" "" targetpath "${_src_dir}") - if("${_src_name}" MATCHES ".h$") - if(NOT "${_src_name}" MATCHES "_p.h$") - install(FILES ${_src_file} DESTINATION "include/${targetpath}") - endif() + # Add AsmJit::AsmJit target (alias to asmjit). + add_library(AsmJit::AsmJit ALIAS asmjit) + + # Install 'asmjit' target (shared or static). + install(TARGETS asmjit RUNTIME DESTINATION "bin" + LIBRARY DESTINATION "lib${LIB_SUFFIX}" + ARCHIVE DESTINATION "lib${LIB_SUFFIX}") + + # Install 'asmjit' header files (private headers are filtered out). + foreach(_src_file ${ASMJIT_SRC_LIST}) + if ("${_src_file}" MATCHES "\\.h$" AND NOT "${_src_file}" MATCHES "_p\\.h$") + get_filename_component(_src_dir ${_src_file} PATH) + install(FILES "${ASMJIT_DIR}/src/${_src_file}" DESTINATION "include/${_src_dir}") endif() endforeach() - # Add `asmjit` tests and samples. - if(ASMJIT_BUILD_TEST) - cxx_add_source(asmjit ASMJIT_TEST_SRC ../test asmjit_test_unit.cpp broken.cpp broken.h) - cxx_add_executable(asmjit asmjit_test_unit + # Add 'asmjit' tests. + if (ASMJIT_TEST) + set(ASMJIT_TEST_SRC test/asmjit_test_unit.cpp test/broken.cpp test/broken.h) + + asmjit_add_target(asmjit_test_unit EXECUTABLE "${ASMJIT_SRC};${ASMJIT_TEST_SRC}" "${ASMJIT_DEPS}" - "${ASMJIT_PRIVATE_CFLAGS};${CXX_DEFINE}ASMJIT_TEST;${CXX_DEFINE}ASMJIT_EMBED" + "${ASMJIT_PRIVATE_CFLAGS}" "${ASMJIT_PRIVATE_CFLAGS_DBG}" "${ASMJIT_PRIVATE_CFLAGS_REL}") + target_compile_definitions(asmjit_test_unit PRIVATE ASMJIT_TEST ASMJIT_STATIC) - foreach(_target asmjit_bench_x86 asmjit_test_opcode asmjit_test_x86_asm asmjit_test_x86_cc) - cxx_add_executable(asmjit ${_target} + foreach(_target asmjit_bench_x86 + asmjit_test_opcode + asmjit_test_x86_asm + asmjit_test_x86_cc + asmjit_test_x86_sections) + asmjit_add_target(${_target} EXECUTABLE "test/${_target}.cpp" "${ASMJIT_LIBS}" - "${ASMJIT_CFLAGS}" + "${ASMJIT_PRIVATE_CFLAGS}" "${ASMJIT_PRIVATE_CFLAGS_DBG}" "${ASMJIT_PRIVATE_CFLAGS_REL}") endforeach() endif() endif() + +cmake_policy(POP) diff --git a/CxxProject.cmake b/CxxProject.cmake deleted file mode 100644 index 7dab27d..0000000 --- a/CxxProject.cmake +++ /dev/null @@ -1,335 +0,0 @@ -# CxxProject 1.0.0 -# ---------------- - -if (NOT __CXX_INCLUDED) - set(__CXX_INCLUDED TRUE) - include(CheckCXXCompilerFlag) - - # --------------------------------------------------------------------------- - # C++ COMPILER SUPPORT: - # - # * cxx_detect_cflags(out, ...) 
- # * cxx_detect_standard(out) - # --------------------------------------------------------------------------- - function(cxx_detect_cflags out) - set(out_array ${${out}}) - - foreach(flag ${ARGN}) - string(REGEX REPLACE "[-=:;/.\+]" "_" flag_signature "${flag}") - check_cxx_compiler_flag(${flag} "__CxxFlag_${flag_signature}") - if(${__CxxFlag_${flag_signature}}) - list(APPEND out_array "${flag}") - endif() - endforeach() - - set(${out} "${out_array}" PARENT_SCOPE) - endfunction() - - function(cxx_detect_standard out) - set(out_array) - - if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") - cxx_detect_cflags(out_array "/std:c++latest" "/std:c++14") - else() - cxx_detect_cflags(out_array "-std=c++17" "-std=c++14" "-std=c++11" "-std=c++0x") - endif() - - # Keep only the first flag detected, which keeps the highest version supported. - if(out_array) - list(GET out_array 0 out_array) - endif() - - set(out_array ${${out}} ${out_array}) - set(${out} "${out_array}" PARENT_SCOPE) - endfunction() - - function(cxx_print_cflags cflags_any cflags_dbg cflags_rel) - foreach(flag ${cflags_any}) - message(" ${flag}") - endforeach() - foreach(flag ${cflags_dbg}) - message(" ${flag} [DEBUG]") - endforeach() - foreach(flag ${cflags_rel}) - message(" ${flag} [RELEASE]") - endforeach() - endfunction() - - # ----------------------------------------------------------------------------- - # This part detects the c++ compiler and fills basic CXX_... variables to make - # integration with that compiler easier. It provides the most common flags in - # a cross-platform way. - # ----------------------------------------------------------------------------- - set(CXX_DEFINE "-D") # Define a preprocessor macro: "${CXX_DEFINE}VAR=1" - set(CXX_INCLUDE "-I") # Define an include directory: "${CXX_INCLUDE}PATH" - - set(CXX_CFLAGS_SSE "") # Compiler flags to build a file that uses SSE intrinsics. - set(CXX_CFLAGS_SSE2 "") # Compiler flags to build a file that uses SSE2 intrinsics. - set(CXX_CFLAGS_SSE3 "") # Compiler flags to build a file that uses SSE3 intrinsics. - set(CXX_CFLAGS_SSSE3 "") # Compiler flags to build a file that uses SSSE3 intrinsics. - set(CXX_CFLAGS_SSE4_1 "") # Compiler flags to build a file that uses SSE4.1 intrinsics. - set(CXX_CFLAGS_SSE4_2 "") # Compiler flags to build a file that uses SSE4.2 intrinsics. - set(CXX_CFLAGS_AVX "") # Compiler flags to build a file that uses AVX intrinsics. - set(CXX_CFLAGS_AVX2 "") # Compiler flags to build a file that uses AVX2 intrinsics. - - if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") - set(CXX_DEFINE "/D") - set(CXX_INCLUDE "/I") - - # 64-bit MSVC compiler doesn't like /arch:SSE[2] as it's implicit. - if(NOT CMAKE_CL_64) - list(APPEND CXX_CFLAGS_SSE "/arch:SSE") - list(APPEND CXX_CFLAGS_SSE2 "/arch:SSE2") - list(APPEND CXX_CFLAGS_SSE3 "/arch:SSE2") - list(APPEND CXX_CFLAGS_SSSE3 "/arch:SSE2") - list(APPEND CXX_CFLAGS_SSE4_1 "/arch:SSE2") - list(APPEND CXX_CFLAGS_SSE4_2 "/arch:SSE2") - endif() - - # MSVC doesn't provide any preprocessor definitions to detect SSE3+, - # these unify MSVC with definitions defined by Intel|Clang|GCC. - list(APPEND CXX_CFLAGS_SSE "${CXX_DEFINE}__SSE__") - list(APPEND CXX_CFLAGS_SSE2 "${CXX_DEFINE}__SSE2__") - list(APPEND CXX_CFLAGS_SSE3 "${CXX_DEFINE}__SSE3__") - list(APPEND CXX_CFLAGS_SSSE3 "${CXX_DEFINE}__SSSE3__") - list(APPEND CXX_CFLAGS_SSE4_1 "${CXX_DEFINE}__SSE4_1__") - list(APPEND CXX_CFLAGS_SSE4_2 "${CXX_DEFINE}__SSE4_2__") - - # AVX/AVX2 doesn't need custom defs as MSVC does define __AVX[2]__ by itself. 
- cxx_detect_cflags(CXX_CFLAGS_AVX "/arch:AVX") - cxx_detect_cflags(CXX_CFLAGS_AVX2 "/arch:AVX2") - elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel" AND WIN32) - # Intel on Windows uses CL syntax. - set(CXX_DEFINE "/D") - set(CXX_INCLUDE "/I") - - # Intel deprecated /arch:SSE, so it's implicit. In contrast to MSVC, Intel - # also provides /arch:SSE3+ options and uses the same definitions as GCC - # and Clang, so no magic needed here. - cxx_detect_cflags(CXX_CFLAGS_SSE2 "/arch:SSE2") - cxx_detect_cflags(CXX_CFLAGS_SSE3 "/arch:SSE3") - cxx_detect_cflags(CXX_CFLAGS_SSSE3 "/arch:SSSE3") - cxx_detect_cflags(CXX_CFLAGS_SSE4_1 "/arch:SSE4.1") - cxx_detect_cflags(CXX_CFLAGS_SSE4_2 "/arch:SSE4.2") - cxx_detect_cflags(CXX_CFLAGS_AVX "/arch:AVX") - cxx_detect_cflags(CXX_CFLAGS_AVX2 "/arch:AVX2") - else() - cxx_detect_cflags(CXX_CFLAGS_SSE "-msse") - cxx_detect_cflags(CXX_CFLAGS_SSE2 "-msse2") - cxx_detect_cflags(CXX_CFLAGS_SSE3 "-msse3") - cxx_detect_cflags(CXX_CFLAGS_SSSE3 "-mssse3") - cxx_detect_cflags(CXX_CFLAGS_SSE4_1 "-msse4.1") - cxx_detect_cflags(CXX_CFLAGS_SSE4_2 "-msse4.2") - cxx_detect_cflags(CXX_CFLAGS_AVX "-mavx") - cxx_detect_cflags(CXX_CFLAGS_AVX2 "-mavx2") - endif() - - # --------------------------------------------------------------------------- - # Function - # cxx_project(product) - # - # Create a master project or embed other project in a master project. - # --------------------------------------------------------------------------- - function(cxx_project product) - string(TOUPPER "${product}" PRODUCT) - - set(MODE_EMBED ${${PRODUCT}_EMBED}) - set(MODE_STATIC ${${PRODUCT}_STATIC}) - - # EMBED implies STATIC. - if(MODE_EMBED) - set(MODE_STATIC TRUE) - set(${PRODUCT}_STATIC TRUE PARENT_SCOPE) - endif() - - # Deduce source and include directories. By default CxxProject assumes that - # both source and include files are located at './src'. - set(SOURCE_DIR "${${PRODUCT}_SOURCE_DIR}") - set(INCLUDE_DIR "${${PRODUCT}_INCLUDE_DIR}") - - if(NOT SOURCE_DIR) - set(SOURCE_DIR "${${PRODUCT}_DIR}/src") - set(${PRODUCT}_SOURCE_DIR "${SOURCE_DIR}" PARENT_SCOPE) - endif() - - if(NOT INCLUDE_DIR) - set(INCLUDE_DIR "${SOURCE_DIR}") - set(${PRODUCT}_INCLUDE_DIR "${INCLUDE_DIR}" PARENT_SCOPE) - endif() - - set(DEPS "") # Dependencies (list of libraries) for the linker. - set(LIBS "") # Dependencies with project included, for consumers. - set(CFLAGS "") # Public compiler flags. - set(PRIVATE_CFLAGS "") # Private compiler flags independent of build type. - set(PRIVATE_CFLAGS_DBG "") # Private compiler flags used by debug builds. - set(PRIVATE_CFLAGS_REL "") # Private compiler flags used by release builds. - set(PRIVATE_LFLAGS "") # Private linker flags. - - if(MODE_EMBED) - list(APPEND CFLAGS "${CXX_DEFINE}${PRODUCT}_EMBED") - list(APPEND PRIVATE_CFLAGS "${CXX_DEFINE}${PRODUCT}_EMBED") - endif() - - if(MODE_STATIC) - list(APPEND CFLAGS "${CXX_DEFINE}${PRODUCT}_STATIC") - list(APPEND PRIVATE_CFLAGS "${CXX_DEFINE}${PRODUCT}_STATIC") - endif() - - # PUBLIC properties - usable by third parties. - set(${PRODUCT}_DEPS "${DEPS}" PARENT_SCOPE) - set(${PRODUCT}_LIBS "${LIBS}" PARENT_SCOPE) - set(${PRODUCT}_CFLAGS "${CFLAGS}" PARENT_SCOPE) - - # PRIVATE properties - only used during build. 
- set(${PRODUCT}_PRIVATE_CFLAGS "${PRIVATE_CFLAGS}" PARENT_SCOPE) - set(${PRODUCT}_PRIVATE_CFLAGS_DBG "${PRIVATE_CFLAGS_DBG}" PARENT_SCOPE) - set(${PRODUCT}_PRIVATE_CFLAGS_REL "${PRIVATE_CFLAGS_REL}" PARENT_SCOPE) - set(${PRODUCT}_PRIVATE_LFLAGS "${PRIVATE_LFLAGS}" PARENT_SCOPE) - endfunction() - - function(cxx_project_info product) - string(TOUPPER "${product}" PRODUCT) - - set(BUILD_MODE "") - set(BUILD_TEST "") - - if(${PRODUCT}_EMBED) - set(BUILD_MODE "Embed") - elseif(${PRODUCT}_STATIC) - set(BUILD_MODE "Static") - else() - set(BUILD_MODE "Shared") - endif() - - if(${PRODUCT}_BUILD_TEST) - set(BUILD_TEST "On") - else() - set(BUILD_TEST "Off") - endif() - - message("-- [${product}]") - message(" BuildMode=${BUILD_MODE}") - message(" BuildTest=${BUILD_TEST}") - message(" ${PRODUCT}_DIR=${${PRODUCT}_DIR}") - message(" ${PRODUCT}_DEPS=${${PRODUCT}_DEPS}") - message(" ${PRODUCT}_LIBS=${${PRODUCT}_LIBS}") - message(" ${PRODUCT}_CFLAGS=${${PRODUCT}_CFLAGS}") - message(" ${PRODUCT}_SOURCE_DIR=${${PRODUCT}_SOURCE_DIR}") - message(" ${PRODUCT}_INCLUDE_DIR=${${PRODUCT}_INCLUDE_DIR}") - message(" ${PRODUCT}_PRIVATE_CFLAGS=") - cxx_print_cflags( - "${${PRODUCT}_PRIVATE_CFLAGS}" - "${${PRODUCT}_PRIVATE_CFLAGS_DBG}" - "${${PRODUCT}_PRIVATE_CFLAGS_REL}") - endfunction() - - function(cxx_add_source product out src_dir) - string(TOUPPER "${product}" PRODUCT) - - set(src_path "${${PRODUCT}_SOURCE_DIR}/${src_dir}") - set(src_array) - - foreach(file ${ARGN}) - set(src_file "${src_path}/${file}") - set(src_cflags "") - - if(file MATCHES "\\.c|\\.cc|\\.cxx|\\.cpp|\\.m|\\.mm") - if(file MATCHES "_sse\\." AND NOT "${CXX_CFLAGS_SSE}" STREQUAL "") - list(APPEND src_cflags ${CXX_CFLAGS_SSE}) - endif() - if(file MATCHES "_sse2\\." AND NOT "${CXX_CFLAGS_SSE2}" STREQUAL "") - list(APPEND src_cflags ${CXX_CFLAGS_SSE2}) - endif() - if(file MATCHES "_sse3\\." AND NOT "${CXX_CFLAGS_SSE3}" STREQUAL "") - list(APPEND src_cflags ${CXX_CFLAGS_SSE3}) - endif() - if(file MATCHES "_ssse3\\." AND NOT "${CXX_CFLAGS_SSSE3}" STREQUAL "") - list(APPEND src_cflags ${CXX_CFLAGS_SSSE3}) - endif() - if(file MATCHES "_sse4_1\\." AND NOT "${CXX_CFLAGS_SSE4_1}" STREQUAL "") - list(APPEND src_cflags ${CXX_CFLAGS_SSE4_1}) - endif() - if(file MATCHES "_sse4_2\\." AND NOT "${CXX_CFLAGS_SSE4_2}" STREQUAL "") - list(APPEND src_cflags ${CXX_CFLAGS_SSE4_2}) - endif() - if(file MATCHES "_avx\\." AND NOT "${CXX_CFLAGS_AVX}" STREQUAL "") - list(APPEND src_cflags ${CXX_CFLAGS_AVX}) - endif() - if(file MATCHES "_avx2\\." AND NOT "${CXX_CFLAGS_AVX2}" STREQUAL "") - list(APPEND src_cflags ${CXX_CFLAGS_AVX2}) - endif() - - # HACK: Setting `COMPILE_FLAGS` property cannot be used when your input - # is LIST, even when you use `VALUE1 VALUE2 ...` as cmake would insert - # escaped semicolons instead of spaces. So let's make it the cmake way: - # - nonituitive, verbose, and idiotic. 
- if(NOT "${src_cflags}" STREQUAL "") - foreach(src_cflag ${src_cflags}) - set_property(SOURCE "${src_file}" APPEND_STRING PROPERTY COMPILE_FLAGS " ${src_cflag}") - endforeach() - endif() - endif() - list(APPEND src_array ${src_file}) - endforeach() - source_group(${src_dir} FILES ${src_array}) - - set(out_tmp ${${out}}) - list(APPEND out_tmp ${src_array}) - set("${out}" "${out_tmp}" PARENT_SCOPE) - endfunction() - - function(cxx_add_library product target src deps cflags cflags_dbg cflags_rel) - string(TOUPPER "${product}" PRODUCT) - - if(NOT ${PRODUCT}_STATIC) - add_library(${target} SHARED ${src}) - else() - add_library(${target} STATIC ${src}) - endif() - - target_link_libraries(${target} ${deps}) - if (NOT "${${PRODUCT}_PRIVATE_LFLAGS}" STREQUAL "") - set_target_properties(${target} PROPERTIES LINK_FLAGS "${${PRODUCT}_PRIVATE_LFLAGS}") - endif() - - if(CMAKE_BUILD_TYPE) - if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") - target_compile_options(${target} PRIVATE ${cflags} ${cflags_dbg}) - else() - target_compile_options(${target} PRIVATE ${cflags} ${cflags_rel}) - endif() - else() - target_compile_options(${target} PRIVATE ${cflags} $<$:${cflags_dbg}> $<$>:${cflags_rel}>) - endif() - - if(NOT ${PRODUCT}_EMBED) - install(TARGETS ${target} RUNTIME DESTINATION "bin" - LIBRARY DESTINATION "lib${LIB_SUFFIX}" - ARCHIVE DESTINATION "lib${LIB_SUFFIX}") - endif() - endfunction() - - function(cxx_add_executable product target src deps cflags cflags_dbg cflags_rel) - string(TOUPPER "${product}" PRODUCT) - add_executable(${target} ${src}) - - target_link_libraries(${target} ${deps}) - if (NOT "${${PRODUCT}_PRIVATE_LFLAGS}" STREQUAL "") - set_target_properties(${target} PROPERTIES LINK_FLAGS "${${PRODUCT}_PRIVATE_LFLAGS}") - endif() - - if(CMAKE_BUILD_TYPE) - if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") - target_compile_options(${target} PRIVATE ${cflags} ${cflags_dbg}) - else() - target_compile_options(${target} PRIVATE ${cflags} ${cflags_rel}) - endif() - else() - target_compile_options(${target} PRIVATE ${cflags} $<$:${cflags_dbg}> $<$>:${cflags_rel}>) - endif() - - if(NOT ${PRODUCT}_STATIC) - install(TARGETS ${target} DESTINATION "lib${LIB_SUFFIX}") - endif() - endfunction() -endif() diff --git a/LICENSE.md b/LICENSE.md index 6852566..054fb48 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -Copyright (c) 2008-2017, Petr Kobalicek +Copyright (c) 2008-2019, Petr Kobalicek This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages diff --git a/README.md b/README.md index 9e70cd5..67ae165 100644 --- a/README.md +++ b/README.md @@ -1,32 +1,22 @@ AsmJit ------ -Complete x86/x64 JIT and Remote Assembler for C++. +Complete x86/x64 JIT and AOT Assembler for C++. * [Official Repository (asmjit/asmjit)](https://github.com/asmjit/asmjit) * [Official Blog (asmbits)](https://asmbits.blogspot.com/ncr) * [Official Chat (gitter)](https://gitter.im/asmjit/asmjit) * [Permissive ZLIB license](./LICENSE.md) -Important ---------- - -There are now two branches new users should decide to use: - - * oldstable - old version that will be maintained until the end of 2019 - * next-wip - new version that will be merged into master in June 2019 - -At the moment the most work happens in `next-wip` branch, which should be now used by new projects (and existing projects should migrate as soon as possible). 
Some changes in next-wip branch are not API compatible with master branch, for example all classes having X86 prefix were moved into `asmjit::x86` namespaces and some other classes were renamed, but almost all changes are cosmetic and transition to the new version should be straightforward. A commercial option for transitioning from old AsmJit to the current/next-wip is also available. - -The `oldstable` branch was created for users that cannot migrate at the moment and will be maintained until the end of 2019, and then deleted. So please use `next-wip` branch now and consider switching to it as soon as possible. Introduction ------------ -AsmJit is a complete JIT and remote assembler for C++ language. It can generate native code for x86 and x64 architectures and supports the whole x86/x64 instruction set - from legacy MMX to the newest AVX512. It has a type-safe API that allows C++ compiler to do semantic checks at compile-time even before the assembled code is generated and/or executed. +AsmJit is a complete JIT and AOT assembler for C++ language. It can generate native code for x86 and x64 architectures and supports the whole x86/x64 instruction set - from legacy MMX to the newest AVX512. It has a type-safe API that allows C++ compiler to do semantic checks at compile-time even before the assembled code is generated and/or executed. AsmJit, as the name implies, started as a project that provided JIT code-generation and execution. However, AsmJit evolved and it now contains features that are far beyond the scope of a simple JIT compilation. To keep the library small and lightweight the functionality not strictly related to JIT is provided by a sister project called [asmtk](https://github.com/asmjit/asmtk). + Minimal Example --------------- @@ -43,12 +33,12 @@ int main(int argc, char* argv[]) { JitRuntime rt; // Runtime specialized for JIT code execution. CodeHolder code; // Holds code and relocation information. - code.init(rt.getCodeInfo()); // Initialize to the same arch as JIT runtime. + code.init(rt.codeInfo()); // Initialize to the same arch as JIT runtime. - X86Assembler a(&code); // Create and attach X86Assembler to `code`. + x86::Assembler a(&code); // Create and attach x86::Assembler to `code`. a.mov(x86::eax, 1); // Move one to 'eax' register. a.ret(); // Return from function. - // ----> X86Assembler is no longer needed from here and can be destroyed <---- + // ----> x86::Assembler is no longer needed from here and can be destroyed <---- Func fn; Error err = rt.add(&fn, &code); // Add the generated code to the runtime. @@ -67,173 +57,183 @@ int main(int argc, char* argv[]) { } ``` + AsmJit Summary -------------- - * Complete x86/x64 instruction set - MMX, SSEx, BMIx, ADX, TBM, XOP, AVXx, FMAx, and AVX512. - * Assembler, CodeBuilder, and CodeCompiler emitters - each suitable for different tasks. + * Complete x86/x64 instruction set - MMX, SSE+, BMI+, ADX, TBM, XOP, AVX+, FMA+, and AVX512+. + * Different emitters providing various abstraction levels (Assembler, Builder, Compiler). + * Support for sections for separating code and data. * Built-in CPU vendor and features detection. - * Advanced logging/formatting and robust error handling. - * Virtual memory management similar to malloc/free for JIT code-generation and execution. - * Lightweight and embeddable - 200-250kB compiled with all built-in features. - * Modularity - unneeded features can be disabled at compile-time to make the library smaller. + * Advanced logging, formatting, and error handling. 
+ * JIT memory allocator - interface similar to malloc/free for JIT code-generation and execution. + * Lightweight and easily embeddable - ~300kB compiled with all built-in features. + * Modular design - unneeded features can be disabled at compile-time to make the library smaller. * Zero dependencies - no external libraries, no STL/RTTI - easy to embed and/or link statically. - * Doesn't use exceptions internally, but allows to attach a "throwable" error handler (your choice). + * Doesn't use exceptions internally, but allows to attach a "throwable" error handler of your choice. + Advanced Features ----------------- * AsmJit contains a highly compressed instruction database: * Instruction names - allows to convert instruction id to its name and vice versa. - * Instruction metadata - access (read|write|rw) of all operand combinations of all instructions. + * Instruction metadata - access (read|write) of all operand combinations of all instructions. * Instruction signatures - allows to strictly validate if an instruction (with all its operands) is valid. * AsmJit allows to precisely control how instructions are encoded if there are multiple variations. * AsmJit is highly dynamic, constructing operands at runtime is a common practice. - * Multiple emitters with the same interface - emit machine code directly or to a representation that can be processed afterwards. + * Multiple emitters with the same interface - emit machine code directly or to a representation that can be post-processed. + Important --------- Breaking the official API is sometimes inevitable, what to do? - * Breaking changes are described in [BREAKING.md](./BREAKING.md) document. - * Visit our [Official Chat](https://gitter.im/asmjit/asmjit) if you need a quick help. * See asmjit tests, they always compile and provide an implementation of a lot of use-cases: - * [asmjit_test_x86_asm.cpp](./test/asmjit_test_x86_cc.cpp) - Tests that use **X86Assembler** and **X86Builder**. - * [asmjit_test_x86_cc.cpp](./test/asmjit_test_x86_cc.cpp) - Tests that use **X86Compiler**. + * [asmjit_test_x86_asm.cpp](./test/asmjit_test_x86_asm.cpp) - Tests that demonstrate the purpose of emitters. + * [asmjit_test_x86_cc.cpp](./test/asmjit_test_x86_cc.cpp) - A lot of tests targeting Compiler infrastructure. + * [asmjit_test_x86_sections.cpp](./test/asmjit_test_x86_sections.cpp) - Multiple sections test. + * Visit our [Official Chat](https://gitter.im/asmjit/asmjit) if you need a quick help. + + +TODO +---- + + * [ ] Add support for user external buffers in CodeHolder. -TODOs: - * [ ] AsmJit added support for code sections, but only the first section (executable code) works atm. - * [ ] AsmJit supports AVX512, but {sae} and {er} are not handled properly yet. - * [ ] AsmJit next-wip branch implements a brand-new register allocator (and contains reworked CodeBuilder and CodeCompiler), but it's not complete yet. Supported Environments ---------------------- ### C++ Compilers: - * Tested - * **Clang** - tested by Travis-CI. - * **GCC** - tested by Travis-CI. - * **MinGW** - tested by AppVeyor. - * **MSVC** - tested by AppVeyor. - * Maybe - * **CodeGear** - no maintainers. - * **Intel** - no maintainers. - * Other c++ compilers would require some testing and support in [asmjit_build.h](./src/asmjit/asmjit_build.h). + * Requirements: + * AsmJit won't build without C++11 enabled. If you use older GCC or Clang you would have to enable at least c++11 through compiler flags. 
+ * Tested: + * **Clang** - tested by Travis-CI - Clang 3.9+ (with C++11 enabled) is officially supported (older Clang versions having C++11 support are probably fine, but are not regularly tested). + * **GNU** - tested by Travis-CI - GCC 4.8+ (with C++11 enabled) is officially supported. + * **MINGW** - tested by Travis-CI - Use the latest version, if possible. + * **MSVC** - tested by Travis-CI - **MSVC2017+ only!** - there is a severe bug in MSVC2015's `constexpr` implementation that makes that compiler unusable. + * Untested: + * **Intel** - no maintainers and no CI environment to regularly test this compiler. + * Other c++ compilers would require basic support in [core/build.h](./src/asmjit/core/build.h). ### Operating Systems: - * Tested - * **Linux** - tested by Travis-CI. - * **Mac** - tested by Travis-CI. - * **Windows** - tested by AppVeyor. - * Maybe - * **BSDs** - no maintainers. - * Other operating systems would require some testing and support in [asmjit_build.h](./src/asmjit/asmjit_build.h) and [osutils.cpp](./src/asmjit/base/osutils.cpp). + * Tested: + * **Linux** - tested by Travis-CI - any distribution is generally supported. + * **OSX** - tested by Travis-CI - any version is supported. + * **Windows** - tested by Travis-CI - Windows 7+ is officially supported. + * Untested: + * **BSDs** - no maintainers, no CI environment to regularly test these OSes. + * **Haiku** - not regularly tested, but reported to work. + * Other operating systems would require some testing and support in [core/build.h](./src/asmjit/core/build.h), [core/osutils.cpp](./src/asmjit/core/osutils.cpp), and [core/virtmem.cpp](./src/asmjit/core/virtmem.cpp). ### Backends: - * **X86** - tested by both Travis-CI and AppVeyor. - * **X64** - tested by both Travis-CI and AppVeyor. + * **X86** - tested by both Travis-CI - both 32-bit and 64-bit backends are fully functional. * **ARM** - work-in-progress (not public at the moment). + Project Organization -------------------- - * **`/`** - Project root - * **src** - Source code - * **asmjit** - Source code and headers (always point include path in here) - * **base** - Backend independent API - * **arm** - ARM specific API, used only by ARM32 and ARM64 backends - * **x86** - X86 specific API, used only by X86 and X64 backends - * **test** - Unit and integration tests (don't embed in your project) - * **tools** - Tools used for configuring, documenting and generating files + * **`/`** - Project root. + * **src** - Source code. + * **asmjit** - Source code and headers (always point include path in here). + * **core** - Core API, backend independent except relocations. + * **arm** - ARM specific API, used only by ARM and AArch64 backends. + * **x86** - X86 specific API, used only by X86 and X64 backends. + * **test** - Unit and integration tests (don't embed in your project). + * **tools** - Tools used for configuring, documenting and generating data files. -Configuring & Building ----------------------- -AsmJit is designed to be easy embeddable in any project. However, it depends on some compile-time macros that can be used to build a specific version of AsmJit that includes or excludes certain features. A typical way of building AsmJit is to use [cmake](https://www.cmake.org), but it's also possible to just include AsmJit source code in your project and just build it. The easiest way to include AsmJit in your project is to just include **src** directory in your project and to define **ASMJIT_STATIC** or **ASMJIT_EMBED**. 
AsmJit can be just updated from time to time without any changes to this integration process. Do not embed AsmJit's [/test](./test) files in such case as these are used for testing. +Configuring & Feature Selection +------------------------------- + +AsmJit is designed to be easy embeddable in any project. However, it depends on some compile-time macros that can be used to build a specific version of AsmJit that includes or excludes certain features. A typical way of building AsmJit is to use [cmake](https://www.cmake.org), but it's also possible to just include AsmJit source code in your project and just build it. The easiest way to include AsmJit in your project is to just include **src** directory in your project and to define `ASMJIT_STATIC`. AsmJit can be just updated from time to time without any changes to this integration process. Do not embed AsmJit's [/test](./test) files in such case as these are used for testing. ### Build Type: - * **ASMJIT_DEBUG** - Define to always turn debugging on (regardless of compile-time options detected). - * **ASMJIT_RELEASE** - Define to always turn debugging off (regardless of compile-time options detected). + * `ASMJIT_BUILD_DEBUG` - Define to always turn debugging on (regardless of compile-time options detected). + * `ASMJIT_BUILD_RELEASE` - Define to always turn debugging off (regardless of compile-time options detected). -By default none of these is defined, AsmJit detects build-type based on compile-time macros and supports most IDE and compiler settings out of box. +By default none of these is defined, AsmJit detects build-type based on compile-time macros and supports most IDE and compiler settings out of box. By default AsmJit switches to release mode when `NDEBUG` is defined. ### Build Mode: - * **ASMJIT_EMBED** - Define to embed AsmJit in another project. Embedding means that neither shared nor static library is created and AsmJit's source files and source files of the product that embeds AsmJit are part of the same target. This way of building AsmJit has certain advantages that are beyond this manual. **ASMJIT_EMBED** behaves similarly to **ASMJIT_STATIC** (no API exports). - * **ASMJIT_STATIC** - Define to build AsmJit as a static library. No symbols are exported in such case. + * `ASMJIT_STATIC` - Define to build AsmJit statically - either as a static library or as a part of another project. No symbols are exported in such case. -By default AsmJit build is configured to be built as a shared library, thus none of **ASMJIT_EMBED** and **ASMJIT_STATIC** is defined. +By default AsmJit build is configured to be built as a shared library, this means `ASMJIT_STATIC` must be explicitly enabled if you want to compile AsmJit statically. ### Build Backends: - * **ASMJIT_BUILD_ARM** - Build ARM32 and ARM64 backends (work-in-progress). - * **ASMJIT_BUILD_X86** - Build X86 and X64 backends. - * **ASMJIT_BUILD_HOST** - Build only the host backend (default). + * `ASMJIT_BUILD_ARM` - Build ARM backends (not ready, work-in-progress). + * `ASMJIT_BUILD_X86` - Build X86 backends (X86 and X86_64). + * `ASMJIT_BUILD_HOST` - Build only the host backend (default). -If none of **ASMJIT_BUILD_...** is defined AsmJit bails to **ASMJIT_BUILD_HOST**, which will detect the target architecture at compile-time. Each backend automatically supports 32-bit and 64-bit targets, so for example AsmJit with X86 support can generate both 32-bit and 64-bit code. 
+If none of `ASMJIT_BUILD_...` is defined AsmJit bails to `ASMJIT_BUILD_HOST`, which will detect the target architecture at compile-time. Each backend automatically supports 32-bit and 64-bit targets, so for example AsmJit with X86 support can generate both 32-bit and 64-bit code. ### Disabling Features: - * **ASMJIT_DISABLE_BUILDER** - Disables both **CodeBuilder** and **CodeCompiler** emitters (only **Assembler** will be available). Ideal for users that don't use **CodeBuilder** concept and want to create smaller AsmJit. - * **ASMJIT_DISABLE_COMPILER** - Disables **CodeCompiler** emitter. For users that use **CodeBuilder**, but not **CodeCompiler** - * **ASMJIT_DISABLE_LOGGING** - Disables logging (**Logger** and all classes that inherit it) and formatting features. - * **ASMJIT_DISABLE_TEXT** - Disables everything that uses text-representation and that causes certain strings to be stored in the resulting binary. For example when this flag is enabled all instruction and error names (and related APIs) will not be available. This flag has to be disabled together with **ASMJIT_DISABLE_LOGGING**. This option is suitable for deployment builds or builds that don't want to reveal the use of AsmJit. - * **ASMJIT_DISABLE_VALIDATION** - Disables instruction validation feature. Saves around 5kB of space when used. + * `ASMJIT_NO_BUILDER` - Disables both `Builder` and `Compiler` emitters (only `Assembler` will be available). Ideal for users that don't use `Builder` concept and want to have AsmJit a bit smaller. + * `ASMJIT_NO_COMPILER` - Disables `Compiler` emitter. For users that use `Builder`, but not `Compiler`. + * `ASMJIT_NO_JIT` - Disables JIT execution engine, which includes `JitUtils`, `JitAllocator`, and `JitRuntime`. + * `ASMJIT_NO_LOGGING` - Disables logging (`Logger` and all classes that inherit it) and instruction formatting. + * `ASMJIT_NO_TEXT` - Disables everything that uses text-representation and that causes certain strings to be stored in the resulting binary. For example when this flag is set all instruction and error names (and related APIs) will not be available. This flag has to be disabled together with `ASMJIT_NO_LOGGING`. This option is suitable for deployment builds or builds that don't want to reveal the use of AsmJit. + * `ASMJIT_NO_INST_API` - Disables instruction query features, strict validation, read/write information, and all additional data and APIs that can output information about instructions. + +NOTE: Please don't disable any features if you plan to build AsmJit as a shared library that will be used by multiple projects that you don't control (for example asmjit in a Linux distribution). The possibility to disable certain features exists mainly for customized builds of AsmJit. -NOTE: Please don't disable any features if you plan to build AsmJit as a shared library that will be used by multiple projects that you don't control (for example asmjit in a Linux distribution). The possibility to disable certain features exists mainly for static builds of AsmJit. Using AsmJit ------------ AsmJit library uses one global namespace called `asmjit` that provides the whole functionality. Architecture specific code is prefixed by the architecture name and architecture specific registers and operand builders have their own namespace. For example API targeting both X86 and X64 architectures is prefixed with `X86` and registers & operand builders are accessible through `x86` namespace. 
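As a quick illustration (a minimal sketch; the function name is mine and not part of AsmJit), the generic API lives directly in `asmjit` while architecture-specific registers and operand builders come from the `x86` namespace:

```c++
#include <asmjit/asmjit.h>

using namespace asmjit;

// A minimal sketch showing the namespace split: generic types come from
// `asmjit`, x86-specific registers and operand builders from `asmjit::x86`.
static void namespaceExample(x86::Assembler& a) {
  x86::Gp counter = x86::ecx;              // x86-specific register.
  x86::Mem slot = x86::ptr(x86::esp, 8);   // x86-specific memory operand builder.

  a.mov(counter, slot);                    // Emits `mov ecx, [esp + 8]`.
}
```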
This design is very different from the initial version of AsmJit and now seems to be the most convenient one. -### CodeHolder & CodeEmitter +### CodeHolder & Emitters AsmJit provides two classes that are used together for code generation: - * **CodeHolder** - Provides functionality to hold generated code and stores all necessary information about code sections, labels, symbols, and possible relocations. - * **CodeEmitter** - Provides functionality to emit code into **CodeHolder**. **CodeEmitter** is abstract and provides just basic building blocks that are then implemented by **Assembler**, **CodeBuilder**, and **CodeCompiler**. + * `CodeHolder` - Provides functionality to hold generated code and stores all necessary information about code sections, labels, symbols, and possible relocations. + * `BaseEmitter` - Provides functionality to emit code into `CodeHolder`. `BaseEmitter` is abstract and provides just basic building blocks that are then implemented by `BaseAssembler`, `BaseBuilder`, `BaseCompiler`, and their architecture-specific implementations like `x86::Assembler`, `x86::Builder`, and `x86::Compiler`. Code emitters: - * **Assembler** - Emitter designed to emit machine code directly. - * **CodeBuilder** - Emitter designed to emit code into a representation that can be processed. It stores the whole code in a double linked list consisting of nodes (**CBNode** aka code-builder node). There are nodes that represent instructions (**CBInst**), labels (**CBLabel**), and other building blocks (**CBAlign**, **CBData**, ...). Some nodes are used as markers (**CBSentinel**) and comments (**CBComment**). - * **CodeCompiler** - High-level code emitter that uses virtual registers and contains high-level function building features. **CodeCompiler** is based on **CodeBuilder**, but extends its functionality and introduces new node types starting with CC (**CCFunc**, **CCFuncExit**, **CCFuncCall**). CodeCompiler is the simplest way to start with AsmJit as it abstracts many details required to generate a function in asm language. + * `[Base]Assembler` - Emitter designed to emit machine code directly into a `CodeBuffer` held by `CodeHolder`. + * `[Base]Builder` - Emitter designed to emit code into a representation that can be processed afterwards. It stores the whole code in a doubly-linked list consisting of nodes (`BaseNode` and all derived classes). There are nodes that represent instructions (`InstNode`), labels (`LabelNode`), and other building blocks (`AlignNode`, `DataNode`, ...). Some nodes are used as markers (`SentinelNode`) and comments (`CommentNode`). + * `[Base]Compiler` - High-level code emitter that uses virtual registers and contains high-level function building features. Compiler extends `[Base]Builder` functionality and introduces new nodes like `FuncNode`, `FuncRetNode`, and `FuncCallNode`. Compiler is the simplest way to start with AsmJit as it abstracts lots of details required to generate a function that can be called from C/C++. -### Runtime +### Targets and JitRuntime -AsmJit's **Runtime** is designed for execution and/or linking. The **Runtime** itself is abstract and defines only how to **add()** and **release()** code held by **CodeHolder**. **CodeHolder** holds machine code and relocation entries, but should be seen as a temporary object only - after the code in **CodeHolder** is ready, it should be passed to **Runtime** or relocated manually.
Users interested in inspecting the generated machine-code (instead of executing or linking) can keep it in **CodeHodler** and process it manually of course. - -The only **Runtime** implementation provided directly by AsmJit is called **JitRuntime**, which is suitable for storing and executing dynamically generated code. **JitRuntime** is used in most AsmJit examples as it makes the code management easy. It allows to add and release dynamically generated functions, so it's suitable for JIT code generators that want to keep many functions alive, and release functions which are no longer needed. +AsmJit's `Target` class is an interface that provides basic target abstraction. At the moment only one implementation called `JitRuntime` is provided, which as the name suggests provides a JIT code target and execution runtime. `JitRuntime` provides all the necessary functionality to implement a simple JIT with basic memory management. It only provides `add()` and `release()` functions that are used to either add code to the runtime or release it. The `JitRuntime` doesn't make any decisions about when the code should be released. Once you add new code into it, you must decide when that code is no longer needed and should be released. ### Instructions & Operands -Instructions specify operations performed by the CPU, and operands specify the operation's input(s) and output(s). Each AsmJit's instruction has it's own unique id (**X86Inst::Id** for example) and platform specific code emitters always provide a type safe intrinsic (or multiple overloads) to emit such instruction. There are two ways of emitting an instruction: +Instructions specify operations performed by the CPU, and operands specify the operation's input(s) and output(s). Each AsmJit instruction has its own unique id (`Inst::Id` for example) and platform-specific code emitters always provide a type-safe intrinsic (or multiple overloads) to emit such instruction. There are two ways of emitting an instruction: - * Using emitter.**instName**(operands...) - A type-safe way provided by platform specific emitters - for example **X86Assembler** provides `mov(X86Gp, X86Gp)`. - * Using emitter.emit(**instId**, operands...) - Allows to emit an instruction in a dynamic way - you just need to know instruction's id and provide its operands. + * Using `BaseEmitter::inst(operands...)` - A type-safe way provided by platform-specific emitters - for example `x86::Assembler` provides `x86::Assembler::mov(x86::Gp, x86::Gp)`. + * Using `BaseEmitter::emit(instId, operands...)` - Allows to emit an instruction in a dynamic way - you just need to know the instruction's id and provide its operands. -AsmJit's operands all inherit from a base class called **Operand** and then specialize its type to: +AsmJit's operands all inherit from a base class called `Operand` and then specialize its type to: * **None** (not used or uninitialized operand). - * **Register** (**Reg**) - Describes either physical or virtual register. Physical registers have id that matches the target's machine id directly, whereas virtual registers must be allocated into physical registers by a register allocator pass. Each **Reg** provides: - * **Register Type** - Unique id that describes each possible register provided by the target architecture - for example X86 backend provides **X86Reg::RegType**, which defines all variations of general purpose registers (GPB-LO, GPB-HI, GPW, GPD, and GPQ) and all types of other registers like K, MM, BND, XMM, YMM, and ZMM.
- * **Register Kind** - Groups multiple register types under a single kind - for example all general-purpose registers (of all sizes) on X86 are **X86Reg::kKindGp**, all SIMD registers (XMM, YMM, ZMM) are **X86Reg::kKindVec**, etc. + * **Register** (`BaseReg`) - Describes either a physical or a virtual register. Physical registers have an id that matches the target's machine id directly, whereas virtual registers must be allocated into physical registers by a register allocator pass. A register operand provides: + * **Register Type** - Unique id that describes each possible register provided by the target architecture - for example the X86 backend provides `x86::Reg::RegType`, which defines all variations of general purpose registers (GPB-LO, GPB-HI, GPW, GPD, and GPQ) and all types of other registers like K, MM, BND, XMM, YMM, and ZMM. + * **Register Group** - Groups multiple register types under a single group - for example all general-purpose registers (of all sizes) on X86 are `x86::Reg::kGroupGp`, all SIMD registers (XMM, YMM, ZMM) are `x86::Reg::kGroupVec`, etc. * **Register Size** - Contains the size of the register in bytes. If the size depends on the mode (32-bit vs 64-bit) then generally the higher size is used (for example RIP register has size 8 by default). * **Register ID** - Contains physical or virtual id of the register. - * **Memory Address** (**Mem**) - Used to reference a memory location. Each **Mem** provides: - * **Base Register** - A base register id (physical or virtual). - * **Index Register** - An index register id (physical or virtual). + * Each architecture provides its own register class that adds an architecture-specific API to `BaseReg`. + * **Memory Address** (`BaseMem`) - Used to reference a memory location. A memory operand provides: + * **Base Register** - A base register type and id (physical or virtual). + * **Index Register** - An index register type and id (physical or virtual). * **Offset** - Displacement or absolute address to be referenced (32-bit if base register is used and 64-bit if base register is not used). * **Flags** that can describe various architecture dependent information (like scale and segment-override on X86). - * **Immediate Value** (**Imm**) - Immediate values are usually part of instructions (encoded within the instruction itself) or data. - * **Label** - used to reference a location in code or data. Labels must be created by the **CodeEmitter** or by **CodeHolder**. Each label has its unique id per **CodeHolder** instance. + * Each architecture provides its own memory operand class that adds an architecture-specific API to `BaseMem`. + * **Immediate Value** (`Imm`) - Immediate values are usually part of instructions (encoded within the instruction itself) or data. + * **Label** - used to reference a location in code or data. Labels must be created by the `BaseEmitter` or by `CodeHolder`. Each label has its unique id per `CodeHolder` instance. -AsmJit allows to construct operands dynamically, to store them, and to query a complete information about them at run-time. Operands are small (always 16 bytes per **Operand**) and should be always copied if you intend to store them (don't create operands by using **new** keyword, it's not recommended). Operands are safe to be **memcpy()ed** and **memset()ed** if you need to work with arrays of operands. +AsmJit allows to construct operands dynamically, to store them, and to query complete information about them at run-time.
Operands are small (always 16 bytes per `Operand`) and should be always copied (by value) if you intend to store them (don't create operands by using `new` keyword, it's not recommended). Operands are safe to be `memcpy()`ed and `memset()`ed if you need to work with arrays of operands. Small example of manipulating and using operands: @@ -242,44 +242,45 @@ Small example of manipulating and using operands: using namespace asmjit; -X86Gp getDstRegByValue() { return x86::ecx; } +x86::Gp dstRegByValue() { return x86::ecx; } -void usingOperandsExample(X86Assembler& a) { +void usingOperandsExample(x86::Assembler& a) { // Create some operands. - X86Gp dst = getDstRegByValue(); // Get `ecx` register returned by a function. - X86Gp src = x86::rax; // Get `rax` register directly from the provided `x86` namespace. - X86Gp idx = x86::gpq(10); // Construct `r10` dynamically. - X86Mem m = x86::ptr(src, idx); // Construct [src + idx] memory address - referencing [rax + r10]. + x86::Gp dst = dstRegByValue(); // Get `ecx` register returned by a function. + x86::Gp src = x86::rax; // Get `rax` register directly from the provided `x86` namespace. + x86::Gp idx = x86::gpq(10); // Construct `r10` dynamically. + x86::Mem m = x86::ptr(src, idx); // Construct [src + idx] memory address - referencing [rax + r10]. // Examine `m`: - m.getIndexType(); // Returns `X86Reg::kRegGpq`. - m.getIndexId(); // Returns 10 (`r10`). + m.indexType(); // Returns `x86::Reg::kTypeGpq`. + m.indexId(); // Returns 10 (`r10`). // Reconstruct `idx` stored in mem: - X86Gp idx_2 = X86Gp::fromTypeAndId(m.getIndexType(), m.getIndexId()); + x86::Gp idx_2 = x86::Gp::fromTypeAndId(m.indexType(), m.indexId()); idx == idx_2; // True, `idx` and idx_2` are identical. Operand op = m; // Possible. - op.isMem(); // True (can be casted to Mem and X86Mem). + op.isMem(); // True (can be casted to BaseMem or architecture-specific Mem). m == op; // True, `op` is just a copy of `m`. - static_cast(op).addOffset(1); // Static cast is fine and valid here. + static_cast(op).addOffset(1); // Static cast is fine and valid here. + op.as().addOffset(1); // However, using `as()` to cast to a derived type is preferred. m == op; // False, `op` now points to [rax + r10 + 1], which is not [rax + r10]. // Emitting 'mov' a.mov(dst, m); // Type-safe way. - a.mov(dst, op); // Not possible, `mov` doesn't provide `X86Reg, Operand` overload. + a.mov(dst, op); // Not possible, `mov` doesn't provide `mov(x86::Gp, Operand)` overload. - a.emit(X86Inst::kIdMov, dst, m); // Type-unsafe, but possible. - a.emit(X86Inst::kIdMov, dst, op); // Also possible, `emit()` is typeless and can be used dynamically. + a.emit(x86::Inst::kIdMov, dst, m); // Type-unsafe, but possible. + a.emit(x86::Inst::kIdMov, dst, op); // Also possible, `emit()` is typeless and can be used with raw `Operand`s. } ``` -Some operands have to be created explicitly by `CodeEmitter`. For example labels must be created by `newLabel()` before they are used. +Some operands have to be created explicitly by `BaseEmitter`. For example labels must be created by `newLabel()` before they are used. ### Assembler Example -X86Assembler is a code emitter that emits machine code into a CodeBuffer directly. It's capable of targeting both 32-bit and 64-bit instruction sets and it's possible to target both instruction sets within the same code-base. The following example shows how to generate a function that works in both 32-bit and 64-bit modes, and how to use JitRuntime, CodeHolder, and X86Assembler together. 
+`x86::Assembler` is a code emitter that emits machine code into a CodeBuffer directly. It's capable of targeting both 32-bit and 64-bit instruction sets and it's possible to target both instruction sets within the same code-base. The following example shows how to generate a function that works in both 32-bit and 64-bit modes, and how to use JitRuntime, `CodeHolder`, and `x86::Assembler` together. The example handles 3 calling conventions manually just to show how it could be done, however, AsmJit contains utilities that can be used to create function prologs and epilogs automatically, but these concepts will be explained later. @@ -293,27 +294,27 @@ using namespace asmjit; typedef int (*SumFunc)(const int* arr, size_t count); int main(int argc, char* argv[]) { - assert(sizeof(void*) == 8 && - "This example requires 64-bit environment."); - - JitRuntime rt; // Create a runtime specialized for JIT. - + JitRuntime jit; // Create a runtime specialized for JIT. CodeHolder code; // Create a CodeHolder. - code.init(rt.getCodeInfo()); // Initialize it to be compatible with `rt`. - X86Assembler a(&code); // Create and attach X86Assembler to `code`. + code.init(jit.codeInfo()); // Initialize it to be compatible with `jit`. + x86::Assembler a(&code); // Create and attach x86::Assembler to `code`. // Decide between 32-bit CDECL, WIN64, and SysV64 calling conventions: // 32-BIT - passed all arguments by stack. // WIN64 - passes first 4 arguments by RCX, RDX, R8, and R9. // UNIX64 - passes first 6 arguments by RDI, RSI, RCX, RDX, R8, and R9. - X86Gp arr, cnt; - X86Gp sum = x86::eax; // Use EAX as 'sum' as it's a return register. + x86::Gp arr, cnt; + x86::Gp sum = x86::eax; // Use EAX as 'sum' as it's a return register. - if (ASMJIT_ARCH_64BIT) { - bool isWinOS = static_cast(ASMJIT_OS_WINDOWS); - arr = isWinOS ? x86::rcx : x86::rdi; // First argument (array ptr). - cnt = isWinOS ? x86::rdx : x86::rsi; // Second argument (number of elements) + if (ASMJIT_ARCH_BITS == 64) { + #if defined(_WIN32) + arr = x86::rcx; // First argument (array ptr). + cnt = x86::rdx; // Second argument (number of elements) + #else + arr = x86::rdi; // First argument (array ptr). + cnt = x86::rsi; // Second argument (number of elements) + #endif } else { arr = x86::edx; // Use EDX to hold the array pointer. @@ -337,10 +338,10 @@ int main(int argc, char* argv[]) { a.bind(Exit); // Exit to handle the border case. a.ret(); // Return from function ('sum' == 'eax'). - // ----> X86Assembler is no longer needed from here and can be destroyed <---- + // ----> x86::Assembler is no longer needed from here and can be destroyed <---- SumFunc fn; - Error err = rt.add(&fn, &code); // Add the generated code to the runtime. + Error err = jit.add(&fn, &code); // Add the generated code to the runtime. if (err) return 1; // Handle a possible error returned by AsmJit. // ----> CodeHolder is no longer needed from here and can be destroyed <---- @@ -350,7 +351,7 @@ int main(int argc, char* argv[]) { int result = fn(array, 6); // Execute the generated code. printf("%d\n", result); // Print sum of array (108). - rt.release(fn); // Remove the function from the runtime. + jit.release(fn); // Remove the function from the runtime. return 0; } ``` @@ -362,57 +363,59 @@ The example should be self-explanatory. It shows how to work with labels, how to X86 provides a complex memory addressing model that allows to encode addresses having a BASE register, INDEX register with a possible scale (left shift), and displacement (called offset in AsmJit). 
Memory address can also specify memory segment (segment-override in X86 terminology) and some instructions (gather / scatter) require INDEX to be a VECTOR register instead of a general-purpose register. AsmJit allows to encode and work with all forms of addresses mentioned and implemented by X86. It also allows to construct a 64-bit memory address, which is only allowed in one form of 'mov' instruction. ```c++ +#include + // Memory operand construction is provided by x86 namespace. using namespace asmjit; using namespace asmjit::x86; // Easier to access x86 regs. // BASE + OFFSET. -X86Mem a = ptr(rax); // a = [rax] -X86Mem b = ptr(rax, 15) // b = [rax + 15] +x86::Mem a = ptr(rax); // a = [rax] +x86::Mem b = ptr(rax, 15) // b = [rax + 15] -// BASE + INDEX + SCALE - Scale is in BITS as used by X86! -X86Mem c = ptr(rax, rbx) // c = [rax + rbx] -X86Mem d = ptr(rax, rbx, 2) // d = [rax + rbx << 2] -X86Mem e = ptr(rax, rbx, 2, 15) // e = [rax + rbx << 2 + 15] +// BASE + INDEX << SHIFT - Shift is in BITS as used by X86! +x86::Mem c = ptr(rax, rbx) // c = [rax + rbx] +x86::Mem d = ptr(rax, rbx, 2) // d = [rax + rbx << 2] +x86::Mem e = ptr(rax, rbx, 2, 15) // e = [rax + rbx << 2 + 15] // BASE + VM (Vector Index) (encoded as MOD+VSIB). -X86Mem f = ptr(rax, xmm1) // f = [rax + xmm1] -X86Mem g = ptr(rax, xmm1, 2) // g = [rax + xmm1 << 2] -X86Mem h = ptr(rax, xmm1, 2, 15) // h = [rax + xmm1 << 2 + 15] +x86::Mem f = ptr(rax, xmm1) // f = [rax + xmm1] +x86::Mem g = ptr(rax, xmm1, 2) // g = [rax + xmm1 << 2] +x86::Mem h = ptr(rax, xmm1, 2, 15) // h = [rax + xmm1 << 2 + 15] // WITHOUT BASE: uint64_t ADDR = (uint64_t)0x1234; -X86Mem i = ptr(ADDR); // i = [0x1234] -X86Mem j = ptr(ADDR, rbx); // j = [0x1234 + rbx] -X86Mem k = ptr(ADDR, rbx, 2); // k = [0x1234 + rbx << 2] +x86::Mem i = ptr(ADDR); // i = [0x1234] +x86::Mem j = ptr(ADDR, rbx); // j = [0x1234 + rbx] +x86::Mem k = ptr(ADDR, rbx, 2); // k = [0x1234 + rbx << 2] // LABEL - Will be encoded as RIP (64-bit) or absolute address (32-bit). Label L = ...; -X86Mem m = ptr(L); // m = [L] -X86Mem n = ptr(L, rbx); // n = [L + rbx] -X86Mem o = ptr(L, rbx, 2); // o = [L + rbx << 2] -X86Mem p = ptr(L, rbx, 2, 15); // p = [L + rbx << 2 + 15] +x86::Mem m = ptr(L); // m = [L] +x86::Mem n = ptr(L, rbx); // n = [L + rbx] +x86::Mem o = ptr(L, rbx, 2); // o = [L + rbx << 2] +x86::Mem p = ptr(L, rbx, 2, 15); // p = [L + rbx << 2 + 15] // RIP - 64-bit only (RIP can't use INDEX). -X86Mem q = ptr(rip, 24); // q = [rip + 24] +x86::Mem q = ptr(rip, 24); // q = [rip + 24] ``` Memory operands can optionally contain memory size. This is required by instructions where the memory size cannot be deduced from other operands, like `inc` and `dec`: ```c++ -X86Mem a = x86::dword_ptr(rax, rbx); // dword ptr [rax + rbx]. -X86Mem b = x86::qword_ptr(rdx, rsi, 0, 1);// qword ptr [rdx + rsi << 0 + 1]. +x86::Mem a = x86::dword_ptr(rax, rbx); // dword ptr [rax + rbx]. +x86::Mem b = x86::qword_ptr(rdx, rsi, 0, 1); // qword ptr [rdx + rsi << 0 + 1]. ``` Memory operands provide API that can be used to work with them: ```c++ -X86Mem mem = x86::dword_ptr(rax, 12); // dword ptr [rax + 12]. +x86::Mem mem = x86::dword_ptr(rax, 12); // dword ptr [rax + 12]. mem.hasBase(); // true. mem.hasIndex(); // false. -mem.getSize(); // 4. -mem.getOffset(); // 12. +mem.size(); // 4. +mem.offset(); // 12. mem.setSize(0); // Sets the size to 0 (makes it sizeless). mem.addOffset(-1); // Adds -1 to the offset and makes it 11. @@ -427,34 +430,39 @@ mem.hasIndex(); // true. 
Making changes to memory operand is very comfortable when emitting loads and stores: ```c++ +#include + using namespace asmjit; -X86Assembler a(...); // Your initialized X86Assembler. -X86Mem m = x86::ptr(eax); // Construct [eax] memory operand. +x86::Assembler a(...); // Your initialized x86::Assembler. +x86::Mem m = x86::ptr(eax); // Construct [eax] memory operand. // One way of emitting bunch of loads is to use `mem.adjusted()`. It returns // a new memory operand and keeps the source operand unchanged. a.movaps(x86::xmm0, m); // No adjustment needed to load [eax]. -a.movaps(x86::xmm1, m.adjusted(16)); // Loads [eax + 16]. -a.movaps(x86::xmm2, m.adjusted(32)); // Loads [eax + 32]. -a.movaps(x86::xmm3, m.adjusted(48)); // Loads [eax + 48]. +a.movaps(x86::xmm1, m.adjusted(16)); // Loads from [eax + 16]. +a.movaps(x86::xmm2, m.adjusted(32)); // Loads from [eax + 32]. +a.movaps(x86::xmm3, m.adjusted(48)); // Loads from [eax + 48]. // ... do something with xmm0-3 ... // Another way of adjusting memory is to change the operand in-place. If you // want to keep the original operand you can simply clone it. -X86Mem mx = m.clone(); -a.movaps(mx, x86::xmm0); mx.addOffset(16);// Stores [eax] (and adds 16 to mx). -a.movaps(mx, x86::xmm1); mx.addOffset(16);// Stores [eax + 16] (and adds 16 to mx). -a.movaps(mx, x86::xmm2); mx.addOffset(16);// Stores [eax + 32] (and adds 16 to mx). -a.movaps(mx, x86::xmm3); // Stores [eax + 48]. +x86::Mem mx = m.clone(); +a.movaps(mx, x86::xmm0); mx.addOffset(16);// Stores to [eax] (and adds 16 to mx). +a.movaps(mx, x86::xmm1); mx.addOffset(16);// Stores to [eax + 16] (and adds 16 to mx). +a.movaps(mx, x86::xmm2); mx.addOffset(16);// Stores to [eax + 32] (and adds 16 to mx). +a.movaps(mx, x86::xmm3); // Stores to [eax + 48]. ``` -You can explore the possibilities by taking a look at [base/operand.h](./src/asmjit/base/operand.h) and [x86/x86operand.h](./src/asmjit/x86/x86operand.h). Always use `X86Mem` when targeting X86 as it extends the base `Mem` operand with features provided only by X86. +You can explore the possibilities by taking a look at: + + * [core/operand.h](./src/asmjit/core/operand.h) + * [x86/x86operand.h](./src/asmjit/x86/x86operand.h). ### More About CodeInfo -In the first complete example the `CodeInfo` is retrieved from `JitRuntime`. It's logical as `JitRuntime` will always return a `CodeInfo` that is compatible with the runtime environment. For example if your application runs in 64-bit mode the `CodeInfo` will use `ArchInfo::kTypeX64` architecture in contrast to `ArchInfo::kTypeX86`, which will be used in 32-bit mode. AsmJit also allows to setup `CodeInfo` manually, and to select a different architecture when needed. So let's do something else this time, let's always generate a 32-bit code and print it's binary representation. To do that, we create our own `CodeInfo` and initialize it to `ArchInfo::kTypeX86` architecture. CodeInfo will populate all basic fields just based on the architecture we provide, so it's super-easy: +In the first complete example the `CodeInfo` is retrieved from `JitRuntime`. It's logical as `JitRuntime` will always return a `CodeInfo` that is compatible with the runtime environment. For example if your application runs in 64-bit mode the `CodeInfo` will use `ArchInfo::kIdX64` architecture in contrast to `ArchInfo::kIdX86`, which will be used in 32-bit mode. AsmJit also allows to setup `CodeInfo` manually, and to select a different architecture when needed. 
So let's do something else this time, let's always generate a 32-bit code and print it's binary representation. To do that, we create our own `CodeInfo` and initialize it to `ArchInfo::kIdX86` architecture. CodeInfo will populate all basic fields just based on the architecture we provide, so it's super-easy: ```c++ #include @@ -466,11 +474,11 @@ int main(int argc, char* argv[]) { using namespace asmjit::x86; // Easier access to x86/x64 registers. CodeHolder code; // Create a CodeHolder. - code.init(CodeInfo(ArchInfo::kTypeX86));// Initialize it for a 32-bit X86 target. + code.init(CodeInfo(ArchInfo::kIdX86));// Initialize it for a 32-bit X86 target. // Generate a 32-bit function that sums 4 floats and looks like: // void func(float* dst, const float* a, const float* b) - X86Assembler a(&code); // Create and attach X86Assembler to `code`. + x86::Assembler a(&code); // Create and attach x86::Assembler to `code`. a.mov(eax, dword_ptr(esp, 4)); // Load the destination pointer. a.mov(ecx, dword_ptr(esp, 8)); // Load the first source pointer. @@ -482,31 +490,18 @@ int main(int argc, char* argv[]) { a.movups(ptr(eax), xmm0); // Store the result to [eax]. a.ret(); // Return from function. - // Now we have two options if we want to do something with the code hold - // by CodeHolder. In order to use it we must first sync X86Assembler with - // the CodeHolder as it doesn't do it for every instruction it generates for - // performance reasons. The options are: - // - // 1. Detach X86Assembler from CodeHolder (will automatically sync). - // 2. Sync explicitly, allows to use X86Assembler again if needed. - // - // NOTE: AsmJit always syncs internally when CodeHolder needs to access these - // buffers and knows that there is an Assembler attached, so you have to sync - // explicitly only if you bypass CodeHolder and intend to do something on your - // own. - code.sync(); // So let's sync, it's easy. - // We have no Runtime this time, it's on us what we do with the code. - // CodeHolder stores code in SectionEntry, which embeds CodeSection - // and CodeBuffer structures. We are interested in section's CodeBuffer only. + // CodeHolder stores code in `Section`, which provides some basic properties + // and CodeBuffer structure. We are interested in section's CodeBuffer only. // // NOTE: The first section is always '.text', so it's safe to just use 0 index. - CodeBuffer& buf = code.getSectionEntry(0)->getBuffer(); + // Get it by using either `code.sectionById(0)` or `code.textSection()`. + CodeBuffer& buffer = code.sectionById(0)->buffer(); // Print the machine-code generated or do something more interesting with it? // 8B4424048B4C24048B5424040F28010F58010F2900C3 - for (size_t i = 0; i < buf.getLength(); i++) - printf("%02X", buf.getData()[i]); + for (size_t i = 0; i < buffer.length; i++) + printf("%02X", buffer.data[i]); return 0; } @@ -514,9 +509,13 @@ int main(int argc, char* argv[]) { ### Explicit Code Relocation -CodeInfo contains much more information than just the target architecture. It can be configured to specify a base-address (or a virtual base-address in a linker terminology), which could be static (useful when you know the location of the target's machine code) or dynamic. AsmJit assumes dynamic base-address by default and relocates the code held by `CodeHolder` to a user-provided address on-demand. To be able to relocate to a user-provided address it needs to store some information about relocations, which is represented by `CodeHolder::RelocEntry`. 
Relocation entries are only required if you call external functions from the generated code that cannot be encoded by using a 32-bit displacement (X64 architecture doesn't provide 64-bit encodable displacement) and when a label referenced in one section is bound in another, but this is not really a JIT case and it's more related to AOT (ahead-of-time) compilation. +CodeInfo contains much more information than just the target architecture. It can be configured to specify a base-address (or a virtual base-address in linker terminology), which could be static (useful when you know the location of the target's machine code) or dynamic. AsmJit assumes a dynamic base-address by default and relocates the code held by `CodeHolder` to a user-provided address on-demand. To be able to relocate to a user-provided address it needs to store some information about relocations, which is represented by `RelocEntry`. Relocation entries are only required if you call external functions from the generated code that cannot be encoded by using a 32-bit displacement (the X64 architecture doesn't provide an encodable 64-bit displacement). -Next example shows how to use a built-in virtual memory manager `VMemMgr` instead of using `JitRuntime` (just in case you want to use your own memory management) and how to relocate the generated code into your own memory block - you can use your own virtual memory allocator if you need that, but that's OS specific and it's already provided by AsmJit, so we will use what AsmJit offers instead of rolling our own here. +There is also a concept called `LabelLink` - label links are lightweight structs that don't have any identifier and are stored per label in a singly-linked list. Label links represent either unbound but already used labels (valid in cases in which a label was referenced by an instruction before being bound) or links that cross sections (only relevant to code that uses multiple sections). Since crossing sections is something that cannot be resolved immediately, these links persist until offsets of these sections are assigned and `CodeHolder::resolveUnresolvedLinks()` is called. It's an error if you end up with code that has unresolved label links after flattening. You can verify this by calling `CodeHolder::hasUnresolvedLinks()` and `CodeHolder::unresolvedLinkCount()`. + +AsmJit can flatten code that uses multiple sections by assigning each section an incrementing offset that respects its alignment. Use `CodeHolder::flatten()` to do that. After the sections are flattened, their offsets and virtual sizes are adjusted to respect each section's buffer size and alignment. You must call `CodeHolder::resolveUnresolvedLinks()` before relocating the code held by it (a short sketch of this sequence is shown below). You can also flatten your code manually by iterating over all sections and calculating their offsets (relative to base) by your own algorithm. In that case you don't have to call `CodeHolder::flatten()`, but you must still call `CodeHolder::resolveUnresolvedLinks()`. + +The next example shows how to use the built-in virtual memory allocator `JitAllocator` instead of using `JitRuntime` (just in case you want to use your own memory management) and how to relocate the generated code into your own memory block - you can use your own virtual memory allocator if you prefer that, but that's OS-specific and it's already provided by AsmJit, so we will use what AsmJit offers instead of going deep into OS-specific APIs.
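Before the full example, here is a minimal sketch of the flatten-and-resolve sequence described above. It uses only the `CodeHolder` calls already mentioned and assumes they report failures through AsmJit's usual `Error` codes; the helper function name is just for illustration and is not part of AsmJit:

```c++
#include <asmjit/asmjit.h>

using namespace asmjit;

// A minimal sketch: prepare a multi-section CodeHolder for relocation by
// flattening sections and resolving cross-section label links.
static bool prepareForRelocation(CodeHolder& code) {
  if (code.flatten() != kErrorOk)                 // Assign offsets to all sections.
    return false;

  if (code.resolveUnresolvedLinks() != kErrorOk)  // Resolve cross-section label links.
    return false;

  return !code.hasUnresolvedLinks();              // Everything must be resolved now.
}
```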
The following code is similar to the previous one, but implements a function working in both 32-bit and 64-bit environments: @@ -530,18 +529,17 @@ typedef void (*SumIntsFunc)(int* dst, const int* a, const int* b); int main(int argc, char* argv[]) { CodeHolder code; // Create a CodeHolder. - code.init(CodeInfo(ArchInfo::kTypeHost));// Initialize it for the host architecture. + code.init(CodeInfo(ArchInfo::kIdHost)); // Initialize it for the host architecture. - X86Assembler a(&code); // Create and attach X86Assembler to `code`. + x86::Assembler a(&code); // Create and attach x86::Assembler to `code`. // Generate a function runnable in both 32-bit and 64-bit architectures: - bool isX86 = static_cast(ASMJIT_ARCH_X86); - bool isWin = static_cast(ASMJIT_OS_WINDOWS); + bool isX86 = ASMJIT_ARCH_X86 == 32; // Signature: 'void func(int* dst, const int* a, const int* b)'. - X86Gp dst; - X86Gp src_a; - X86Gp src_b; + x86::Gp dst; + x86::Gp src_a; + x86::Gp src_b; // Handle the difference between 32-bit and 64-bit calling convention. // (arguments passed through stack vs. arguments passed by registers). @@ -554,9 +552,15 @@ int main(int argc, char* argv[]) { a.mov(src_b, x86::dword_ptr(x86::esp, 12)); // Load the second source pointer. } else { - dst = isWin ? x86::rcx : x86::rdi; // First argument (destination pointer). - src_a = isWin ? x86::rdx : x86::rsi; // Second argument (source 'a' pointer). - src_b = isWin ? x86::r8 : x86::rdx; // Third argument (source 'b' pointer). + #if defined(_WIN32) + dst = x86::rcx; // First argument (destination pointer). + src_a = x86::rdx; // Second argument (source 'a' pointer). + src_b = x86::r8; // Third argument (source 'b' pointer). + #else + dst = x86::rdi; // First argument (destination pointer). + src_a = x86::rsi; // Second argument (source 'a' pointer). + src_b = x86::rdx; // Third argument (source 'b' pointer). + #endif } a.movdqu(x86::xmm0, x86::ptr(src_a)); // Load 4 ints from [src_a] to XMM0. @@ -565,22 +569,54 @@ int main(int argc, char* argv[]) { a.movdqu(x86::ptr(dst), x86::xmm0); // Store the result to [dst]. a.ret(); // Return from function. + // Even when we didn't use multiple sections AsmJit could insert one section + // called '.addrtab' (address table section), which would be filled by data + // required by relocations (absolute jumps and calls). You can omit this code + // if you are 100% sure your code doesn't contain multiple sections and + // such relocations. You can use `CodeHolder::hasAddressTable()` to verify + // whether the address table section does exist. + code.flatten(); + code.resolveUnresolvedLinks(); + // After the code was generated it can be relocated manually to any memory // location, however, we need to know it's size before we perform memory - // allocation. CodeHolder's `getCodeSize()` returns the worst estimated - // code-size (the biggest possible) in case that relocations are not - // possible without trampolines (in that case some extra code at the end - // of the current code buffer is generated during relocation). - size_t size = code.getCodeSize(); + // allocation. `CodeHolder::codeSize()` returns the worst estimated code + // size in case that relocations are not possible without trampolines (in + // that case some extra code at the end of the current code buffer is + // generated during relocation). + size_t estimatedSize = code.codeSize(); - // Instead of rolling our own virtual memory allocator we can use the one - // AsmJit uses. It's decoupled so you don't need to use Runtime for that. 
- VMemMgr vm; + // Instead of rolling up our own memory allocator we can use the one AsmJit + // provides. It's decoupled so you don't need to use `JitRuntime` for that. + JitAllocator allocator; - void* p = vm.alloc(size); // Allocate a virtual memory (executable). - if (!p) return 0; // Handle a possible out-of-memory case. + // Allocate an executable virtual memory and handle a possible failure. + void* p = allocator.alloc(estimatedSize); + if (!p) return 0; - size_t realSize = code.relocate(p); // Relocate & store the output in 'p'. + // Now relocate the code to the address provided by the memory allocator. + // Please note that this DOESN'T COPY anything to `p`. This function will + // store the address in CodeInfo and use relocation entries to patch the + // existing code in all sections to respect the base address provided. + code.relocateToBase((uint64_t)p); + + // This is purely optional. There are cases in which the relocation can + // omit unneeded data, which would shrink the size of address table. If + // that happened the `codeSize` returned after `relocateToBase()` would + // be smaller than the originally `estimatedSize`. + size_t codeSize = code.codeSize(); + + // This will copy code from all sections to `p`. Iterating over all + // sections and calling `memcpy()` would work as well, however, this + // function supports additional options that can be used to also zero + // pad sections' virtual size, etc. + // + // With some additional features, copyFlattenData() does roughly this: + // for (Section* section : code.sections()) + // memcpy((uint8_t*)p + section->offset(), + // section->data(), + // section->bufferSize()); + code.copyFlattenedData(p, codeSize, CodeHolder::kCopyWithPadding); // Execute the generated function. int inA[4] = { 4, 3, 2, 1 }; @@ -596,32 +632,27 @@ int main(int argc, char* argv[]) { // Release 'p' is it's no longer needed. It will be destroyed with 'vm' // instance anyway, but it's a good practice to release it explicitly // when you know that the function will not be needed anymore. - vm.release(p); + allocator.release(p); return 0; } ``` -Configure the CodeInfo by calling `CodeInfo::setBaseAddress()` to initialize it to a user-provided base-address before passing it to `CodeHolder`: +If you know your base-address in advance (before code generation) you can use `CodeInfo::setBaseAddress()` to setup its initial value. In that case Assembler will know the absolute position of each instruction and would be able to use it during instruction encoding and prevent relocations in case the instruction is encodable. The following example shows how to configure the base address: ```c++ -// Configure CodeInfo. +// Configure CodeInfo with base address. CodeInfo ci(...); ci.setBaseAddress(uint64_t(0x1234)); // Then initialize CodeHolder with it. CodeHolder code; code.init(ci); - -// ... after you emit the machine code it will be relocated to the base address -// provided and stored in the pointer passed to `CodeHolder::relocate()`. ``` -TODO: Maybe `CodeHolder::relocate()` is not the best name? - ### Using Native Registers - zax, zbx, zcx, ... -AsmJit's X86 code emitters always provide functions to construct machine-size registers depending on the target. This feature is for people that want to write code targeting both 32-bit and 64-bit at the same time. In AsmJit terminology these registers are named **zax**, **zcx**, **zdx**, **zbx**, **zsp**, **zbp**, **zsi**, and **zdi** (they are defined in this exact order by X86). 
They are accessible through `X86Assembler`, `X86Builder`, and `X86Compiler`. The following example illustrates how to use this feature: +AsmJit's X86 code emitters always provide functions to construct machine-size registers depending on the target. This feature is for people that want to write code targeting both 32-bit and 64-bit at the same time. In AsmJit terminology these registers are named **zax**, **zcx**, **zdx**, **zbx**, **zsp**, **zbp**, **zsi**, and **zdi** (they are defined in this exact order by X86). They are accessible through `x86::Assembler`, `x86::Builder`, and `x86::Compiler`. The following example illustrates how to use this feature: ```c++ #include @@ -632,16 +663,15 @@ using namespace asmjit; typedef int (*Func)(void); int main(int argc, char* argv[]) { - JitRuntime rt; // Create a runtime specialized for JIT. - + JitRuntime jit; // Create a runtime specialized for JIT. CodeHolder code; // Create a CodeHolder. - code.init(rt.getCodeInfo()); // Initialize it to be compatible with `rt`. - X86Assembler a(&code); // Create and attach X86Assembler to `code`. + code.init(jit.codeInfo()); // Initialize it to be compatible with `jit`. + x86::Assembler a(&code); // Create and attach x86::Assembler to `code`. - // Let's get these registers from X86Assembler. - X86Gp zbp = a.zbp(); - X86Gp zsp = a.zsp(); + // Let's get these registers from x86::Assembler. + x86::Gp zbp = a.zbp(); + x86::Gp zsp = a.zsp(); int stackSize = 32; @@ -660,13 +690,13 @@ int main(int argc, char* argv[]) { // To make the example complete let's call it. Func fn; - Error err = rt.add(&fn, &code); // Add the generated code to the runtime. + Error err = jit.add(&fn, &code); // Add the generated code to the runtime. if (err) return 1; // Handle a possible error returned by AsmJit. int result = fn(); // Execute the generated code. printf("%d\n", result); // Print the resulting "0". - rt.release(fn); // Remove the function from the runtime. + jit.release(fn); // Remove the function from the runtime. return 0; } ``` @@ -674,15 +704,15 @@ int main(int argc, char* argv[]) { The example just returns `0`, but the function generated contains a standard prolog and epilog sequence and the function itself reserves 32 bytes of local stack. The advantage is clear - a single code-base can handle multiple targets easily. If you want to create a register of native size dynamically by specifying its id it's also possible: ```c++ -void example(X86Assembler& a) { - X86Gp zax = a.gpz(X86Gp::kIdAx); - X86Gp zbx = a.gpz(X86Gp::kIdBx); - X86Gp zcx = a.gpz(X86Gp::kIdCx); - X86Gp zdx = a.gpz(X86Gp::kIdDx); +void example(x86::Assembler& a) { + x86::Gp zax = a.gpz(x86::Gp::kIdAx); + x86::Gp zbx = a.gpz(x86::Gp::kIdBx); + x86::Gp zcx = a.gpz(x86::Gp::kIdCx); + x86::Gp zdx = a.gpz(x86::Gp::kIdDx); // You can also change register's id easily. - X86Gp zsp = zax; - zsp.setId(4); // or X86Gp::kIdSp. + x86::Gp zsp = zax; + zsp.setId(4); // or x86::Gp::kIdSp. } ``` @@ -714,16 +744,15 @@ using namespace asmjit; typedef int (*Func)(void); int main(int argc, char* argv[]) { - JitRuntime rt; // Create a runtime specialized for JIT. - + JitRuntime jit; // Create a runtime specialized for JIT. CodeHolder code; // Create a CodeHolder. - code.init(rt.getCodeInfo()); // Initialize it to be compatible with `rt`. - X86Assembler a(&code); // Create and attach X86Assembler to `code`. + code.init(jit.codeInfo()); // Initialize it to be compatible with `jit`. + x86::Assembler a(&code); // Create and attach x86::Assembler to `code`. 
- // Let's get these registers from X86Assembler. - X86Gp zbp = a.zbp(); - X86Gp zsp = a.zsp(); + // Let's get these registers from x86::Assembler. + x86::Gp zbp = a.zbp(); + x86::Gp zsp = a.zsp(); // Function prolog. a.push(zbp); @@ -731,7 +760,7 @@ int main(int argc, char* argv[]) { // This is where we are gonna patch the code later, so let's get the offset // (the current location) from the beginning of the code-buffer. - size_t patchOffset = a.getOffset(); + size_t patchOffset = a.offset(); // Let's just emit 'sub zsp, 0' for now, but don't forget to use LONG form. a.long_().sub(zsp, 0); @@ -752,13 +781,13 @@ int main(int argc, char* argv[]) { // Now the code is ready to be called Func fn; - Error err = rt.add(&fn, &code); // Add the generated code to the runtime. + Error err = jit.add(&fn, &code); // Add the generated code to the runtime. if (err) return 1; // Handle a possible error returned by AsmJit. int result = fn(); // Execute the generated code. printf("%d\n", result); // Print the resulting "0". - rt.release(fn); // Remove the function from the runtime. + jit.release(fn); // Remove the function from the runtime. return 0; } ``` @@ -775,34 +804,32 @@ AsmJit contains another instruction option that controls (forces) REX prefix - ` * `4083C410` - `rex add esp, 16` - 32-bit operation in 64-bit mode with forced REX prefix (0x40). * `4883C410` - `add rsp, 16` - 64-bit operation in 64-bit mode requires REX prefix (0x48). * `4183C410` - `add r12d, 16` - 32-bit operation in 64-bit mode using R12D requires REX prefix (0x41). - * `4983C410` - `add r12, 16` - 64-bit operation in 64-bit mode using R12 requires REX prefix (0x49). + * `4983C410` - `add r12, 16` - 64-bit operation in 64-bit mode using R12 requires REX prefix (0x49). ### Generic Function API So far all examples shown above handled creating function prologs and epilogs manually. While it's possible to do it that way it's much better to automate such process as function calling conventions vary across architectures and also across operating systems. -AsmJit contains a functionality that can be used to define function signatures and to calculate automatically optimal frame layout that can be used directly by a prolog and epilog inserter. This feature was exclusive to AsmJit's CodeCompiler for a very long time, but was abstracted out and is now available for all users regardless of CodeEmitter they use. The design of handling functions prologs and epilogs allows generally two use cases: +AsmJit contains a functionality that can be used to define function signatures and to calculate automatically optimal function frame that can be used directly by a prolog and epilog inserter. This feature was exclusive to AsmJit's Compiler for a very long time, but was abstracted out and is now available for all users regardless of BaseEmitter they use. The design of handling functions prologs and epilogs allows generally two use cases: - * Calculate function layout before the function is generated - this is the only way if you use pure `Assembler` emitter and shown in the next example. - * Calculate function layout after the function is generated - this way is generally used by `CodeBuilder` and `CodeCompiler` (will be described together with `X86Compiler`). + * Calculate function frame before the function is generated - this is the only way if you use pure `Assembler` emitter and shown in the next example. 
+ * Calculate function frame after the function is generated - this way is generally used by `Builder` and `Compiler` emitters(will be described together with `x86::Compiler`). The following concepts are used to describe and create functions in AsmJit: - * **CallConv** - Describes a calling convention - this class contains instructions to assign registers and stack addresses to function arguments and return value(s), but doesn't specify any function signature. Calling conventions are architecture and OS dependent. + * `Type` - Type is an 8-bit value that describes a platform independent type as we know from C/C++. It provides abstractions for most common types like `int8_t`, `uint32_t`, `uintptr_t`, `float`, `double`, and all possible vector types to match ISAs up to AVX512. `Type::Id` was introduced originally to be used with the Compiler infrastucture, but is now used by `FuncSignature` as well. - * **TypeId** - TypeId is an 8-bit value that describes a platform independent type. It provides abstractions for most common types like `int8_t`, `uint32_t`, `uintptr_t`, `float`, `double`, and all possible vector types to match ISAs up to AVX512. **TypeId** was introduced originally to be used with **CodeCompiler**, but is now used by **FuncSignature** as well. + * `CallConv` - Describes a calling convention - this class contains instructions to assign registers and stack addresses to function arguments and return value(s), but doesn't specify any function signature. Calling conventions are architecture and OS dependent. - * **FuncSignature** - Describes a function signature, for example `int func(int, int)`. **FuncSignature** contains a function calling convention id, return value type, and function arguments. The signature itself is platform independent and uses **TypeId** to describe types of function arguments and its return value(s). + * `FuncSignature` - Describes a function signature, for example `int func(int, int)`. `FuncSignature` contains a function calling convention id, return value type, and function arguments. The signature itself is platform independent and uses `Type::Id` to describe types of function arguments and its return value(s). - * **FuncDetail** - Architecture and ABI dependent information that describes **CallConv** and expanded **FuncSignature**. Each function argument and return value is represented as **FuncDetail::Value** that contains the original **TypeId** enriched by additional information that specifies if the value is passed/returned by register (and which register) or by stack. Each value also contains some other metadata that provide additional information required to handle it properly (for example if a vector value is passed indirectly by a pointer as required by WIN64 calling convention, etc...). + * `FuncDetail` - Architecture and ABI dependent information that describes `CallConv` and expanded `FuncSignature`. Each function argument and return value is represented as `FuncValue` that contains the original `Type::Id` enriched by additional information that specifies if the value is passed/returned by register (and which register) or by stack. Each value also contains some other metadata that provide additional information required to handle it properly (for example if a vector value is passed indirectly by a pointer as required by WIN64 calling convention, etc...). - * **FuncArgsMapper** - A helper that can be used to define where each function argument is expected to be. 
It's architecture and ABI dependent mapping from function arguments described by CallConv and FuncDetail into registers specified by the user. + * `FuncFrame` - Contains information about the function frame that can be used by prolog/epilog inserter (PEI). Holds call stack size size and alignment, local stack size and alignment, and various attributes that describe how prolog and epilog should be constructed. `FuncFrame` doesn't know anything about function's arguments or return values, it hold only information necessary to create a valid and ABI conforming function prologs and epilogs. - * **FuncFrameInfo** - Contains information about a function-frame. Holds callout-stack size and alignment (i.e. stack used to call functions), stack-frame size and alignment (the stack required by the function itself), and various attributes that describe how prolog and epilog should be constructed. FuncFrameInfo doesn't know anything about function arguments or returns, it should be seen as a class that describes minimum requirements of the function frame and its attributes before the final `FuncFrameLayout` is calculated. + * `FuncArgsAssignment` - A helper class that can be used to reassign function arguments into user specified registers. It's architecture and ABI dependent mapping from function arguments described by CallConv and FuncDetail into registers specified by the user. - * **FuncFrameLayout** - Contains the final function layout that can be passed to `FuncUtils::emitProlog()` and `FuncUtils::emitEpilog()`. The content of this class should always be calculated by AsmJit by calling `FuncFrameLayout::init(const FuncDetail& detail, const FuncFrameInfo& ffi)`. - -It's a lot of concepts where each represents one step in the function layout calculation. In addition, the whole machinery can also be used to create function calls, instead of function prologs and epilogs. The next example shows how AsmJit can be used to create functions for both 32-bit and 64-bit targets and various calling conventions: +It's a lot of concepts where each represents one step in the function frame calculation. In addition, the whole machinery can also be used to create function calls, instead of function prologs and epilogs. The next example shows how AsmJit can be used to create functions for both 32-bit and 64-bit targets and various calling conventions: ```c++ #include @@ -813,48 +840,51 @@ using namespace asmjit; typedef void (*SumIntsFunc)(int* dst, const int* a, const int* b); int main(int argc, char* argv[]) { - JitRuntime rt; // Create JIT Runtime. - + JitRuntime jit; // Create JIT Runtime. CodeHolder code; // Create a CodeHolder. - code.init(rt.getCodeInfo()); // Initialize it to match `rt`. - X86Assembler a(&code); // Create and attach X86Assembler to `code`. + + code.init(jit.codeInfo()); // Initialize it to match `jit`. + x86::Assembler a(&code); // Create and attach x86::Assembler to `code`. // Decide which registers will be mapped to function arguments. Try changing // registers of `dst`, `src_a`, and `src_b` and see what happens in function's // prolog and epilog. - X86Gp dst = a.zax(); - X86Gp src_a = a.zcx(); - X86Gp src_b = a.zdx(); + x86::Gp dst = a.zax(); + x86::Gp src_a = a.zcx(); + x86::Gp src_b = a.zdx(); - X86Xmm vec0 = x86::xmm0; - X86Xmm vec1 = x86::xmm1; + X86::Xmm vec0 = x86::xmm0; + X86::Xmm vec1 = x86::xmm1; - // Create and initialize `FuncDetail` and `FuncFrameInfo`. Both are - // needed to create a function and they hold different kind of data. 
+ // Create and initialize `FuncDetail` and `FuncFrame`. FuncDetail func; - func.init(FuncSignature3(CallConv::kIdHost)); + func.init(FuncSignatureT(CallConv::kIdHost)); - FuncFrameInfo ffi; - ffi.setDirtyRegs(X86Reg::kKindVec, // Make XMM0 and XMM1 dirty. VEC kind - Utils::mask(0, 1)); // describes XMM|YMM|ZMM registers. + FuncFrame frame; + frame.init(func); - FuncArgsMapper args(&func); // Create function arguments mapper. + // Make XMM0 and XMM1 dirty; `kGroupVec` describes XMM|YMM|ZMM registers. + frame.setDirtyRegs(x86::Reg::kGroupVec, IntUtils::mask(0, 1)); + + // Alternatively, if you don't want to use register masks you can pass `BaseReg` + // to `addDirtyRegs()`. The following code would add both `xmm0` and `xmm1`. + frame.addDirtyRegs(x86::xmm0, x86::xmm1); + + FuncArgsAssignment args(&func); // Create arguments assignment context. args.assignAll(dst, src_a, src_b); // Assign our registers to arguments. - args.updateFrameInfo(ffi); // Reflect our args in FuncFrameInfo. + args.updateFrameInfo(frame); // Reflect our args in FuncFrame. + frame.finalize(); // Finalize the FuncFrame (updates it). - FuncFrameLayout layout; // Create the FuncFrameLayout, which - layout.init(func, ffi); // contains metadata of prolog/epilog. - - FuncUtils::emitProlog(&a, layout); // Emit function prolog. - FuncUtils::allocArgs(&a, layout, args); // Allocate arguments to registers. + a.emitProlog(frame); // Emit function prolog. + a.emitArgsAssignment(frame, args); // Assign arguments to registers. a.movdqu(vec0, x86::ptr(src_a)); // Load 4 ints from [src_a] to XMM0. a.movdqu(vec1, x86::ptr(src_b)); // Load 4 ints from [src_b] to XMM1. a.paddd(vec0, vec1); // Add 4 ints in XMM1 to XMM0. a.movdqu(x86::ptr(dst), vec0); // Store the result to [dst]. - FuncUtils::emitEpilog(&a, layout); // Emit function epilog and return. + a.emitEpilog(frame); // Emit function epilog and return. SumIntsFunc fn; - Error err = rt.add(&fn, &code); // Add the generated code to the runtime. + Error err = jit.add(&fn, &code); // Add the generated code to the runtime. if (err) return 1; // Handle a possible error case. // Execute the generated function. @@ -866,56 +896,55 @@ int main(int argc, char* argv[]) { // Prints {5 8 4 9} printf("{%d %d %d %d}\n", out[0], out[1], out[2], out[3]); - rt.release(fn); // Remove the function from the runtime. + jit.release(fn); // Remove the function from the runtime. return 0; } ``` -CodeBuilder ------------ -Both **CodeBuilder** and **CodeCompiler** are emitters that emit everything to a representation that allows further processing. The code stored in such representation is completely safe to be patched, simplified, reordered, obfuscated, removed, injected, analyzed, and 'think-of-anything-else'. Each instruction (or label, directive, ...) is stored as **CBNode** (Code-Builder Node) and contains all the necessary information to emit machine code out of it later. +Builder Interface +----------------- -There is a difference between **CodeBuilder** and **CodeCompiler**: +Both `Builder` and `Compiler` are emitters that emit everything to a representation that allows further processing. The code stored in such representation is completely safe to be patched, simplified, reordered, obfuscated, removed, injected, analyzed, and 'think-of-anything-else'. Each instruction, label, directive, etc... is stored in `BaseNode` (or derived class like `InstNode` or `LabelNode`) and contains all the information required to pass it later to the `Assembler`. 
- * **CodeBuilder** (low-level): - * Maximum compatibility with **Assembler**, easy to switch from **Assembler** to **CodeBuilder** and vice versa. - * Doesn't generate machine code directly, allows to serialize to **Assembler** when the whole code is ready to be encoded. +There is a huge difference between `Builder` and `Compiler`: - * **CodeCompiler** (high-level): + * `Builder` (low-level): + * Maximum compatibility with `Assembler`, easy to switch from `Assembler` to `Builder` and vice versa. + * Doesn't generate machine code directly, allows to serialize to `Assembler` when the whole code is ready to be encoded. + + * `Compiler` (high-level): * Virtual registers - allows to use unlimited number of virtual registers which are allocated into physical registers by a built-in register allocator. * Function nodes - allows to create functions by specifying their signatures and assigning virtual registers to function arguments and return value(s). * Function calls - allows to call other functions within the generated code by using the same interface that is used to create functions. -There are multiple node types used by both **CodeBuilder** and **CodeCompiler**: +There are multiple node types used by both `Builder` and `Compiler`: * Basic nodes: - * **CBNode** - Base class for all nodes. - * **CBInst** - Instruction node. - * **CBAlign** - Alignment directive (.align). - * **CBLabel** - Label (location where to bound it). + * `BaseNode` - Base class for all nodes. + * `InstNode` - Instruction node. + * `AlignNode` - Alignment directive (.align). + * `LabelNode` - Label (location where to bound it). * Data nodes: - * **CBData** - Data embedded into the code. - * **CBConstPool** - Constant pool data. - * **CBLabelData** - Label address embedded as data. + * `DataNode` - Data embedded into the code. + * `ConstPoolNode` - Constant pool data. + * `LabelDataNode` - Label address embedded as data. * Informative nodes: - * **CBComment** - Contains a comment string, doesn't affect code generation. - * **CBSentinel** - A marker that can be used to remember certain position, doesn't affect code generation. + * `CommentNode` - Contains a comment string, doesn't affect code generation. + * `SentinelNode` - A marker that can be used to remember certain position, doesn't affect code generation. - * **CodeCompiler** nodes: - * **CCFunc** - Start of a function. - * **CCFuncExit** - Return from a function. - * **CCFuncCall** - Function call. + * Compiler-only nodes: + * `FuncNode` - Start of a function. + * `FuncRetNode` - Return from a function. + * `FuncCallNode` - Function call. -NOTE: All nodes that have **CB** prefix are used by both **CodeBuilder** and **CodeCompiler**. Nodes that have **CC** prefix are exclusive to **CodeCompiler** and are usually lowered to **CBNodes** by a **CodeBuilder** specific pass or treated as one of **CB** nodes; for example **CCFunc** inherits **CBLabel** so it's treated as **CBLabel** by **CodeBuilder** and as **CCFunc** by **CodeCompiler**. +### Using Builder -### Using CodeBuilder +The Builder interface was designed to be used as an `Assembler` replacement in case that post-processing of the generated code is required. The code can be modified during or after code generation. The post processing can be done manually or through `Pass` (Code-Builder Pass) object. Builder stores the emitted code as a double-linked list, which allows O(1) insertion and removal. 
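Since the nodes form an ordinary doubly-linked list, simple post-processing doesn't strictly require a `Pass`. The following is a minimal sketch (an editorial illustration, not part of the original patch) that relies only on accessors already used in the snippets of this README - `firstNode()`, `next()`, and `actsAsInst()` - to count how many instruction nodes a builder currently holds:

```c++
// Minimal sketch: walk the Builder's node list and count instruction nodes.
// It assumes the same `using namespace asmjit;` setup as the other examples.
static size_t countInstNodes(const BaseBuilder& cb) {
  size_t count = 0;
  for (const BaseNode* node = cb.firstNode(); node; node = node->next()) {
    if (node->actsAsInst()) // InstNode and nodes that behave like instructions.
      count++;
  }
  return count;
}
```

More involved edits follow the same pattern - find the node of interest, reposition the cursor with `setCursor()`, and emit at that point, which is exactly what the prolog/epilog injection example below does.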
-**CodeBuilder** was designed to be used as an **Assembler** replacement in case that post-processing of the generated code is required. The code can be modified during or after code generation. The post processing can be done manually or through **Pass** (Code-Builder Pass) object. **CodeBuilder** stores the emitted code as a double-linked list, which allows O(1) insertion and removal. - -The code representation used by **CodeBuilder** is compatible with everything AsmJit provides. Each instruction is stored as **CBInst**, which contains instruction id, options, and operands. Each instruction emitted will create a new **CBInst** instance and add it to the current cursor in the double-linked list of nodes. Since the instruction stream used by **CodeBuilder** can be manipulated, we can rewrite the **SumInts** example into the following: +The code representation used by `Builder` is compatible with everything AsmJit provides. Each instruction is stored as `InstNode`, which contains instruction id, options, and operands. Each instruction emitted will create a new `InstNode` instance and add it to the current cursor in the double-linked list of nodes. Since the instruction stream used by `Builder` can be manipulated, we can rewrite the **SumInts** example into the following: ```c++ #include @@ -926,35 +955,35 @@ using namespace asmjit; typedef void (*SumIntsFunc)(int* dst, const int* a, const int* b); // Small helper function to print the current content of `cb`. -static void dumpCode(CodeBuilder& cb, const char* phase) { +static void dumpCode(BaseBuilder& cb, const char* phase) { StringBuilder sb; cb.dump(sb); - printf("%s:\n%s\n", phase, sb.getData()); + printf("%s:\n%s\n", phase, sb.data()); } int main(int argc, char* argv[]) { - JitRuntime rt; // Create JIT Runtime. - + JitRuntime jit; // Create JIT Runtime. CodeHolder code; // Create a CodeHolder. - code.init(rt.getCodeInfo()); // Initialize it to match `rt`. - X86Builder cb(&code); // Create and attach X86Builder to `code`. + + code.init(jit.codeInfo()); // Initialize it to match `jit`. + x86::Builder cb(&code); // Create and attach x86::Builder to `code`. // Decide which registers will be mapped to function arguments. Try changing // registers of `dst`, `src_a`, and `src_b` and see what happens in function's // prolog and epilog. - X86Gp dst = cb.zax(); - X86Gp src_a = cb.zcx(); - X86Gp src_b = cb.zdx(); + x86::Gp dst = cb.zax(); + x86::Gp src_a = cb.zcx(); + x86::Gp src_b = cb.zdx(); - X86Xmm vec0 = x86::xmm0; - X86Xmm vec1 = x86::xmm1; + X86::Xmm vec0 = x86::xmm0; + X86::Xmm vec1 = x86::xmm1; // Create and initialize `FuncDetail`. FuncDetail func; - func.init(FuncSignature3(CallConv::kIdHost)); + func.init(FuncSignatureT(CallConv::kIdHost)); // Remember prolog insertion point. - CBNode* prologInsertionPoint = cb.getCursor(); + BaseNode* prologInsertionPoint = cb.cursor(); // Emit function body: cb.movdqu(vec0, x86::ptr(src_a)); // Load 4 ints from [src_a] to XMM0. @@ -963,42 +992,42 @@ int main(int argc, char* argv[]) { cb.movdqu(x86::ptr(dst), vec0); // Store the result to [dst]. // Remember epilog insertion point. - CBNode* epilogInsertionPoint = cb.getCursor(); + BaseNode* epilogInsertionPoint = cb.cursor(); // Let's see what we have now. dumpCode(cb, "Raw Function"); // Now, after we emitted the function body, we can insert the prolog, arguments - // allocation, and epilog. This is not possible with using pure X86Assembler. - FuncFrameInfo ffi; - ffi.setDirtyRegs(X86Reg::kKindVec, // Make XMM0 and XMM1 dirty. 
VEC kind - Utils::mask(0, 1)); // describes XMM|YMM|ZMM registers. + // allocation, and epilog. This is not possible with using pure x86::Assembler. + FuncFrame frame; + frame.init(func); - FuncArgsMapper args(&func); // Create function arguments mapper. + // Make XMM0 and XMM1 dirty; `kGroupVec` describes XMM|YMM|ZMM registers. + frame.setDirtyRegs(x86::Reg::kGroupVec, IntUtils::mask(0, 1)); + + FuncArgsAssignment args(&func); // Create arguments assignment context. args.assignAll(dst, src_a, src_b); // Assign our registers to arguments. - args.updateFrameInfo(ffi); // Reflect our args in FuncFrameInfo. - - FuncFrameLayout layout; // Create the FuncFrameLayout, which - layout.init(func, ffi); // contains metadata of prolog/epilog. + args.updateFrame(frame); // Reflect our args in FuncFrame. + frame.finalize(); // Finalize the FuncFrame (updates it). // Insert function prolog and allocate arguments to registers. cb.setCursor(prologInsertionPoint); - FuncUtils::emitProlog(&cb, layout); - FuncUtils::allocArgs(&cb, layout, args); + cb.emitProlog(frame); + cb.emitArgsAssignment(frame, args); // Insert function epilog. cb.setCursor(epilogInsertionPoint); - FuncUtils::emitEpilog(&cb, layout); + cb.emitEpilog(frame); - // Let's see how the function prolog and epilog looks. + // Let's see how the function's prolog and epilog looks. dumpCode(cb, "Prolog & Epilog"); - // IMPORTANT: CodeBuilder requires `finalize()` to be called to serialize - // the code to the Assembler (it automatically creates one if not attached). + // IMPORTANT: Builder requires `finalize()` to be called to serialize the code + // to the Assembler (it automatically creates one if not attached). cb.finalize(); SumIntsFunc fn; - Error err = rt.add(&fn, &code); // Add the generated code to the runtime. + Error err = jit.add(&fn, &code); // Add the generated code to the runtime. if (err) return 1; // Handle a possible error case. // Execute the generated function. @@ -1010,7 +1039,7 @@ int main(int argc, char* argv[]) { // Prints {5 8 4 9} printf("{%d %d %d %d}\n", out[0], out[1], out[2], out[3]); - rt.release(fn); // Remove the function from the runtime. + jit.release(fn); // Remove the function from the runtime. return 0; } ``` @@ -1036,110 +1065,115 @@ ret {5 8 4 9} ``` -The number of use-cases of **X86Builder** is not limited and highly depends on your creativity and experience. The previous example can be easily improved to collect all dirty registers inside the function programmatically and to pass them to `ffi.setDirtyRegs()`: +The number of use-cases of **x86::Builder** is not limited and highly depends on your creativity and experience. The previous example can be easily improved to collect all dirty registers inside the function programmatically and to pass them to `frame.setDirtyRegs()`: ```c++ +#include + using namespace asmjit; -// NOTE: This function doesn't cover all possible instructions. It ignores +// NOTE: This function doesn't cover all possible constructs. It ignores // instructions that write to implicit registers that are not part of the // operand list. It also counts read-only registers. Real implementation -// would be a bit more complicated, but still relatively easy. -static void collectDirtyRegs(const CBNode* first, const CBNode* last, uint32_t regMask[X86Reg::kKindCount]) { - const CBNode* node = first; +// would be a bit more complicated, but still relatively easy to implement. 
+static void collectDirtyRegs(const BaseNode* first, const BaseNode* last, uint32_t regMask[BaseReg::kGroupVirt]) { + const BaseNode* node = first; while (node) { if (node->actsAsInst()) { - const CBInst* inst = node->as<CBInst>(); - const Operand* opArray = inst->getOpArray(); + const InstNode* inst = node->as<InstNode>(); + const Operand* opArray = inst->operands(); - for (uint32_t i = 0, opCount = inst->getOpCount(); i < opCount; i++) { + for (uint32_t i = 0, opCount = inst->opCount(); i < opCount; i++) { const Operand& op = opArray[i]; if (op.isReg()) { - const X86Reg& reg = op.as<X86Reg>(); - regMask[reg.getKind()] |= 1U << reg.getId(); + const x86::Reg& reg = op.as<x86::Reg>(); + if (reg.group() < BaseReg::kGroupVirt) + regMask[reg.group()] |= 1u << reg.id(); } } } if (node == last) break; - node = node->getNext(); - } -} + node = node->next(); + } +} -static void setDirtyRegsOfFFI(const X86Builder& cb, FuncFrameInfo& ffi) { - uint32_t regMask[X86Reg::kKindCount] = { 0 }; - collectDirtyRegs(cb.getFirstNode(), cb.getLastNode(), regMask); +static void setDirtyRegsOfFuncFrame(const x86::Builder& cb, FuncFrame& frame) { + uint32_t regMask[BaseReg::kGroupVirt] = { 0 }; + collectDirtyRegs(cb.firstNode(), cb.lastNode(), regMask); // X86/X64 ABIs only require to save GP/XMM registers: - ffi.setDirtyRegs(X86Reg::kKindGp, regMask[X86Reg::kKindGp]); - ffi.setDirtyRegs(X86Reg::kKindVec, regMask[X86Reg::kKindVec]); + frame.setDirtyRegs(x86::Reg::kGroupGp , regMask[x86::Reg::kGroupGp ]); + frame.setDirtyRegs(x86::Reg::kGroupVec, regMask[x86::Reg::kGroupVec]); } ``` -### Using X86Assembler or X86Builder through X86Emitter +### Using x86::Assembler or x86::Builder through x86::Emitter -Even when **Assembler** and **CodeBuilder** implement the same interface defined by **CodeEmitter** their platform dependent variants (**X86Assembler** and **X86Builder**, respective) cannot be interchanged or casted to each other by using C++'s `static_cast<>`. The main reason is the inheritance graph of these classes is different and cast-incompatible, as illustrated in the following graph: +Even when **Assembler** and **Builder** provide the same interface as defined by **BaseEmitter**, their platform dependent variants (**x86::Assembler** and **x86::Builder**, respectively) cannot be interchanged or cast to each other by using C++'s `static_cast<>`.
The main reason is the inheritance graph of these classes is different and cast-incompatible, as illustrated in the following graph: ``` - +--------------+ +=======================+ - +----------------------->| X86Emitter |<--+--# X86EmitterImplicitT<> #<--+ - | +--------------+ | +=======================+ | - | (abstract) | (mixin) | - | +--------------+ +~~~~~~~~~~~~~~+ | | - +-->| Assembler |---->| X86Assembler |<--+ | - | +--------------+ +~~~~~~~~~~~~~~+ | | - | (abstract) (final) | | -+===============+ | +--------------+ +~~~~~~~~~~~~~~+ | | -# CodeEmitter #--+-->| CodeBuilder |--+->| X86Builder |<--+ | -+===============+ +--------------+ | +~~~~~~~~~~~~~~+ | - (abstract) (abstract) | (final) | - +---------------------+ | - | | - | +--------------+ +~~~~~~~~~~~~~~+ +=======================+ | - +-->| CodeCompiler |---->| X86Compiler |<-----# X86EmitterExplicitT<> #---+ - +--------------+ +~~~~~~~~~~~~~~+ +=======================+ + +--------------+ +=========================+ + +----------------------->| x86::Emitter |<--+--# x86::EmitterImplicitT<> #<--+ + | +--------------+ | +=========================+ | + | (abstract) | (mixin) | + | +--------------+ +~~~~~~~~~~~~~~+ | | + +-->| BaseAssembler|---->|x86::Assembler|<--+ | + | +--------------+ +~~~~~~~~~~~~~~+ | | + | (abstract) (final) | | ++===============+ | +--------------+ +~~~~~~~~~~~~~~+ | | +# BaseEmitter #--+-->| BaseBuilder |--+->| x86::Builder |<--+ | ++===============+ +--------------+ | +~~~~~~~~~~~~~~+ | + (abstract) (abstract) | (final) | + +---------------------+ | + | | + | +--------------+ +~~~~~~~~~~~~~~+ +=========================+ | + +-->| BaseCompiler |---->| x86::Compiler|<-----# x86::EmitterExplicitT<> #---+ + +--------------+ +~~~~~~~~~~~~~~+ +=========================+ (abstract) (final) (mixin) ``` -The graph basically shows that it's not possible to cast **X86Assembler** to **X86Builder** and vice versa. However, since both **X86Assembler** and **X86Builder** share the same interface defined by both **CodeEmitter** and **X86EmmiterImplicitT** a class called **X86Emitter** was introduced to make it possible to write a function that can emit to both **X86Assembler** and **X86Builder**. Note that **X86Emitter** cannot be created, it's abstract and has private constructors and destructors; it was only designed to be casted to and used as an interface. +The graph basically shows that it's not possible to cast `x86::Assembler` to `x86::Builder` and vice versa. However, since both `x86::Assembler` and `x86::Builder` share the same interface defined by both `BaseEmitter` and `x86::EmmiterImplicitT` a class called `x86::Emitter` was introduced to make it possible to write a function that can emit to both `x86::Assembler` and `x86::Builder`. Note that `x86::Emitter` cannot be created, it's abstract and has private constructors and destructors; it was only designed to be casted to and used as an interface. 
-Each X86 emitter implements a member function called **asEmitter()**, which casts the instance to the **X86Emitter**, as illustrated on the next example: +Each X86 emitter implements a member function called `as<x86::Emitter>()`, which casts the instance to the `x86::Emitter`, as illustrated in the next example: ```c++ +#include + using namespace asmjit; -static void emitSomething(X86Emitter* e) { +static void emitSomething(x86::Emitter* e) { e->mov(x86::eax, x86::ebx); } static void assemble(CodeHolder& code, bool useAsm) { if (useAsm) { - X86Assembler a(&code); - emitSomething(a.asEmitter()); + x86::Assembler a(&code); + emitSomething(a.as<x86::Emitter>()); } else { - X86Builder cb(&code); - emitSomething(cb.asEmitter()); + x86::Builder cb(&code); + emitSomething(cb.as<x86::Emitter>()); - // IMPORTANT: CodeBuilder requires `finalize()` to be called to serialize - // the code to the Assembler (it automatically creates one if not attached). + // IMPORTANT: Builder requires `finalize()` to be called to serialize the + // code to the Assembler (it automatically creates one if not attached). cb.finalize(); } } ``` -The example above shows how to create a function that can emit code to either **X86Assembler** or **X86Builder** through **X86Emitter**, which provides emitter-neutral functionality. **X86Emitter**, however, doesn't provide any emitter **X86Assembler** or **X86Builder** specific functionality like **setCursor()**. +The example above shows how to create a function that can emit code to either **x86::Assembler** or **x86::Builder** through **x86::Emitter**, which provides emitter-neutral functionality. **x86::Emitter**, however, doesn't provide any **x86::Assembler** or **x86::Builder** specific functionality like **setCursor()**. -CodeCompiler ------------ -**CodeCompiler** is a high-level code emitter that provides virtual registers and automatically handles function calling conventions. It's still architecture dependent, but makes the code generation much easier by offering a built-in register allocator and function builder. Functions are essential; the first-step to generate some code is to define the signature of the function you want to generate (before generating the function body). Function arguments and return value(s) are handled by assigning virtual registers to them. Similarly, function calls are handled the same way. +Compiler Interface +------------------ -**CodeCompiler** also makes the use of passes (introduced by **CodeBuilder**) and automatically adds an architecture-dependent register allocator pass to the list of passes when attached to **CodeHolder**. +**Compiler** is a high-level code emitter that provides virtual registers and automatically handles function calling conventions. It's still architecture dependent, but makes the code generation much easier by offering a built-in register allocator and function builder. Functions are essential; the first step to generate some code is to define the signature of the function you want to generate (before generating the function body). Function arguments and return value(s) are handled by assigning virtual registers to them. Similarly, function calls are handled the same way. +**Compiler** also makes use of passes (introduced by **Builder**) and automatically adds an architecture-dependent register allocator pass to the list of passes when attached to **CodeHolder**. ### Compiler Basics -The first **CodeCompiler** example shows how to generate a function that simply returns an integer value.
It's an analogy to the very first example: +The first **Compiler** example shows how to generate a function that simply returns an integer value. It's an analogy to the very first example: ```c++ #include @@ -1151,31 +1185,31 @@ using namespace asmjit; typedef int (*Func)(void); int main(int argc, char* argv[]) { - JitRuntime rt; // Runtime specialized for JIT code execution. - + JitRuntime jit; // Runtime specialized for JIT code execution. CodeHolder code; // Holds code and relocation information. - code.init(rt.getCodeInfo()); // Initialize to the same arch as JIT runtime. - X86Compiler cc(&code); // Create and attach X86Compiler to `code`. - cc.addFunc(FuncSignature0()); // Begin a function of `int fn(void)` signature. + code.init(jit.codeInfo()); // Initialize to the same arch as JIT runtime. + x86::Compiler cc(&code); // Create and attach x86::Compiler to `code`. - X86Gp vReg = cc.newGpd(); // Create a 32-bit general purpose register. + cc.addFunc(FuncSignatureT()); // Begin a function of `int fn(void)` signature. + + x86::Gp vReg = cc.newGpd(); // Create a 32-bit general purpose register. cc.mov(vReg, 1); // Move one to our virtual register `vReg`. cc.ret(vReg); // Return `vReg` from the function. cc.endFunc(); // End of the function body. cc.finalize(); // Translate and assemble the whole `cc` content. - // ----> X86Compiler is no longer needed from here and can be destroyed <---- + // ----> x86::Compiler is no longer needed from here and can be destroyed <---- Func fn; - Error err = rt.add(&fn, &code); // Add the generated code to the runtime. + Error err = jit.add(&fn, &code); // Add the generated code to the runtime. if (err) return 1; // Handle a possible error returned by AsmJit. // ----> CodeHolder is no longer needed from here and can be destroyed <---- int result = fn(); // Execute the generated code. printf("%d\n", result); // Print the resulting "1". - rt.release(fn); // RAII, but let's make it explicit. + jit.release(fn); // RAII, but let's make it explicit. return 0; } ``` @@ -1192,14 +1226,14 @@ using namespace asmjit; typedef void (*MemCpy32)(uint32_t* dst, const uint32_t* src, size_t count); int main(int argc, char* argv[]) { - JitRuntime rt; // Runtime specialized for JIT code execution. - + JitRuntime jit; // Runtime specialized for JIT code execution. CodeHolder code; // Holds code and relocation information. - code.init(rt.getCodeInfo()); // Initialize to the same arch as JIT runtime. - X86Compiler cc(&code); // Create and attach X86Compiler to `code`. + code.init(jit.codeInfo()); // Initialize to the same arch as JIT runtime. + x86::Compiler cc(&code); // Create and attach x86::Compiler to `code`. + cc.addFunc( // Begin the function of the following signature: - FuncSignature3()); // 3rd argument - size_t (machine reg-size). @@ -1207,9 +1241,9 @@ int main(int argc, char* argv[]) { Label L_Loop = cc.newLabel(); // Start of the loop. Label L_Exit = cc.newLabel(); // Used to exit early. - X86Gp dst = cc.newIntPtr("dst"); // Create `dst` register (destination pointer). - X86Gp src = cc.newIntPtr("src"); // Create `src` register (source pointer). - X86Gp cnt = cc.newUIntPtr("cnt"); // Create `cnt` register (loop counter). + x86::Gp dst = cc.newIntPtr("dst"); // Create `dst` register (destination pointer). + x86::Gp src = cc.newIntPtr("src"); // Create `src` register (source pointer). + x86::Gp cnt = cc.newUIntPtr("cnt"); // Create `cnt` register (loop counter). cc.setArg(0, dst); // Assign `dst` argument. cc.setArg(1, src); // Assign `src` argument. 
@@ -1220,7 +1254,7 @@ int main(int argc, char* argv[]) { cc.bind(L_Loop); // Bind the beginning of the loop here. - X86Gp tmp = cc.newInt32("tmp"); // Copy a single dword (4 bytes). + x86::Gp tmp = cc.newInt32("tmp"); // Copy a single dword (4 bytes). cc.mov(tmp, x86::dword_ptr(src)); // Load DWORD from [src] address. cc.mov(x86::dword_ptr(dst), tmp); // Store DWORD to [dst] address. @@ -1234,30 +1268,29 @@ int main(int argc, char* argv[]) { cc.endFunc(); // End of the function body. cc.finalize(); // Translate and assemble the whole `cc` content. - // ----> X86Compiler is no longer needed from here and can be destroyed <---- + // ----> x86::Compiler is no longer needed from here and can be destroyed <---- MemCpy32 memcpy32; - Error err = rt.add(&memcpy32, &code); // Add the generated code to the runtime. + Error err = jit.add(&memcpy32, &code); // Add the generated code to the runtime. if (err) return 1; // Handle a possible error returned by AsmJit. // ----> CodeHolder is no longer needed from here and can be destroyed <---- // Test the generated code. - uint32_t srcData[6] = { 1, 2, 3, 5, 8, 13 }; - uint32_t dstData[6]; - - memcpy32(dstData, srcData, 6); // Calls the generated function. + uint32_t input[6] = { 1, 2, 3, 5, 8, 13 }; + uint32_t output[6]; + memcpy32(output, input, 6); for (uint32_t i = 0; i < 6; i++) - printf("%d\n", dstData[i]); + printf("%d\n", output[i]); - rt.release(memcpy32); // RAII, but let's make it explicit. + jit.release(memcpy32); // RAII, but let's make it explicit. return 0; } ``` ### Recursive Functions -It's possible to create more functions by using the same `X86Compiler` instance and make links between them. In such case it's important to keep the pointer to the `CCFunc` node. The first example creates a simple Fibonacci function that calls itself recursively: +It's possible to create more functions by using the same `x86::Compiler` instance and make links between them. In such case it's important to keep the pointer to the `FuncNode` node. The first example creates a simple Fibonacci function that calls itself recursively: ```c++ #include @@ -1269,18 +1302,18 @@ using namespace asmjit; typedef uint32_t (*Fibonacci)(uint32_t x); int main(int argc, char* argv[]) { - JitRuntime rt; // Runtime specialized for JIT code execution. - + JitRuntime jit; // Runtime specialized for JIT code execution. CodeHolder code; // Holds code and relocation information. - code.init(rt.getCodeInfo()); // Initialize to the same arch as JIT runtime. - X86Compiler cc(&code); // Create and attach X86Compiler to `code`. - CCFunc* func = cc.addFunc( // Begin of the Fibonacci function, `addFunc()` - FuncSignature1()); // Returns a pointer to the `CCFunc` node. + code.init(jit.codeInfo()); // Initialize to the same arch as JIT runtime. + x86::Compiler cc(&code); // Create and attach x86::Compiler to `code`. - Label L_Exit = cc.newLabel(); // Exit label. - X86Gp x = cc.newU32(); // Function `x` argument. - X86Gp y = cc.newU32(); // Temporary. + FuncNode* func = cc.addFunc( // Begin of the Fibonacci function, `addFunc()` + FuncSignatureT()); // Returns a pointer to the `FuncNode` node. + + Label L_Exit = cc.newLabel() // Exit label. + x86::Gp x = cc.newU32(); // Function `x` argument. + x86::Gp y = cc.newU32(); // Temporary. cc.setArg(0, x); @@ -1290,9 +1323,9 @@ int main(int argc, char* argv[]) { cc.mov(y, x); // Make copy of the original `x`. cc.dec(x); // Decrease `x`. - CCFuncCall* call = cc.call( // Function call: - func->getLabel(), // Function address or Label. 
- FuncSignature1()); // Function signature. + FuncCallNode* call = cc.call( // Function call: + func->label(), // Function address or Label. + FuncSignatureT()); // Function signature. call->setArg(0, x); // Assign `x` as the first argument and call->setRet(0, x); // assign `x` as a return value as well. @@ -1304,23 +1337,23 @@ int main(int argc, char* argv[]) { cc.endFunc(); // End of the function body. cc.finalize(); // Translate and assemble the whole `cc` content. - // ----> X86Compiler is no longer needed from here and can be destroyed <---- + // ----> x86::Compiler is no longer needed from here and can be destroyed <---- Fibonacci fib; - Error err = rt.add(&fib, &code); // Add the generated code to the runtime. + Error err = jit.add(&fib, &code); // Add the generated code to the runtime. if (err) return 1; // Handle a possible error returned by AsmJit. // ----> CodeHolder is no longer needed from here and can be destroyed <---- printf("Fib(%u) -> %u\n", 8, fib(8)); // Test the generated code. - rt.release(fib); // RAII, but let's make it explicit. + jit.release(fib); // RAII, but let's make it explicit. return 0; } ``` ### Stack Management -**CodeCompiler** manages function's stack-frame, which is used by the register allocator to spill virtual registers. It also provides an interface to allocate user-defined block of the stack, which can be used as a temporary storage by the generated function. In the following example a stack of 256 bytes size is allocated, filled by bytes starting from 0 to 255 and then iterated again to sum all the values. +**Compiler** manages function's stack-frame, which is used by the register allocator to spill virtual registers. It also provides an interface to allocate user-defined block of the stack, which can be used as a temporary storage by the generated function. In the following example a stack of 256 bytes size is allocated, filled by bytes starting from 0 to 255 and then iterated again to sum all the values. ```c++ #include @@ -1332,19 +1365,19 @@ using namespace asmjit; typedef int (*Func)(void); int main(int argc, char* argv[]) { - JitRuntime rt; // Runtime specialized for JIT code execution. - + JitRuntime jit; // Runtime specialized for JIT code execution. CodeHolder code; // Holds code and relocation information. - code.init(rt.getCodeInfo()); // Initialize to the same arch as JIT runtime. - X86Compiler cc(&code); // Create and attach X86Compiler to `code`. - cc.addFunc(FuncSignature0()); // Create a function that returns 'int'. + code.init(jit.codeInfo()); // Initialize to the same arch as JIT runtime. + x86::Compiler cc(&code); // Create and attach x86::Compiler to `code`. - X86Gp p = cc.newIntPtr("p"); - X86Gp i = cc.newIntPtr("i"); + cc.addFunc(FuncSignatureT()); // Create a function that returns 'int'. - X86Mem stack = cc.newStack(256, 4); // Allocate 256 bytes on the stack aligned to 4 bytes. - X86Mem stackIdx(stack); // Copy of `stack` with `i` added. + x86::Gp p = cc.newIntPtr("p"); + x86::Gp i = cc.newIntPtr("i"); + + x86::Mem stack = cc.newStack(256, 4); // Allocate 256 bytes on the stack aligned to 4 bytes. + x86::Mem stackIdx(stack); // Copy of `stack` with `i` added. stackIdx.setIndex(i); // stackIdx <- stack[i]. stackIdx.setSize(1); // stackIdx <- byte ptr stack[i]. @@ -1367,8 +1400,8 @@ int main(int argc, char* argv[]) { cc.jb(L1); // goto L1; // Second loop, sum all bytes stored in `stack`. 
- X86Gp sum = cc.newI32("sum"); - X86Gp val = cc.newI32("val"); + x86::Gp sum = cc.newI32("sum"); + x86::Gp val = cc.newI32("val"); cc.xor_(i, i); cc.xor_(sum, sum); @@ -1386,37 +1419,37 @@ int main(int argc, char* argv[]) { cc.endFunc(); // End of the function body. cc.finalize(); // Translate and assemble the whole `cc` content. - // ----> X86Compiler is no longer needed from here and can be destroyed <---- + // ----> x86::Compiler is no longer needed from here and can be destroyed <---- Func func; - Error err = rt.add(&func, &code); // Add the generated code to the runtime. + Error err = jit.add(&func, &code); // Add the generated code to the runtime. if (err) return 1; // Handle a possible error returned by AsmJit. // ----> CodeHolder is no longer needed from here and can be destroyed <---- - printf("Func() -> %d\n", func()); // Test the generated code. + printf("Func() -> %d\n", func()); // Test the generated code. - rt.release(func); // RAII, but let's make it explicit. + jit.release(func); // RAII, but let's make it explicit. return 0; } ``` ### Constant Pool -**CodeCompiler** provides two constant pools for a general purpose code generation - local and global. Local constant pool is related to a single **CCFunc** node and is generally flushed after the function body, and global constant pool is flushed at the end of the generated code by **CodeCompiler::finalize()**. +**Compiler** provides two constant pools for a general purpose code generation - local and global. Local constant pool is related to a single **FuncNode** node and is generally flushed after the function body, and global constant pool is flushed at the end of the generated code by **Compiler::finalize()**. ```c++ #include using namespace asmjit; -static void exampleUseOfConstPool(X86Compiler& cc) { - cc.addFunc(FuncSignature0()); +static void exampleUseOfConstPool(x86::Compiler& cc) { + cc.addFunc(FuncSignatureT()); - X86Gp v0 = cc.newGpd("v0"); - X86Gp v1 = cc.newGpd("v1"); + x86::Gp v0 = cc.newGpd("v0"); + x86::Gp v1 = cc.newGpd("v1"); - X86Mem c0 = cc.newInt32Const(kConstScopeLocal, 200); - X86Mem c1 = cc.newInt32Const(kConstScopeLocal, 33); + x86::Mem c0 = cc.newInt32Const(ConstPool::kScopeLocal, 200); + x86::Mem c1 = cc.newInt32Const(ConstPool::kScopeLocal, 33); cc.mov(v0, c0); cc.mov(v1, c1); @@ -1427,158 +1460,36 @@ static void exampleUseOfConstPool(X86Compiler& cc) { } ``` -### Code Injection - -Both **CodeBuilder** and **CodeCompiler** emitters store their nodes in a double-linked list, which makes it easy to manipulate during the code generation or after it. Each node is always emitted next to the current **cursor** and the cursor is changed to that newly emitted node. Cursor can be explicitly retrieved and assigned by **getCursor()** and **setCursor()**, respectively. - -The following example shows how to inject code at the beginning of the function by providing an **XmmConstInjector** helper class. - -```c++ -#include -#include -#include - -using namespace asmjit; - -// Simple injector that injects `movaps()` to the beginning of the function. -class XmmConstInjector { -public: - struct Slot { - X86Xmm reg; - Data128 value; - }; - - XmmConstInjector(X86Compiler* cc) - : _cc(cc), - _injectTarget(cc->getCursor()) {} - - X86Xmm xmmOf(const Data128& value) { - // First reuse the register if it already holds the given `value`. 
- for (std::vector::const_iterator it(_slots.begin()); it != _slots.end(); ++it) { - const Slot& slot = *it; - if (::memcmp(&slot.value, &value, sizeof(Data128)) == 0) - return slot.reg; - } - - // Create a new register / value pair and store in `_slots`. - X86Xmm reg = _cc->newXmm("const%u", static_cast(_slots.size())); - - Slot newSlot; - newSlot.value = value; - newSlot.reg = reg; - _slots.push_back(newSlot); - - // Create the constant and inject it after the injectTarget. - X86Mem mem = _cc->newConst(kConstScopeGlobal, &value, 16); - CBNode* saved = _cc->setCursor(_injectTarget); - - _cc->movaps(reg, mem); - // Make sure we inject next load after the load we just emitted. - _injectTarget = _cc->getCursor(); - - // Restore the original cursor, so the code emitting can continue from where it was. - _cc->setCursor(saved); - return reg; - } - - X86Compiler* _cc; - CBNode* _injectTarget; - std::vector _slots; -}; - -// Signature of the generated function. -typedef void (*Func)(uint16_t*); - -int main(int argc, char* argv[]) { - JitRuntime rt; // Runtime specialized for JIT code execution. - - FileLogger logger(stdout); - - CodeHolder code; // Holds code and relocation information. - code.init(rt.getCodeInfo()); // Initialize to the same arch as JIT runtime. - code.setLogger(&logger); - - X86Compiler cc(&code); // Create and attach X86Compiler to `code`. - cc.addFunc( - FuncSignature1()); // Create a function that accepts `uint16_t[]'. - - X86Gp p = cc.newIntPtr("p"); // Create and Assign the function argument `p`. - cc.setArg(0, p); - - XmmConstInjector injector(&cc); // The injector will inject the code |here|. - - X86Xmm x = cc.newXmm("x"); - cc.movups(x, x86::ptr(p)); // Load 16 bytes from `[p]` to `x`. - - // Now use injector to add constants to the constant pool and to inject their loads. - Data128 data0 = Data128::fromU16(0x80); - Data128 data1 = Data128::fromU16(0x13); - - cc.paddw(x, injector.xmmOf(data0)); // x.u16 = x.u16 + 0x80. - cc.pmullw(x, injector.xmmOf(data1)); // x.u16 = x.u16 * 0x13. - cc.movups(x86::ptr(p), x); // Write back to `[p]`. - - cc.endFunc(); // End of the function body. - cc.finalize(); // Translate and assemble the whole `cc` content. - // ----> X86Compiler is no longer needed from here and can be destroyed <---- - - Func func; - Error err = rt.add(&func, &code); // Add the generated code to the runtime. - if (err) return 1; // Handle a possible error returned by AsmJit. - // ----> CodeHolder is no longer needed from here and can be destroyed <---- - - // Test the generated function - uint16_t vec[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; - func(vec); - - for (uint32_t i = 0; i < 8; i++) - printf("%u ", vec[i]); - printf("\n"); - - rt.release(func); // RAII, but let's make it explicit. - return 0; -} -``` - -The code generated would look similar to: - -```x86asm -L0: -movaps xmm0, oword [L2] ; movaps const0, oword [L2] -movaps xmm1, oword [L2+16] ; movaps const1, oword [L2+16] -movups xmm2, [rdi] ; movups x, [p] -paddw xmm2, xmm0 ; paddw x, const0 -pmullw xmm2, xmm1 ; pmullw x, const1 -movups [rdi], xmm2 ; movups [p], x -L1: -ret -.align 16 -L2: -.data 80008000800080008000800080008000 -.data 13001300130013001300130013001300 -``` - -There are many other applications of code injection, usually it's used to lazy-add some initialization code and such, but the application is practically unlimited. Advanced Features ----------------- ### Logging -Failures are common, especially when working at machine-code level. 
AsmJit does already a good job with function overloading to prevent from emitting semantically incorrect instructions, but it can't prevent from emitting code that is semantically correct, but contains bugs. Logging has always been an important part of AsmJit's infrastructure and looking at logs could become handy when your code doesn't work as expected. +The initial phase of any project that generates machine code is not always smooth. Failure cases are common especially at the beginning of the project and AsmJit provides a logging functionality to address this issue. AsmJit does already a good job with function overloading to prevent from emitting semantically incorrect instructions, but it can't prevent from emitting machine code that is semantically correct, but doesn't work when it's executed. Logging has always been an important part of AsmJit's infrastructure and looking at logs can sometimes reveal code generation issues quickly. -AsmJit's **Logger** provides the following: - * Defines basic logging interface used by AsmJit, - * Allows to reimplement its `Error _log(const char* str, size_t len)` function. - * **FileLogger** implements logging into a C `FILE*` stream. - * **StringLogger** implements logging into AsmJit's `StringBuilder`. +AsmJit provides API for logging and formatting: + * `Logger` - A logger that you can pass to `CodeHolder` and all emitters that inherit `BaseEmitter`. + * `FormatOptions` - Formatting options that can change how instructions and operands are formatted. -**Logger** also contains useful options that control the output and what should be logged: +AsmJit's `Logger` serves the following purposes: + * Provides a basic foundation for logging. + * Abstract class leaving the implementation (destination) on users. Two backends are built-in for simplicity: + * `FileLogger` implements logging into a standard `std::FILE` stream. + * `StringLogger` stores the logged text in `StringBuilder` instance. - * **Logger::kOptionBinaryForm** - Output also binary representation of each instruction. - * **Logger::kOptionImmExtended** - Output meaning of some immediate values. - * **Logger::kOptionHexImmediate** - Display all immediates in hexadecimal. - * **Logger::kOptionHexDisplacement** - Display all offsets in hexadecimal. +AsmJit's `FormatOptions` provides the following to customize the formatting of instructions and operands: + * Flags: + * `FormatOptions::kFlagMachineCode` - Show a machine code of each encoded instruction. + * `FormatOptions::kFlagExplainConsts` - Show a text explanation of some immediate values that are used as predicates. + * `FormatOptions::kFlagHexImms` - Use hexadecimal notation to output immediates. + * `FormatOptions::kFlagHexOffsets` - Use hexadecimal notation to output offsets. + * `FormatOptions::kFlagRegCasts` - Show casts between various register types (compiler). + * `FormatOptions::kFlagPositions` - Show positions associated with nodes (compiler). + * Indentation: + * `FormatOptions::kIndentationCode` - Indentation of instructions and directives. + * `FormatOptions::kIndentationLabel` - Indentation of labels. + * `FormatOptions::kIndentationComment` - Indentation of whole-line comments. **Logger** is typically attached to **CodeHolder** and all attached code emitters automatically use it: @@ -1589,11 +1500,11 @@ AsmJit's **Logger** provides the following: using namespace asmjit; int main(int argc, char* argv[]) { - JitRuntime rt; // Runtime specialized for JIT code execution. 
+  JitRuntime jit;              // Runtime specialized for JIT code execution. FileLogger logger(stdout);   // Logger should always survive the CodeHolder. CodeHolder code;             // Holds code and relocation information. -  code.init(rt.getCodeInfo());  // Initialize to the same arch as JIT runtime. +  code.init(jit.codeInfo());   // Initialize to the same arch as JIT runtime. code.setLogger(&logger);     // Attach the `logger` to `code` holder. // ... code as usual, everything you emit will be logged to `stdout` ... @@ -1604,17 +1515,17 @@ int main(int argc, char* argv[]) { ### Error Handling -AsmJit uses error codes to represent and return errors. Every function where error can occur returns **Error**. Exceptions are never thrown by AsmJit even in extreme conditions like out-of-memory. Errors should never be ignored, however, checking errors after each asmjit API call would simply overcomplicate the whole code generation. To handle these errors AsmJit provides **ErrorHandler**, which contains **handleError()**: +AsmJit uses error codes to represent and return errors. Every function where an error can occur returns **Error**. Exceptions are never thrown by AsmJit, even in extreme conditions like out-of-memory. Errors should never be ignored; however, checking errors after each asmjit API call would simply overcomplicate the whole code generation experience. To make life simpler AsmJit provides **ErrorHandler**, which provides a **handleError()** function: -  `virtual bool handleError(Error err, const char* message, CodeEmitter* origin) = 0;` +  `virtual void handleError(Error err, const char* message, BaseEmitter* origin) = 0;` That can be overridden by AsmJit users and do the following: - * 1. Return `true` or `false` from `handleError()`. If `true` is returned it means that error was handled and AsmJit can continue execution. The error code still be propagated to the caller, but the error origin (CodeEmitter) won't be put into an error state (last-error won't be set and `isInErrorState()` would return `true`). However, `false` reports to AsmJit that the error cannot be handled - in such case it stores the error, which can be retrieved later by `getLastError()`. Returning `false` is the default behavior when no error handler is provided. To put the assembler into a non-error state again `resetLastError()` must be called. - * 2. Throw an exception. AsmJit doesn't use exceptions and is completely exception-safe, but you can throw exception from the error handler if this way is easier / preferred by you. Throwing an exception acts virtually as returning `true` - AsmJit won't store the error. - * 3. Use plain old C's `setjmp()` and `longjmp()`. Asmjit always puts `Assembler` and `Compiler` to a consistent state before calling the `handleError()` so `longjmp()` can be used without issues to cancel the code-generation if an error occurred. This method can be used if exception handling in your project is turned off and you still want some comfort. In most cases it should be safe as AsmJit is based on Zone memory, so no memory leaks will occur if you jump back to a location where `CodeHolder` still exist. + * 1. Record the error and continue (how the error is recorded is up to the user's implementation). + * 2. Throw an exception. AsmJit doesn't use exceptions and is completely exception-safe, but it's perfectly legal to throw an exception from the error handler. + * 3. Use plain old C's `setjmp()` and `longjmp()`.
AsmJit always puts `Assembler` and `Compiler` into a consistent state before calling `handleError()`, so `longjmp()` can be used without issues to cancel the code generation if an error occurred. This method can be used if exception handling in your project is turned off and you still want some comfort. In most cases it should be safe, as AsmJit uses Zone memory and the ownership of memory it allocates always ends with the instance that allocated it. If you use this approach, never jump outside the lifetime of **CodeHolder** and **BaseEmitter**. -**ErrorHandler** is simply attached to **CodeHolder** and will be used by every emitter attached to it. The first example uses error handler that just prints the error, but lets AsmJit continue: +**ErrorHandler** can be attached to **CodeHolder** and/or **BaseEmitter** (which takes priority). The first example uses an error handler that just prints the error, but lets AsmJit continue: ```c++ // Error handling #1: @@ -1623,28 +1534,35 @@ That can be overridden by AsmJit users and do the following: #include // Error handler that just prints the error and lets AsmJit ignore it. -class PrintErrorHandler : public asmjit::ErrorHandler { +class SimpleErrorHandler : public asmjit::ErrorHandler { public: -  // Return `true` to set last error to `err`, return `false` to do nothing. -  bool handleError(asmjit::Error err, const char* message, asmjit::CodeEmitter* origin) override { +  inline SimpleErrorHandler() : err(asmjit::kErrorOk) {} + +  void handleError(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override { +    this->err = err; fprintf(stderr, "ERROR: %s\n", message); -    return false; } + +  asmjit::Error err; }; int main(int argc, char* argv[]) { using namespace asmjit; -  JitRuntime rt; -  PrintErrorHandler eh; +  JitRuntime jit; +  SimpleErrorHandler eh; CodeHolder code; -  code.init(rt.getCodeInfo()); +  code.init(jit.codeInfo()); code.setErrorHandler(&eh); // Try to emit instruction that doesn't exist. -  X86Assembler a(&code); -  a.emit(X86Inst::kIdMov, x86::xmm0, x86::xmm1); +  x86::Assembler a(&code); +  a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1); + +  if (eh.err) { +    // Assembler failed! +  } return 0; } @@ -1664,19 +1582,19 @@ If error happens during instruction emitting / encoding the assembler behaves tr class AsmJitException : public std::exception { public: AsmJitException(asmjit::Error err, const char* message) noexcept -    : error(err), +    : err(err), message(message) {} const char* what() const noexcept override { return message.c_str(); } -  asmjit::Error error; +  asmjit::Error err; std::string message; }; -class ThrowErrorHandler : public asmjit::ErrorHandler { +class ThrowableErrorHandler : public asmjit::ErrorHandler { public: // Throw is possible, functions that use ErrorHandler are never 'noexcept'. -  bool handleError(asmjit::Error err, const char* message, asmjit::CodeEmitter* origin) override { +  void handleError(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override { throw AsmJitException(err, message); } }; @@ -1684,17 +1602,18 @@ public: int main(int argc, char* argv[]) { using namespace asmjit; -  JitRuntime rt; -  ThrowErrorHandler eh; +  JitRuntime jit; +  ThrowableErrorHandler eh; CodeHolder code; -  code.init(rt.getCodeInfo()); +  code.init(jit.codeInfo()); code.setErrorHandler(&eh); +  x86::Assembler a(&code); + // Try to emit instruction that doesn't exist.
try { -    X86Assembler a(&code); -    a.emit(X86Inst::kIdMov, x86::xmm0, x86::xmm1); +    a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1); } catch (const AsmJitException& ex) { printf("EXCEPTION THROWN: %s\n", ex.what()); @@ -1717,7 +1636,7 @@ class LongJmpErrorHandler : public asmjit::ErrorHandler { public: inline LongJmpErrorHandler() : err(asmjit::kErrorOk) {} -  virtual bool handleError(asmjit::Error err, const char* message, asmjit::CodeEmitter* origin) { +  void handleError(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override { this->err = err; longjmp(state, 1); } @@ -1729,17 +1648,18 @@ public: int main(int argc, char* argv[]) { using namespace asmjit; -  JitRuntime rt; +  JitRuntime jit; LongJmpErrorHandler eh; CodeHolder code; -  code.init(rt.getCodeInfo()); +  code.init(jit.codeInfo()); code.setErrorHandler(&eh); -  // Try to emit instruction that doesn't exist. -  X86Assembler a(&code); +  x86::Assembler a(&code); + if (!setjmp(eh.state)) { -    a.emit(X86Inst::kIdMov, x86::xmm0, x86::xmm1); +    // Try to emit instruction that doesn't exist. +    a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1); } else { Error err = eh.err; @@ -1750,10 +1670,183 @@ int main(int argc, char* argv[]) { } ``` +### Code Injection + +Both `Builder` and `Compiler` emitters store their nodes in a doubly-linked list, which makes it easy to manipulate that list during code generation or afterwards. Each node is always emitted next to the current `cursor`, and the cursor is then changed to the newly emitted node. The cursor can be explicitly retrieved and changed by `cursor()` and `setCursor()`, respectively. + +The following example shows how to inject code at the beginning of the function by implementing an `XmmConstInjector` helper class. + +```c++ +``` + ### TODO ...More documentation... + + +Other Topics +------------ + +This section provides quick answers to some recurring questions and topics. + +### Instruction Validation + +AsmJit by default prefers performance when it comes to instruction encoding. The Assembler implementation only validates operands that must be validated to select a proper encoding of the instruction. This means that by default it would accept instructions that do not really exist, like `mov rax, ebx`. This is great in release mode as it makes the assembler faster; however, it's not that great for development as it allows incorrect instructions to pass silently. To fix this, AsmJit contains a feature called **Strict Validation**, which validates each instruction before the Assembler tries to encode it. This feature can also be used without an Assembler instance through the `BaseInst::validate()` API. + +Emitter options are configured through CodeHolder: + +```c++ +CodeHolder code; + +// Enables strict instruction validation for all emitters attached to `code`. +code.addEmitterOptions(BaseEmitter::kOptionStrictValidation); + +// Use either ErrorHandler attached to CodeHolder or Error code returned by +// the Assembler. +x86::Assembler a(&code); +Error err = a.emit(x86::Inst::kIdMov, x86::eax, x86::al); +if (err) { /* failed */ } +``` + +### Label Offsets and Links + +When you use a label that is not yet bound, the Assembler creates a `LabelLink`, which is then added to the CodeHolder's `LabelEntry`. These links are also created for labels that are bound but reference some location in a different section.
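+For illustration, a minimal sketch of the first case (assuming the x86 backend and an already initialized `code` holder; the snippet is not taken from the examples in this document) references a label before binding it, which is exactly when a `LabelLink` is created: + +```c++ +// Illustrative sketch - `L` is referenced before it's bound. +x86::Assembler a(&code); +Label L = a.newLabel(); // Creates a LabelEntry; `L` is not bound yet. + +a.jmp(L);               // `L` is used before it's bound - the Assembler records a LabelLink. +a.mov(x86::eax, 0); +a.bind(L);              // Binding `L` resolves the link recorded by the `jmp` above. +a.ret(); +``` +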
Here are some functions that can be used to check some basics: + +```c++ +CodeHolder code = ...; +Label L = ...; + +// Returns whether the Label `L` is bound. +bool bound = code.isLabelBound(L or L.id()); + +// Returns true if the code contains either referenced but unbound labels, +// or cross-section label links that are not resolved yet. +bool value = code.hasUnresolvedLinks(); // Boolean answer. +size_t count = code.unresolvedLinkCount(); // Count of links. +``` + +Please note that there is no API to return a count of unbound labels, as this is completely unimportant from CodeHolder's perspective. If a label is not used then it doesn't matter whether it's bound or not, only used labels matter. After a Label is bound you can query its offset relative to the start of the section where it was bound: + +```c++ +CodeHolder code = ...; +Label L = ...; + +// After you are done you can check the offset. The offset provided +// is relative to the start of the section, see below for an alternative. +// If the given label is not bound then the offset returned will be zero. +uint64_t offset = code.labelOffset(L or L.id()); + +// If you use multiple sections and want the offset relative to the base. +// NOTE: This function expects that the section already has an offset and +// the label-link was resolved (if this is not true you will still get an +// offset relative to the start of the section). +uint64_t offset = code.labelOffsetFromBase(L or L.id()); +``` + +### Sections + +Sections are a relatively new feature that makes it possible to create multiple sections. They are supported by Assembler, Builder, and Compiler. Please note that using multiple sections is advanced and requires a deeper understanding of how AsmJit works. There is a test-case [asmjit_test_x86_sections.cpp](./test/asmjit_test_x86_sections.cpp) that shows how sections can be used. + +```c++ +CodeHolder code = ...; + +// Text section is always provided as the first section. +Section* text = code.textSection(); // or code.sectionById(0); + +// To create another section use `code.newSection()`. +Section* data; +Error err = code.newSection(&data, +  ".data",  // Section name +  SIZE_MAX, // Name length if the name is not null terminated (otherwise SIZE_MAX). +  0,        // Section flags, see Section::Flags. +  8);       // Section alignment, must be power of 2. + +// When you switch sections in Assembler, Builder, or Compiler the cursor +// will always move to the end of that section. When you create an Assembler +// the cursor would be placed at the end of the first (.text) section, which +// is initially empty. +x86::Assembler a(&code); +Label L_Data = a.newLabel(); + +a.mov(x86::eax, x86::ebx); // Emits in .text section. + +a.section(data);           // Switches to the end of .data section. +a.bind(L_Data);            // Binds the label in this .data section. +a.db(0x01);                // Emits byte in .data section. + +a.section(text);           // Switches to the end of .text section. +a.add(x86::ebx, x86::eax); // Emits in .text section. + +// References a label bound in .data section in .text section. This +// would create a LabelLink even when the L_Data is already bound, +// because the reference crosses sections. See below... +a.lea(x86::rsi, x86::ptr(L_Data)); +``` + +The last line in the example above shows that a LabelLink would be created even for bound labels that cross sections. In this case a referenced label was bound in another section, which means that the link couldn't be resolved at that moment.
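+You can observe this right after emitting the cross-section reference. A short sketch (illustrative only, reusing `code` from the example above) checks it with the query functions introduced earlier: + +```c++ +// Illustrative check - `L_Data` is bound, but the reference from .text +// crosses sections, so it remains an unresolved link for now. +if (code.hasUnresolvedLinks()) +  printf("Unresolved links: %zu\n", code.unresolvedLinkCount()); +``` +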
If your code uses sections, but you wish AsmJit to flatten these sections (you don't plan to flatten them manually), then there is an API for that. + +```c++ +// ... (continuing the previous example) ... +CodeHolder code = ...; + +// Suppose we have some code that contains multiple sections and +// we would like to flatten it by using AsmJit's built-in API: +Error err = code.flatten(); +if (err) { /* Error handling is necessary. */ } + +// After flattening all sections would contain assigned offsets +// relative to base. Offsets are 64-bit unsigned integers so we +// cast them to `unsigned int` for simplicity here... +printf("Data section offset %u", unsigned(data->offset())); + +// The flattening doesn't resolve unresolved label links; this +// has to be done manually as flattening can be done separately. +err = code.resolveUnresolvedLinks(); +if (err) { /* Error handling is necessary. */ } + +if (code.hasUnresolvedLinks()) { +  // This would mean either an unbound label or some other issue. +  printf("FAILED: UnresolvedLinkCount=%zu\n", code.unresolvedLinkCount()); +} +``` + +### Using AsmJit Data Structures + +AsmJit stores its data in data structures allocated by `ZoneAllocator`. It's a fast allocator that allows AsmJit to allocate a lot of small data structures quickly and without `malloc()` overhead. The most common data structure that you will probably inspect is `ZoneVector`. It's like C++'s `std::vector`, but the implementation doesn't use exceptions and uses the mentioned `ZoneAllocator` for performance reasons. You don't have to worry about allocations as you should not need to add items to data structures that are managed by `CodeHolder` or advanced emitters like Builder/Compiler. + +APIs that return `ZoneVector`: + +```c++ +CodeHolder code = ...; + +// Contains all emitters attached to CodeHolder. +const ZoneVector<BaseEmitter*>& emitters = code.emitters(); + +// Contains all sections managed by CodeHolder. +const ZoneVector<Section*>& sections = code.sections(); + +// Contains all LabelEntry records associated with created Labels. +const ZoneVector<LabelEntry*>& labelEntries = code.labelEntries(); + +// Contains all RelocEntry records that describe relocations. +const ZoneVector<RelocEntry*>& relocEntries = code.relocEntries(); +``` + +AsmJit's `ZoneVector` has an overloaded array access operator so its elements can be accessed through `operator[]`. Some standard functions like `empty()`, `size()`, and `data()` are provided as well. Vectors are also iterable through a range-based for loop: + +```c++ +CodeHolder code = ...; + +for (LabelEntry* le : code.labelEntries()) { +  printf("Label #%u {Bound=%s Offset=%llu}", +    le->id(), +    le->isBound() ?
"true" : "false", + (unsigned long long)le->offset()); +} +``` + + Support ------- @@ -1771,6 +1864,8 @@ Donors: * [ZehMatt](https://github.com/ZehMatt) + + Authors & Maintainers --------------------- diff --git a/asmjit.natvis b/asmjit.natvis new file mode 100644 index 0000000..b85320d --- /dev/null +++ b/asmjit.natvis @@ -0,0 +1,54 @@ + + + + + + + + {_small.data, s8} + {_large.data, s8} + + + Small + Large + External + + (int)_small.type, d + _large.size, d + asmjit::String::kSSOCapacity, d + _large.capacity, d + _small.data, s8 + _large.data, s8 + + + + + {{ [size={_size, d} capacity={_capacity, d}] }} + + _size, d + _capacity, d + + _size + (($T1*)_data) + + + + + + + [None] + [Reg] {{ id={_reg.id, d} group={(_reg.signature >> 8) & 0xF, d} type={(_reg.signature >> 4) & 0x1F, d} size={(_reg.signature >> 24) & 0xFF, d} }} + [Mem] {{ }} + [Imm] {{ val={_imm.value.i64, d} hex={_imm.value.u64, X} f64={_imm.value.f64} }} + [Label] {{ id={_label.id} }} + [Unknown] + + _any + _reg + _mem + _imm + _label + _any + + + diff --git a/cxxconfig.js b/cxxconfig.js deleted file mode 100644 index c82eb74..0000000 --- a/cxxconfig.js +++ /dev/null @@ -1,16 +0,0 @@ -module.exports = { - product: "asmjit", - version: "1.0.0", - - prefix: "ASMJIT", - source: "src/asmjit", - - tools: { - NoTabs : true, - NoTrailingLines : true, - NoTrailingSpaces: true, - UnixEOL : true, - SortIncludes : true, - ExpandTemplates : true - } -}; diff --git a/src/asmjit/arm.h b/src/asmjit/arm.h deleted file mode 100644 index 0a916d9..0000000 --- a/src/asmjit/arm.h +++ /dev/null @@ -1,21 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_ARM_H -#define _ASMJIT_ARM_H - -// [Dependencies] -#include "./base.h" - -#include "./arm/armassembler.h" -#include "./arm/armbuilder.h" -#include "./arm/armcompiler.h" -#include "./arm/arminst.h" -#include "./arm/armoperand.h" - -// [Guard] -#endif // _ASMJIT_ARM_H diff --git a/src/asmjit/asmjit.h b/src/asmjit/asmjit.h index ead90f0..4428129 100644 --- a/src/asmjit/asmjit.h +++ b/src/asmjit/asmjit.h @@ -1,47 +1,41 @@ // [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. +// Machine Code Generation for C++. // // [License] // Zlib - See LICENSE.md file in the package. -// [Guard] #ifndef _ASMJIT_ASMJIT_H #define _ASMJIT_ASMJIT_H -// ============================================================================ -// [asmjit_mainpage] -// ============================================================================ - -//! \mainpage +//! \mainpage API Reference //! -//! AsmJit - Complete x86/x64 JIT and Remote Assembler for C++. +//! AsmJit C++ API reference documentation generated by Doxygen. //! //! Introduction provided by the project page at https://github.com/asmjit/asmjit. - -//! \defgroup asmjit_base AsmJit Base API (architecture independent) //! -//! \brief Backend Neutral API. - -//! \defgroup asmjit_x86 AsmJit X86/X64 API +//! \section main_groups Groups //! -//! \brief X86/X64 Backend API. - -//! \defgroup asmjit_arm AsmJit ARM32/ARM64 API +//! The documentation is split into the following groups: //! -//! \brief ARM32/ARM64 Backend API. +//! $$DOCS_GROUP_OVERVIEW$$ +//! +//! \section main_other Other Pages +//! +//! - Class List - List of classes sorted alphabetically +//! - AsmJit Namespace - List of symbols provided by `asmjit` namespace -// [Dependencies] -#include "./base.h" +//! \namespace asmjit +//! +//! Root namespace used by AsmJit. 
-// [X86/X64] -#if defined(ASMJIT_BUILD_X86) -#include "./x86.h" -#endif // ASMJIT_BUILD_X86 +#include "./core.h" -// [ARM32/ARM64] -#if defined(ASMJIT_BUILD_ARM) -#include "./arm.h" -#endif // ASMJIT_BUILD_ARM +#ifdef ASMJIT_BUILD_X86 + #include "./x86.h" +#endif + +#ifdef ASMJIT_BUILD_ARM + #include "./arm.h" +#endif -// [Guard] #endif // _ASMJIT_ASMJIT_H diff --git a/src/asmjit/asmjit_apibegin.h b/src/asmjit/asmjit_apibegin.h deleted file mode 100644 index 19db2e7..0000000 --- a/src/asmjit/asmjit_apibegin.h +++ /dev/null @@ -1,124 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Dependencies] -#if !defined(_ASMJIT_BUILD_H) -# include "./build.h" -#endif // !_ASMJIT_BUILD_H - -// [Guard] -#if !defined(ASMJIT_API_SCOPE) -# define ASMJIT_API_SCOPE -#else -# error "[asmjit] api-scope is already active, previous scope not closed by asmjit_apiend.h?" -#endif // ASMJIT_API_SCOPE - -// ============================================================================ -// [C++ Support] -// ============================================================================ - -// [NoExcept] -#if !ASMJIT_CC_HAS_NOEXCEPT && !defined(noexcept) -# define noexcept ASMJIT_NOEXCEPT -# define ASMJIT_UNDEF_NOEXCEPT -#endif // !ASMJIT_CC_HAS_NOEXCEPT && !noexcept - -// [NullPtr] -#if !ASMJIT_CC_HAS_NULLPTR && !defined(nullptr) -# define nullptr NULL -# define ASMJIT_UNDEF_NULLPTR -#endif // !ASMJIT_CC_HAS_NULLPTR && !nullptr - -// [Override] -#if !ASMJIT_CC_HAS_OVERRIDE && !defined(override) -# define override -# define ASMJIT_UNDEF_OVERRIDE -#endif // !ASMJIT_CC_HAS_OVERRIDE && !override - -// ============================================================================ -// [Compiler Support] -// ============================================================================ - -// [Clang] -#if ASMJIT_CC_CLANG -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wc++11-extensions" -# pragma clang diagnostic ignored "-Wconstant-logical-operand" -# pragma clang diagnostic ignored "-Wunnamed-type-template-args" -#endif // ASMJIT_CC_CLANG - -// [GCC] -#if ASMJIT_CC_GCC -# pragma GCC diagnostic push -# if ASMJIT_CC_GCC_GE(7, 0, 0) -# pragma GCC diagnostic ignored "-Wbool-operation" -# endif -# if ASMJIT_CC_GCC_GE(8, 0, 0) -# pragma GCC diagnostic ignored "-Wclass-memaccess" -# endif -#endif // ASMJIT_CC_GCC - -// [MSC] -#if ASMJIT_CC_MSC -# pragma warning(push) -# pragma warning(disable: 4127) // conditional expression is constant -# pragma warning(disable: 4201) // nameless struct/union -# pragma warning(disable: 4244) // '+=' : conversion from 'int' to 'x', possible loss of data -# pragma warning(disable: 4251) // struct needs to have dll-interface to be used by clients of struct ... -# pragma warning(disable: 4275) // non dll-interface struct ... used as base for dll-interface struct -# pragma warning(disable: 4355) // this used in base member initializer list -# pragma warning(disable: 4480) // specifying underlying type for enum -# pragma warning(disable: 4800) // forcing value to bool 'true' or 'false' -# pragma warning(disable: 4838) // comversion from 'int' to ... 
-# if _MSC_VER < 1900 -# if !defined(vsnprintf) -# define ASMJIT_UNDEF_VSNPRINTF -# define vsnprintf _vsnprintf -# endif // !vsnprintf -# if !defined(snprintf) -# define ASMJIT_UNDEF_SNPRINTF -# define snprintf _snprintf -# endif // !snprintf -# endif -#endif // ASMJIT_CC_MSC - -// ============================================================================ -// [Custom Macros] -// ============================================================================ - -// [ASMJIT_NON...] -#if ASMJIT_CC_HAS_DELETE_FUNCTION -#define ASMJIT_NONCONSTRUCTIBLE(...) \ -private: \ - __VA_ARGS__() = delete; \ - __VA_ARGS__(const __VA_ARGS__& other) = delete; \ - __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \ -public: -#define ASMJIT_NONCOPYABLE(...) \ -private: \ - __VA_ARGS__(const __VA_ARGS__& other) = delete; \ - __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \ -public: -#else -#define ASMJIT_NONCONSTRUCTIBLE(...) \ -private: \ - inline __VA_ARGS__(); \ - inline __VA_ARGS__(const __VA_ARGS__& other); \ - inline __VA_ARGS__& operator=(const __VA_ARGS__& other); \ -public: -#define ASMJIT_NONCOPYABLE(...) \ -private: \ - inline __VA_ARGS__(const __VA_ARGS__& other); \ - inline __VA_ARGS__& operator=(const __VA_ARGS__& other); \ -public: -#endif // ASMJIT_CC_HAS_DELETE_FUNCTION - -// [ASMJIT_ENUM] -#if defined(_MSC_VER) && _MSC_VER >= 1400 -# define ASMJIT_ENUM(NAME) enum NAME : uint32_t -#else -# define ASMJIT_ENUM(NAME) enum NAME -#endif diff --git a/src/asmjit/asmjit_apiend.h b/src/asmjit/asmjit_apiend.h deleted file mode 100644 index a51630b..0000000 --- a/src/asmjit/asmjit_apiend.h +++ /dev/null @@ -1,74 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#if defined(ASMJIT_API_SCOPE) -# undef ASMJIT_API_SCOPE -#else -# error "[asmjit] api-scope not active, forgot to include asmjit_apibegin.h?" -#endif // ASMJIT_API_SCOPE - -// ============================================================================ -// [C++ Support] -// ============================================================================ - -// [NoExcept] -#if defined(ASMJIT_UNDEF_NOEXCEPT) -# undef noexcept -# undef ASMJIT_UNDEF_NOEXCEPT -#endif // ASMJIT_UNDEF_NOEXCEPT - -// [NullPtr] -#if defined(ASMJIT_UNDEF_NULLPTR) -# undef nullptr -# undef ASMJIT_UNDEF_NULLPTR -#endif // ASMJIT_UNDEF_NULLPTR - -// [Override] -#if defined(ASMJIT_UNDEF_OVERRIDE) -# undef override -# undef ASMJIT_UNDEF_OVERRIDE -#endif // ASMJIT_UNDEF_OVERRIDE - -// ============================================================================ -// [Compiler Support] -// ============================================================================ - -// [Clang] -#if ASMJIT_CC_CLANG -# pragma clang diagnostic pop -#endif // ASMJIT_CC_CLANG - -// [GCC] -#if ASMJIT_CC_GCC -# pragma GCC diagnostic pop -#endif // ASMJIT_CC_GCC - -// [MSC] -#if ASMJIT_CC_MSC -# pragma warning(pop) -# if _MSC_VER < 1900 -# if defined(ASMJIT_UNDEF_VSNPRINTF) -# undef vsnprintf -# undef ASMJIT_UNDEF_VSNPRINTF -# endif // ASMJIT_UNDEF_VSNPRINTF -# if defined(ASMJIT_UNDEF_SNPRINTF) -# undef snprintf -# undef ASMJIT_UNDEF_SNPRINTF -# endif // ASMJIT_UNDEF_SNPRINTF -# endif -#endif // ASMJIT_CC_MSC - -// ============================================================================ -// [Custom Macros] -// ============================================================================ - -// [ASMJIT_NON...] 
-#undef ASMJIT_NONCONSTRUCTIBLE -#undef ASMJIT_NONCOPYABLE - -// [ASMJIT_ENUM] -#undef ASMJIT_ENUM diff --git a/src/asmjit/asmjit_build.h b/src/asmjit/asmjit_build.h deleted file mode 100644 index 77b151a..0000000 --- a/src/asmjit/asmjit_build.h +++ /dev/null @@ -1,949 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BUILD_H -#define _ASMJIT_BUILD_H - -// ============================================================================ -// [asmjit::Build - Configuration] -// ============================================================================ - -// AsmJit is by default compiled only for a host processor for the purpose of -// JIT code generation. Both Assembler and CodeCompiler emitters are compiled -// by default. Preprocessor macros can be used to change the default behavior. - -// External Config File -// -------------------- -// -// Define in case your configuration is generated in an external file to be -// included. - -#if defined(ASMJIT_CONFIG_FILE) -# include ASMJIT_CONFIG_FILE -#endif // ASMJIT_CONFIG_FILE - -// AsmJit Static Builds and Embedding -// ---------------------------------- -// -// These definitions can be used to enable static library build. Embed is used -// when AsmJit's source code is embedded directly in another project, implies -// static build as well. -// -// #define ASMJIT_EMBED // Asmjit is embedded (implies ASMJIT_STATIC). -// #define ASMJIT_STATIC // Define to enable static-library build. - -// AsmJit Build Modes -// ------------------ -// -// These definitions control the build mode and tracing support. The build mode -// should be auto-detected at compile time, but it's possible to override it in -// case that the auto-detection fails. -// -// Tracing is a feature that is never compiled by default and it's only used to -// debug AsmJit itself. -// -// #define ASMJIT_DEBUG // Define to enable debug-mode. -// #define ASMJIT_RELEASE // Define to enable release-mode. - -// AsmJit Build Backends -// --------------------- -// -// These definitions control which backends to compile. If none of these is -// defined AsmJit will use host architecture by default (for JIT code generation). -// -// #define ASMJIT_BUILD_X86 // Define to enable X86 and X64 code-generation. -// #define ASMJIT_BUILD_ARM // Define to enable ARM32 and ARM64 code-generation. -// #define ASMJIT_BUILD_HOST // Define to enable host instruction set. - -// AsmJit Build Features -// --------------------- -// -// Flags can be defined to disable standard features. These are handy especially -// when building AsmJit statically and some features are not needed or unwanted -// (like CodeCompiler). -// -// AsmJit features are enabled by default. -// #define ASMJIT_DISABLE_COMPILER // Disable CodeCompiler (completely). -// #define ASMJIT_DISABLE_LOGGING // Disable logging and formatting (completely). -// #define ASMJIT_DISABLE_TEXT // Disable everything that contains text -// // representation (instructions, errors, ...). -// #define ASMJIT_DISABLE_VALIDATION // Disable Validation (completely). - -// Prevent compile-time errors caused by misconfiguration. -#if defined(ASMJIT_DISABLE_TEXT) && !defined(ASMJIT_DISABLE_LOGGING) -# error "[asmjit] ASMJIT_DISABLE_TEXT requires ASMJIT_DISABLE_LOGGING to be defined." -#endif // ASMJIT_DISABLE_TEXT && !ASMJIT_DISABLE_LOGGING - -// Detect ASMJIT_DEBUG and ASMJIT_RELEASE if not forced from outside. 
-#if !defined(ASMJIT_DEBUG) && !defined(ASMJIT_RELEASE) -# if !defined(NDEBUG) -# define ASMJIT_DEBUG -# else -# define ASMJIT_RELEASE -# endif -#endif - -// ASMJIT_EMBED implies ASMJIT_STATIC. -#if defined(ASMJIT_EMBED) && !defined(ASMJIT_STATIC) -# define ASMJIT_STATIC -#endif - -// ============================================================================ -// [asmjit::Build - VERSION] -// ============================================================================ - -// [@VERSION{@] -#define ASMJIT_VERSION_MAJOR 1 -#define ASMJIT_VERSION_MINOR 0 -#define ASMJIT_VERSION_PATCH 0 -#define ASMJIT_VERSION_STRING "1.0.0" -// [@VERSION}@] - -// ============================================================================ -// [asmjit::Build - WIN32] -// ============================================================================ - -// [@WIN32_CRT_NO_DEPRECATE{@] -#if defined(_MSC_VER) && defined(ASMJIT_EXPORTS) -# if !defined(_CRT_SECURE_NO_DEPRECATE) -# define _CRT_SECURE_NO_DEPRECATE -# endif -# if !defined(_CRT_SECURE_NO_WARNINGS) -# define _CRT_SECURE_NO_WARNINGS -# endif -#endif -// [@WIN32_CRT_NO_DEPRECATE}@] - -// [@WIN32_LEAN_AND_MEAN{@] -#if (defined(_WIN32) || defined(_WINDOWS)) && !defined(_WINDOWS_) -# if !defined(WIN32_LEAN_AND_MEAN) -# define WIN32_LEAN_AND_MEAN -# define ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN -# endif -# if !defined(NOMINMAX) -# define NOMINMAX -# define ASMJIT_UNDEF_NOMINMAX -# endif -# include -# if defined(ASMJIT_UNDEF_NOMINMAX) -# undef NOMINMAX -# undef ASMJIT_UNDEF_NOMINMAX -# endif -# if defined(ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN) -# undef WIN32_LEAN_AND_MEAN -# undef ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN -# endif -#endif -// [@WIN32_LEAN_AND_MEAN}@] - -// ============================================================================ -// [asmjit::Build - OS] -// ============================================================================ - -// [@OS{@] -#if defined(_WIN32) || defined(_WINDOWS) -#define ASMJIT_OS_WINDOWS (1) -#else -#define ASMJIT_OS_WINDOWS (0) -#endif - -#if defined(__APPLE__) -# include -# define ASMJIT_OS_MAC (TARGET_OS_MAC) -# define ASMJIT_OS_IOS (TARGET_OS_IPHONE) -#else -# define ASMJIT_OS_MAC (0) -# define ASMJIT_OS_IOS (0) -#endif - -#if defined(__ANDROID__) -# define ASMJIT_OS_ANDROID (1) -#else -# define ASMJIT_OS_ANDROID (0) -#endif - -#if defined(__linux__) || defined(__ANDROID__) -# define ASMJIT_OS_LINUX (1) -#else -# define ASMJIT_OS_LINUX (0) -#endif - -#if defined(__DragonFly__) -# define ASMJIT_OS_DRAGONFLYBSD (1) -#else -# define ASMJIT_OS_DRAGONFLYBSD (0) -#endif - -#if defined(__FreeBSD__) -# define ASMJIT_OS_FREEBSD (1) -#else -# define ASMJIT_OS_FREEBSD (0) -#endif - -#if defined(__NetBSD__) -# define ASMJIT_OS_NETBSD (1) -#else -# define ASMJIT_OS_NETBSD (0) -#endif - -#if defined(__OpenBSD__) -# define ASMJIT_OS_OPENBSD (1) -#else -# define ASMJIT_OS_OPENBSD (0) -#endif - -#if defined(__QNXNTO__) -# define ASMJIT_OS_QNX (1) -#else -# define ASMJIT_OS_QNX (0) -#endif - -#if defined(__sun) -# define ASMJIT_OS_SOLARIS (1) -#else -# define ASMJIT_OS_SOLARIS (0) -#endif - -#if defined(__CYGWIN__) -# define ASMJIT_OS_CYGWIN (1) -#else -# define ASMJIT_OS_CYGWIN (0) -#endif - -#define ASMJIT_OS_BSD ( \ - ASMJIT_OS_FREEBSD || \ - ASMJIT_OS_DRAGONFLYBSD || \ - ASMJIT_OS_NETBSD || \ - ASMJIT_OS_OPENBSD || \ - ASMJIT_OS_MAC) -#define ASMJIT_OS_POSIX (!ASMJIT_OS_WINDOWS) -// [@OS}@] - -// ============================================================================ -// [asmjit::Build - ARCH] -// 
============================================================================ - -// [@ARCH{@] -// \def ASMJIT_ARCH_ARM32 -// True if the target architecture is a 32-bit ARM. -// -// \def ASMJIT_ARCH_ARM64 -// True if the target architecture is a 64-bit ARM. -// -// \def ASMJIT_ARCH_X86 -// True if the target architecture is a 32-bit X86/IA32 -// -// \def ASMJIT_ARCH_X64 -// True if the target architecture is a 64-bit X64/AMD64 -// -// \def ASMJIT_ARCH_LE -// True if the target architecture is little endian. -// -// \def ASMJIT_ARCH_BE -// True if the target architecture is big endian. -// -// \def ASMJIT_ARCH_64BIT -// True if the target architecture is 64-bit. - -#if (defined(_M_X64 ) || defined(__x86_64) || defined(__x86_64__) || \ - defined(_M_AMD64) || defined(__amd64 ) || defined(__amd64__ )) -# define ASMJIT_ARCH_X64 1 -#else -# define ASMJIT_ARCH_X64 0 -#endif - -#if (defined(_M_IX86 ) || defined(__X86__ ) || defined(__i386 ) || \ - defined(__IA32__) || defined(__I86__ ) || defined(__i386__) || \ - defined(__i486__) || defined(__i586__) || defined(__i686__)) -# define ASMJIT_ARCH_X86 (!ASMJIT_ARCH_X64) -#else -# define ASMJIT_ARCH_X86 0 -#endif - -#if defined(__aarch64__) -# define ASMJIT_ARCH_ARM64 1 -#else -# define ASMJIT_ARCH_ARM64 0 -#endif - -#if (defined(_M_ARM ) || defined(__arm ) || defined(__thumb__ ) || \ - defined(_M_ARMT ) || defined(__arm__ ) || defined(__thumb2__)) -# define ASMJIT_ARCH_ARM32 (!ASMJIT_ARCH_ARM64) -#else -# define ASMJIT_ARCH_ARM32 0 -#endif - -#define ASMJIT_ARCH_LE ( \ - ASMJIT_ARCH_X86 || \ - ASMJIT_ARCH_X64 || \ - ASMJIT_ARCH_ARM32 || \ - ASMJIT_ARCH_ARM64 ) -#define ASMJIT_ARCH_BE (!(ASMJIT_ARCH_LE)) -#define ASMJIT_ARCH_64BIT (ASMJIT_ARCH_X64 || ASMJIT_ARCH_ARM64) -// [@ARCH}@] - -// [@ARCH_UNALIGNED_RW{@] -// \def ASMJIT_ARCH_UNALIGNED_16 -// True if the target architecture allows unaligned 16-bit reads and writes. -// -// \def ASMJIT_ARCH_UNALIGNED_32 -// True if the target architecture allows unaligned 32-bit reads and writes. -// -// \def ASMJIT_ARCH_UNALIGNED_64 -// True if the target architecture allows unaligned 64-bit reads and writes. - -#define ASMJIT_ARCH_UNALIGNED_16 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) -#define ASMJIT_ARCH_UNALIGNED_32 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) -#define ASMJIT_ARCH_UNALIGNED_64 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) -// [@ARCH_UNALIGNED_RW}@] - -// ============================================================================ -// [asmjit::Build - CC] -// ============================================================================ - -// [@CC{@] -// \def ASMJIT_CC_CLANG -// Non-zero if the detected C++ compiler is CLANG (contains normalized CLANG version). -// -// \def ASMJIT_CC_CODEGEAR -// Non-zero if the detected C++ compiler is CODEGEAR or BORLAND (version not normalized). -// -// \def ASMJIT_CC_INTEL -// Non-zero if the detected C++ compiler is INTEL (version not normalized). -// -// \def ASMJIT_CC_GCC -// Non-zero if the detected C++ compiler is GCC (contains normalized GCC version). -// -// \def ASMJIT_CC_MSC -// Non-zero if the detected C++ compiler is MSC (contains normalized MSC version). -// -// \def ASMJIT_CC_MINGW -// Non-zero if the detected C++ compiler is MINGW32 (set to 32) or MINGW64 (set to 64). - -#define ASMJIT_CC_CLANG 0 -#define ASMJIT_CC_CODEGEAR 0 -#define ASMJIT_CC_GCC 0 -#define ASMJIT_CC_INTEL 0 -#define ASMJIT_CC_MSC 0 - -// Intel masquerades as GCC, so check for it first. 
-#if defined(__INTEL_COMPILER) -# undef ASMJIT_CC_INTEL -# define ASMJIT_CC_INTEL __INTEL_COMPILER -#elif defined(__CODEGEARC__) -# undef ASMJIT_CC_CODEGEAR -# define ASMJIT_CC_CODEGEAR (__CODEGEARC__) -#elif defined(__BORLANDC__) -# undef ASMJIT_CC_CODEGEAR -# define ASMJIT_CC_CODEGEAR (__BORLANDC__) -#elif defined(__clang__) && defined(__clang_minor__) -# undef ASMJIT_CC_CLANG -# define ASMJIT_CC_CLANG (__clang_major__ * 10000000 + __clang_minor__ * 100000 + __clang_patchlevel__) -#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) -# undef ASMJIT_CC_GCC -# define ASMJIT_CC_GCC (__GNUC__ * 10000000 + __GNUC_MINOR__ * 100000 + __GNUC_PATCHLEVEL__) -#elif defined(_MSC_VER) && defined(_MSC_FULL_VER) -# undef ASMJIT_CC_MSC -# if _MSC_VER == _MSC_FULL_VER / 10000 -# define ASMJIT_CC_MSC (_MSC_VER * 100000 + (_MSC_FULL_VER % 10000)) -# else -# define ASMJIT_CC_MSC (_MSC_VER * 100000 + (_MSC_FULL_VER % 100000)) -# endif -#else -# error "[asmjit] Unable to detect the C/C++ compiler." -#endif - -#if ASMJIT_CC_INTEL && (defined(__GNUC__) || defined(__clang__)) -# define ASMJIT_CC_INTEL_COMPAT_MODE 1 -# else -# define ASMJIT_CC_INTEL_COMPAT_MODE 0 -#endif - -#define ASMJIT_CC_CODEGEAR_EQ(x, y) (ASMJIT_CC_CODEGEAR == (((x) << 8) + (y))) -#define ASMJIT_CC_CODEGEAR_GE(x, y) (ASMJIT_CC_CODEGEAR >= (((x) << 8) + (y))) - -#define ASMJIT_CC_CLANG_EQ(x, y, z) (ASMJIT_CC_CLANG == ((x) * 10000000 + (y) * 100000 + (z))) -#define ASMJIT_CC_CLANG_GE(x, y, z) (ASMJIT_CC_CLANG >= ((x) * 10000000 + (y) * 100000 + (z))) - -#define ASMJIT_CC_GCC_EQ(x, y, z) (ASMJIT_CC_GCC == ((x) * 10000000 + (y) * 100000 + (z))) -#define ASMJIT_CC_GCC_GE(x, y, z) (ASMJIT_CC_GCC >= ((x) * 10000000 + (y) * 100000 + (z))) - -#define ASMJIT_CC_INTEL_EQ(x, y) (ASMJIT_CC_INTEL == (((x) * 100) + (y))) -#define ASMJIT_CC_INTEL_GE(x, y) (ASMJIT_CC_INTEL >= (((x) * 100) + (y))) - -#define ASMJIT_CC_MSC_EQ(x, y, z) (ASMJIT_CC_MSC == ((x) * 10000000 + (y) * 100000 + (z))) -#define ASMJIT_CC_MSC_GE(x, y, z) (ASMJIT_CC_MSC >= ((x) * 10000000 + (y) * 100000 + (z))) - -#if defined(__MINGW64__) -# define ASMJIT_CC_MINGW 64 -#elif defined(__MINGW32__) -# define ASMJIT_CC_MINGW 32 -#else -# define ASMJIT_CC_MINGW 0 -#endif - -#if defined(__cplusplus) -# if __cplusplus >= 201103L -# define ASMJIT_CC_CXX_VERSION __cplusplus -# elif defined(__GXX_EXPERIMENTAL_CXX0X__) || ASMJIT_CC_MSC_GE(18, 0, 0) || ASMJIT_CC_INTEL_GE(14, 0) -# define ASMJIT_CC_CXX_VERSION 201103L -# else -# define ASMJIT_CC_CXX_VERSION 199711L -# endif -#endif - -#if !defined(ASMJIT_CC_CXX_VERSION) -# define ASMJIT_CC_CXX_VERSION 0 -#endif -// [@CC}@] - -// [@CC_FEATURES{@] -#if ASMJIT_CC_CLANG -# define ASMJIT_CC_HAS_ATTRIBUTE (1) -# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (__has_attribute(__aligned__)) -# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(__always_inline__)) -# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (__has_attribute(__noinline__)) -# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (__has_attribute(__noreturn__)) -# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (__has_attribute(__optimize__)) -# define ASMJIT_CC_HAS_BUILTIN_ASSUME (__has_builtin(__builtin_assume)) -# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned)) -# define ASMJIT_CC_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect)) -# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (__has_builtin(__builtin_unreachable)) -# define ASMJIT_CC_HAS_ALIGNAS (__has_extension(__cxx_alignas__)) -# define ASMJIT_CC_HAS_ALIGNOF (__has_extension(__cxx_alignof__)) -# 
define ASMJIT_CC_HAS_CONSTEXPR (__has_extension(__cxx_constexpr__)) -# define ASMJIT_CC_HAS_DECLTYPE (__has_extension(__cxx_decltype__)) -# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (__has_extension(__cxx_defaulted_functions__)) -# define ASMJIT_CC_HAS_DELETE_FUNCTION (__has_extension(__cxx_deleted_functions__)) -# define ASMJIT_CC_HAS_FINAL (__has_extension(__cxx_override_control__)) -# define ASMJIT_CC_HAS_INITIALIZER_LIST (__has_extension(__cxx_generalized_initializers__)) -# define ASMJIT_CC_HAS_LAMBDA (__has_extension(__cxx_lambdas__)) -# define ASMJIT_CC_HAS_NATIVE_CHAR (1) -# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1) -# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (__has_extension(__cxx_unicode_literals__)) -# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (__has_extension(__cxx_unicode_literals__)) -# define ASMJIT_CC_HAS_NOEXCEPT (__has_extension(__cxx_noexcept__)) -# define ASMJIT_CC_HAS_NULLPTR (__has_extension(__cxx_nullptr__)) -# define ASMJIT_CC_HAS_OVERRIDE (__has_extension(__cxx_override_control__)) -# define ASMJIT_CC_HAS_RVALUE (__has_extension(__cxx_rvalue_references__)) -# define ASMJIT_CC_HAS_STATIC_ASSERT (__has_extension(__cxx_static_assert__)) -# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (__has_extension(__cxx_variadic_templates__)) -#endif - -#if ASMJIT_CC_CODEGEAR -# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (ASMJIT_CC_CODEGEAR >= 0x0610) -# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0) -# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0) -# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (ASMJIT_CC_CODEGEAR >= 0x0610) -# define ASMJIT_CC_HAS_ALIGNAS (0) -# define ASMJIT_CC_HAS_ALIGNOF (0) -# define ASMJIT_CC_HAS_CONSTEXPR (0) -# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_CODEGEAR >= 0x0610) -# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (0) -# define ASMJIT_CC_HAS_DELETE_FUNCTION (0) -# define ASMJIT_CC_HAS_FINAL (0) -# define ASMJIT_CC_HAS_INITIALIZER_LIST (0) -# define ASMJIT_CC_HAS_LAMBDA (0) -# define ASMJIT_CC_HAS_NATIVE_CHAR (1) -# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1) -# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (0) -# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (0) -# define ASMJIT_CC_HAS_NOEXCEPT (0) -# define ASMJIT_CC_HAS_NULLPTR (0) -# define ASMJIT_CC_HAS_OVERRIDE (0) -# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_CODEGEAR >= 0x0610) -# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_CODEGEAR >= 0x0610) -# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (0) -#endif - -#if ASMJIT_CC_GCC -# define ASMJIT_CC_HAS_ATTRIBUTE (1) -# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (ASMJIT_CC_GCC_GE(2, 7, 0)) -# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (ASMJIT_CC_GCC_GE(4, 4, 0) && !ASMJIT_CC_MINGW) -# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (ASMJIT_CC_GCC_GE(3, 4, 0) && !ASMJIT_CC_MINGW) -# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (ASMJIT_CC_GCC_GE(2, 5, 0)) -# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (ASMJIT_CC_GCC_GE(4, 4, 0)) -# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0) -# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (ASMJIT_CC_GCC_GE(4, 7, 0)) -# define ASMJIT_CC_HAS_BUILTIN_EXPECT (1) -# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && 
ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_NATIVE_CHAR (1) -# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1) -# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L) -#endif - -#if ASMJIT_CC_INTEL -# define ASMJIT_CC_HAS_ATTRIBUTE (ASMJIT_CC_INTEL_COMPAT_MODE) -# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (ASMJIT_CC_INTEL_COMPAT_MODE) -# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (ASMJIT_CC_INTEL_COMPAT_MODE) -# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (ASMJIT_CC_INTEL_COMPAT_MODE) -# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (ASMJIT_CC_INTEL_COMPAT_MODE) -# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (ASMJIT_CC_INTEL_COMPAT_MODE) -# define ASMJIT_CC_HAS_BUILTIN_EXPECT (ASMJIT_CC_INTEL_COMPAT_MODE) -# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (ASMJIT_CC_INTEL_COMPAT_MODE == 0) -# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (ASMJIT_CC_INTEL_COMPAT_MODE == 0) -# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (ASMJIT_CC_INTEL_COMPAT_MODE == 0) -# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (ASMJIT_CC_INTEL_COMPAT_MODE == 0) -# define ASMJIT_CC_HAS_ASSUME (1) -# define ASMJIT_CC_HAS_ASSUME_ALIGNED (1) -# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_INTEL >= 1500) -# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_INTEL >= 1500) -# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_INTEL >= 1400) -# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_INTEL >= 1200) -# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_INTEL >= 1200) -# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_INTEL >= 1200) -# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_INTEL >= 1400) -# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_INTEL >= 1400) -# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_INTEL >= 1200) -# define ASMJIT_CC_HAS_NATIVE_CHAR (1) -# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1) -# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_INTEL >= 1400 || (ASMJIT_CC_INTEL_COMPAT_MODE > 0 && ASMJIT_CC_INTEL >= 1206)) -# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_INTEL >= 1400 || (ASMJIT_CC_INTEL_COMPAT_MODE > 0 && ASMJIT_CC_INTEL >= 1206)) -# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_INTEL >= 1400) -# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_INTEL >= 1206) -# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_INTEL >= 1400) -# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_INTEL >= 1110) -# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_INTEL >= 1110) -# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_INTEL >= 1206) -#endif - -#if ASMJIT_CC_MSC -# define 
ASMJIT_CC_HAS_DECLSPEC_ALIGN (1) -# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (1) -# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (1) -# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (1) -# define ASMJIT_CC_HAS_ASSUME (1) -# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0) -# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_MSC_GE(19, 0, 0)) -# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_MSC_GE(19, 0, 0)) -# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_MSC_GE(19, 0, 0)) -# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_MSC_GE(16, 0, 0)) -# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_MSC_GE(18, 0, 0)) -# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_MSC_GE(18, 0, 0)) -# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_MSC_GE(14, 0, 0)) -# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_MSC_GE(18, 0, 0)) -# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_MSC_GE(16, 0, 0)) -# define ASMJIT_CC_HAS_NATIVE_CHAR (1) -# if defined(_NATIVE_WCHAR_T_DEFINED) -# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1) -# else -# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (0) -# endif -# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_MSC_GE(19, 0, 0)) -# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_MSC_GE(19, 0, 0)) -# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_MSC_GE(19, 0, 0)) -# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_MSC_GE(16, 0, 0)) -# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_MSC_GE(14, 0, 0)) -# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_MSC_GE(16, 0, 0)) -# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_MSC_GE(16, 0, 0)) -# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_MSC_GE(18, 0, 0)) -#endif - -// Fixup some vendor specific keywords. -#if !defined(ASMJIT_CC_HAS_ASSUME) -# define ASMJIT_CC_HAS_ASSUME (0) -#endif -#if !defined(ASMJIT_CC_HAS_ASSUME_ALIGNED) -# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0) -#endif - -// Fixup compilers that don't support '__attribute__'. -#if !defined(ASMJIT_CC_HAS_ATTRIBUTE) -# define ASMJIT_CC_HAS_ATTRIBUTE (0) -#endif -#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED) -# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (0) -#endif -#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE) -# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (0) -#endif -#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE) -# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (0) -#endif -#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_NORETURN) -# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (0) -#endif -#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE) -# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (0) -#endif - -// Fixup compilers that don't support '__builtin?'. -#if !defined(ASMJIT_CC_HAS_BUILTIN_ASSUME) -# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0) -#endif -#if !defined(ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED) -# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (0) -#endif -#if !defined(ASMJIT_CC_HAS_BUILTIN_EXPECT) -# define ASMJIT_CC_HAS_BUILTIN_EXPECT (0) -#endif -#if !defined(ASMJIT_CC_HAS_BUILTIN_UNREACHABLE) -# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (0) -#endif - -// Fixup compilers that don't support 'declspec'. -#if !defined(ASMJIT_CC_HAS_DECLSPEC_ALIGN) -# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (0) -#endif -#if !defined(ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE) -# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0) -#endif -#if !defined(ASMJIT_CC_HAS_DECLSPEC_NOINLINE) -# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0) -#endif -#if !defined(ASMJIT_CC_HAS_DECLSPEC_NORETURN) -# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (0) -#endif -// [@CC_FEATURES}@] - -// [@CC_API{@] -// \def ASMJIT_API -// The decorated function is asmjit API and should be exported. 
-#if !defined(ASMJIT_API) -# if defined(ASMJIT_STATIC) -# define ASMJIT_API -# elif ASMJIT_OS_WINDOWS -# if (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && !ASMJIT_CC_MINGW -# if defined(ASMJIT_EXPORTS) -# define ASMJIT_API __attribute__((__dllexport__)) -# else -# define ASMJIT_API __attribute__((__dllimport__)) -# endif -# else -# if defined(ASMJIT_EXPORTS) -# define ASMJIT_API __declspec(dllexport) -# else -# define ASMJIT_API __declspec(dllimport) -# endif -# endif -# else -# if ASMJIT_CC_CLANG || ASMJIT_CC_GCC_GE(4, 0, 0) || ASMJIT_CC_INTEL -# define ASMJIT_API __attribute__((__visibility__("default"))) -# endif -# endif -#endif -// [@CC_API}@] - -// [@CC_VARAPI{@] -// \def ASMJIT_VARAPI -// The decorated variable is part of asmjit API and is exported. -#if !defined(ASMJIT_VARAPI) -# define ASMJIT_VARAPI extern ASMJIT_API -#endif -// [@CC_VARAPI}@] - -// [@CC_VIRTAPI{@] -// \def ASMJIT_VIRTAPI -// The decorated class has a virtual table and is part of asmjit API. -// -// This is basically a workaround. When using MSVC and marking class as DLL -// export everything gets exported, which is unwanted in most projects. MSVC -// automatically exports typeinfo and vtable if at least one symbol of the -// class is exported. However, GCC has some strange behavior that even if -// one or more symbol is exported it doesn't export typeinfo unless the -// class itself is decorated with "visibility(default)" (i.e. asmjit_API). -#if (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && !ASMJIT_OS_WINDOWS -# define ASMJIT_VIRTAPI ASMJIT_API -#else -# define ASMJIT_VIRTAPI -#endif -// [@CC_VIRTAPI}@] - -// [@CC_INLINE{@] -// \def ASMJIT_INLINE -// Always inline the decorated function. -#if ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE -# define ASMJIT_INLINE inline __attribute__((__always_inline__)) -#elif ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE -# define ASMJIT_INLINE __forceinline -#else -# define ASMJIT_INLINE inline -#endif -// [@CC_INLINE}@] - -// [@CC_NOINLINE{@] -// \def ASMJIT_NOINLINE -// Never inline the decorated function. -#if ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE -# define ASMJIT_NOINLINE __attribute__((__noinline__)) -#elif ASMJIT_CC_HAS_DECLSPEC_NOINLINE -# define ASMJIT_NOINLINE __declspec(noinline) -#else -# define ASMJIT_NOINLINE -#endif -// [@CC_NOINLINE}@] - -// [@CC_NORETURN{@] -// \def ASMJIT_NORETURN -// The decorated function never returns (exit, assertion failure, etc...). -#if ASMJIT_CC_HAS_ATTRIBUTE_NORETURN -# define ASMJIT_NORETURN __attribute__((__noreturn__)) -#elif ASMJIT_CC_HAS_DECLSPEC_NORETURN -# define ASMJIT_NORETURN __declspec(noreturn) -#else -# define ASMJIT_NORETURN -#endif -// [@CC_NORETURN}@] - -// [@CC_CDECL{@] -// \def ASMJIT_CDECL -// Standard C function calling convention decorator (__cdecl). -#if ASMJIT_ARCH_X86 -# if ASMJIT_CC_HAS_ATTRIBUTE -# define ASMJIT_CDECL __attribute__((__cdecl__)) -# else -# define ASMJIT_CDECL __cdecl -# endif -#else -# define ASMJIT_CDECL -#endif -// [@CC_CDECL}@] - -// [@CC_STDCALL{@] -// \def ASMJIT_STDCALL -// StdCall function calling convention decorator (__stdcall). -#if ASMJIT_ARCH_X86 -# if ASMJIT_CC_HAS_ATTRIBUTE -# define ASMJIT_STDCALL __attribute__((__stdcall__)) -# else -# define ASMJIT_STDCALL __stdcall -# endif -#else -# define ASMJIT_STDCALL -#endif -// [@CC_STDCALL}@] - -// [@CC_FASTCALL{@] -// \def ASMJIT_FASTCALL -// FastCall function calling convention decorator (__fastcall). 
-#if ASMJIT_ARCH_X86 -# if ASMJIT_CC_HAS_ATTRIBUTE -# define ASMJIT_FASTCALL __attribute__((__fastcall__)) -# else -# define ASMJIT_FASTCALL __fastcall -# endif -#else -# define ASMJIT_FASTCALL -#endif -// [@CC_FASTCALL}@] - -// [@CC_REGPARM{@] -// \def ASMJIT_REGPARM(n) -// A custom calling convention which passes n arguments in registers. -#if ASMJIT_ARCH_X86 && ASMJIT_CC_HAS_ATTRIBUTE -# define ASMJIT_REGPARM(n) __attribute__((__regparm__(n))) -#else -# define ASMJIT_REGPARM(n) -#endif -// [@CC_REGPARM}@] - -// [@CC_NOEXCEPT{@] -// \def ASMJIT_NOEXCEPT -// The decorated function never throws an exception (noexcept). -#if ASMJIT_CC_HAS_NOEXCEPT -# define ASMJIT_NOEXCEPT noexcept -#else -# define ASMJIT_NOEXCEPT -#endif -// [@CC_NOEXCEPT}@] - -// [@CC_NOP{@] -// \def ASMJIT_NOP -// No operation. -#if !defined(ASMJIT_NOP) -# define ASMJIT_NOP ((void)0) -#endif -// [@CC_NOP}@] - -// [@CC_ASSUME{@] -// \def ASMJIT_ASSUME(exp) -// Assume that the expression exp is always true. -#if ASMJIT_CC_HAS_ASSUME -# define ASMJIT_ASSUME(exp) __assume(exp) -#elif ASMJIT_CC_HAS_BUILTIN_ASSUME -# define ASMJIT_ASSUME(exp) __builtin_assume(exp) -#elif ASMJIT_CC_HAS_BUILTIN_UNREACHABLE -# define ASMJIT_ASSUME(exp) do { if (!(exp)) __builtin_unreachable(); } while (0) -#else -# define ASMJIT_ASSUME(exp) ((void)0) -#endif -// [@CC_ASSUME}@] - -// [@CC_ASSUME_ALIGNED{@] -// \def ASMJIT_ASSUME_ALIGNED(p, alignment) -// Assume that the pointer 'p' is aligned to at least 'alignment' bytes. -#if ASMJIT_CC_HAS_ASSUME_ALIGNED -# define ASMJIT_ASSUME_ALIGNED(p, alignment) __assume_aligned(p, alignment) -#elif ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED -# define ASMJIT_ASSUME_ALIGNED(p, alignment) p = __builtin_assume_aligned(p, alignment) -#else -# define ASMJIT_ASSUME_ALIGNED(p, alignment) ((void)0) -#endif -// [@CC_ASSUME_ALIGNED}@] - -// [@CC_EXPECT{@] -// \def ASMJIT_LIKELY(exp) -// Expression exp is likely to be true. -// -// \def ASMJIT_UNLIKELY(exp) -// Expression exp is likely to be false. -#if ASMJIT_CC_HAS_BUILTIN_EXPECT -# define ASMJIT_LIKELY(exp) __builtin_expect(!!(exp), 1) -# define ASMJIT_UNLIKELY(exp) __builtin_expect(!!(exp), 0) -#else -# define ASMJIT_LIKELY(exp) (exp) -# define ASMJIT_UNLIKELY(exp) (exp) -#endif -// [@CC_EXPECT}@] - -// [@CC_FALLTHROUGH{@] -// \def ASMJIT_FALLTHROUGH -// The code falls through annotation (switch / case). -#if ASMJIT_CC_CLANG && __cplusplus >= 201103L -# define ASMJIT_FALLTHROUGH [[clang::fallthrough]] -#else -# define ASMJIT_FALLTHROUGH (void)0 -#endif -// [@CC_FALLTHROUGH}@] - -// [@CC_UNUSED{@] -// \def ASMJIT_UNUSED(x) -// Mark a variable x as unused. -#define ASMJIT_UNUSED(x) (void)(x) -// [@CC_UNUSED}@] - -// [@CC_OFFSET_OF{@] -// \def ASMJIT_OFFSET_OF(x, y). -// Get the offset of a member y of a struct x at compile-time. -#define ASMJIT_OFFSET_OF(x, y) ((int)(intptr_t)((const char*)&((const x*)0x1)->y) - 1) -// [@CC_OFFSET_OF}@] - -// [@CC_ARRAY_SIZE{@] -// \def ASMJIT_ARRAY_SIZE(x) -// Get the array size of x at compile-time. 
-#define ASMJIT_ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) -// [@CC_ARRAY_SIZE}@] - -// ============================================================================ -// [asmjit::Build - STDTYPES] -// ============================================================================ - -// [@STDTYPES{@] -#if defined(__MINGW32__) || defined(__MINGW64__) -# include -#endif -#if defined(_MSC_VER) && (_MSC_VER < 1600) -# include -# if !defined(ASMJIT_SUPPRESS_STD_TYPES) -# if (_MSC_VER < 1300) -typedef signed char int8_t; -typedef signed short int16_t; -typedef signed int int32_t; -typedef signed __int64 int64_t; -typedef unsigned char uint8_t; -typedef unsigned short uint16_t; -typedef unsigned int uint32_t; -typedef unsigned __int64 uint64_t; -# else -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -# endif -# endif -#else -# include -# include -#endif -// [@STDTYPES}@] - -// ============================================================================ -// [asmjit::Build - Dependencies] -// ============================================================================ - -#include -#include -#include -#include - -#include -#include - -#if ASMJIT_OS_POSIX -# include -#endif // ASMJIT_OS_POSIX - -// ============================================================================ -// [asmjit::Build - Additional] -// ============================================================================ - -// Build host architecture if no architecture is selected. -#if !defined(ASMJIT_BUILD_HOST) && \ - !defined(ASMJIT_BUILD_X86) && \ - !defined(ASMJIT_BUILD_ARM) -# define ASMJIT_BUILD_HOST -#endif - -// Detect host architecture if building only for host. -#if defined(ASMJIT_BUILD_HOST) -# if (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && !defined(ASMJIT_BUILD_X86) -# define ASMJIT_BUILD_X86 -# endif // ASMJIT_ARCH_X86 -#endif // ASMJIT_BUILD_HOST - -#if ASMJIT_CC_MSC -# define ASMJIT_UINT64_C(x) x##ui64 -#else -# define ASMJIT_UINT64_C(x) x##ull -#endif - -#if ASMJIT_ARCH_LE -# define ASMJIT_PACK32_4x8(A, B, C, D) ((A) + ((B) << 8) + ((C) << 16) + ((D) << 24)) -#else -# define ASMJIT_PACK32_4x8(A, B, C, D) ((D) + ((C) << 8) + ((B) << 16) + ((A) << 24)) -#endif - -// Internal macros that are only used when building AsmJit itself. -#if defined(ASMJIT_EXPORTS) -# if !defined(ASMJIT_DEBUG) && ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE -# define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os"))) -# else -# define ASMJIT_FAVOR_SIZE -# endif -#endif // ASMJIT_EXPORTS - -// ============================================================================ -// [asmjit::Build - Test] -// ============================================================================ - -// Include a unit testing package if this is a `asmjit_test` build. -#if defined(ASMJIT_TEST) -# include "../../test/broken.h" -#endif // ASMJIT_TEST - -// [Guard] -#endif // _ASMJIT_BUILD_H diff --git a/src/asmjit/base.h b/src/asmjit/base.h deleted file mode 100644 index 70b7e82..0000000 --- a/src/asmjit/base.h +++ /dev/null @@ -1,34 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
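
Editorial aside, not part of the patch: a minimal standalone sketch of how the ASMJIT_ARRAY_SIZE and ASMJIT_PACK32_4x8 macros defined above are meant to be used together. The table below only mimics the archInfoTable that appears later in arch.cpp; the unprefixed macro names are restatements for this sketch, and the little-endian packing variant is assumed.

    #include <cassert>
    #include <cstdint>

    // Unprefixed restatements of the two macros defined above (LE variant).
    #define PACK32_4x8(A, B, C, D) ((A) + ((B) << 8) + ((C) << 16) + ((D) << 24))
    #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))

    int main() {
      // A table in the spirit of archInfoTable in arch.cpp below:
      // (type, subType, gpSize, gpCount) packed into one uint32_t per entry.
      static const uint32_t table[] = {
        PACK32_4x8(0, 0, 0, 0),  // kTypeNone.
        PACK32_4x8(1, 0, 4, 8),  // kTypeX86 - 4-byte GP registers, 8 of them.
        PACK32_4x8(2, 0, 8, 16)  // kTypeX64 - 8-byte GP registers, 16 of them.
      };

      // ARRAY_SIZE() yields the element count at compile time; ArchInfo::init()
      // uses the same pattern to clamp an unknown type to index 0 (kTypeNone).
      uint32_t type = 7;  // Some architecture id the table doesn't know.
      uint32_t index = type < ARRAY_SIZE(table) ? type : uint32_t(0);
      assert(index == 0);

      // With the little-endian variant the first argument occupies the lowest
      // byte of the packed value, which is what lets ArchInfo read an entry
      // back through its byte-sized struct members.
      assert((table[2] & 0xFFu) == 2);          // type
      assert(((table[2] >> 16) & 0xFFu) == 8);  // gpSize
      return 0;
    }
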
- -// [Guard] -#ifndef _ASMJIT_BASE_H -#define _ASMJIT_BASE_H - -// [Dependencies] -#include "./base/arch.h" -#include "./base/assembler.h" -#include "./base/codebuilder.h" -#include "./base/codecompiler.h" -#include "./base/codeemitter.h" -#include "./base/codeholder.h" -#include "./base/constpool.h" -#include "./base/cpuinfo.h" -#include "./base/func.h" -#include "./base/globals.h" -#include "./base/inst.h" -#include "./base/logging.h" -#include "./base/operand.h" -#include "./base/osutils.h" -#include "./base/runtime.h" -#include "./base/simdtypes.h" -#include "./base/string.h" -#include "./base/utils.h" -#include "./base/vmem.h" -#include "./base/zone.h" - -// [Guard] -#endif // _ASMJIT_BASE_H diff --git a/src/asmjit/base/arch.cpp b/src/asmjit/base/arch.cpp deleted file mode 100644 index 2e849c6..0000000 --- a/src/asmjit/base/arch.cpp +++ /dev/null @@ -1,161 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/arch.h" - -#if defined(ASMJIT_BUILD_X86) -#include "../x86/x86operand.h" -#endif // ASMJIT_BUILD_X86 - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::ArchInfo] -// ============================================================================ - -static const uint32_t archInfoTable[] = { - // <-------------+---------------------+-----------------------+-------+ - // | Type | SubType | GPInfo| - // <-------------+---------------------+-----------------------+-------+ - ASMJIT_PACK32_4x8(ArchInfo::kTypeNone , ArchInfo::kSubTypeNone, 0, 0), - ASMJIT_PACK32_4x8(ArchInfo::kTypeX86 , ArchInfo::kSubTypeNone, 4, 8), - ASMJIT_PACK32_4x8(ArchInfo::kTypeX64 , ArchInfo::kSubTypeNone, 8, 16), - ASMJIT_PACK32_4x8(ArchInfo::kTypeX32 , ArchInfo::kSubTypeNone, 8, 16), - ASMJIT_PACK32_4x8(ArchInfo::kTypeA32 , ArchInfo::kSubTypeNone, 4, 16), - ASMJIT_PACK32_4x8(ArchInfo::kTypeA64 , ArchInfo::kSubTypeNone, 8, 32) -}; - -ASMJIT_FAVOR_SIZE void ArchInfo::init(uint32_t type, uint32_t subType) noexcept { - uint32_t index = type < ASMJIT_ARRAY_SIZE(archInfoTable) ? type : uint32_t(0); - - // Make sure the `archInfoTable` array is correctly indexed. - _signature = archInfoTable[index]; - ASMJIT_ASSERT(_type == index); - - // Even if the architecture is not known we setup its type and sub-type, - // however, such architecture is not really useful. - _type = type; - _subType = subType; -} - -// ============================================================================ -// [asmjit::ArchUtils] -// ============================================================================ - -ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t archType, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept { - uint32_t typeId = typeIdInOut; - - // Zero the signature so it's clear in case that typeId is not invalid. - regInfo._signature = 0; - -#if defined(ASMJIT_BUILD_X86) - if (ArchInfo::isX86Family(archType)) { - // Passed RegType instead of TypeId? - if (typeId <= Reg::kRegMax) - typeId = x86OpData.archRegs.regTypeToTypeId[typeId]; - - if (ASMJIT_UNLIKELY(!TypeId::isValid(typeId))) - return DebugUtils::errored(kErrorInvalidTypeId); - - // First normalize architecture dependent types. - if (TypeId::isAbstract(typeId)) { - if (typeId == TypeId::kIntPtr) - typeId = (archType == ArchInfo::kTypeX86) ? 
TypeId::kI32 : TypeId::kI64; - else - typeId = (archType == ArchInfo::kTypeX86) ? TypeId::kU32 : TypeId::kU64; - } - - // Type size helps to construct all kinds of registers. If the size is zero - // then the TypeId is invalid. - uint32_t size = TypeId::sizeOf(typeId); - if (ASMJIT_UNLIKELY(!size)) - return DebugUtils::errored(kErrorInvalidTypeId); - - if (ASMJIT_UNLIKELY(typeId == TypeId::kF80)) - return DebugUtils::errored(kErrorInvalidUseOfF80); - - uint32_t regType = 0; - - switch (typeId) { - case TypeId::kI8: - case TypeId::kU8: - regType = X86Reg::kRegGpbLo; - break; - - case TypeId::kI16: - case TypeId::kU16: - regType = X86Reg::kRegGpw; - break; - - case TypeId::kI32: - case TypeId::kU32: - regType = X86Reg::kRegGpd; - break; - - case TypeId::kI64: - case TypeId::kU64: - if (archType == ArchInfo::kTypeX86) - return DebugUtils::errored(kErrorInvalidUseOfGpq); - - regType = X86Reg::kRegGpq; - break; - - // F32 and F64 are always promoted to use vector registers. - case TypeId::kF32: - typeId = TypeId::kF32x1; - regType = X86Reg::kRegXmm; - break; - - case TypeId::kF64: - typeId = TypeId::kF64x1; - regType = X86Reg::kRegXmm; - break; - - // Mask registers {k}. - case TypeId::kMask8: - case TypeId::kMask16: - case TypeId::kMask32: - case TypeId::kMask64: - regType = X86Reg::kRegK; - break; - - // MMX registers. - case TypeId::kMmx32: - case TypeId::kMmx64: - regType = X86Reg::kRegMm; - break; - - // XMM|YMM|ZMM registers. - default: - if (size <= 16) - regType = X86Reg::kRegXmm; - else if (size == 32) - regType = X86Reg::kRegYmm; - else - regType = X86Reg::kRegZmm; - break; - } - - typeIdInOut = typeId; - regInfo._signature = x86OpData.archRegs.regInfo[regType].getSignature(); - return kErrorOk; - } -#endif // ASMJIT_BUILD_X86 - - return DebugUtils::errored(kErrorInvalidArch); -} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/arch.h b/src/asmjit/base/arch.h deleted file mode 100644 index 22e95e3..0000000 --- a/src/asmjit/base/arch.h +++ /dev/null @@ -1,199 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_ARCH_H -#define _ASMJIT_BASE_ARCH_H - -// [Dependencies] -#include "../base/globals.h" -#include "../base/operand.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::ArchInfo] -// ============================================================================ - -class ArchInfo { -public: - //! Architecture type. - ASMJIT_ENUM(Type) { - kTypeNone = 0, //!< No/Unknown architecture. - - // X86 architectures. - kTypeX86 = 1, //!< X86 architecture (32-bit). - kTypeX64 = 2, //!< X64 architecture (64-bit) (AMD64). - kTypeX32 = 3, //!< X32 architecture (DEAD-END). - - // ARM architectures. - kTypeA32 = 4, //!< ARM 32-bit architecture (AArch32/ARM/THUMB). - kTypeA64 = 5, //!< ARM 64-bit architecture (AArch64). - - //! Architecture detected at compile-time (architecture of the host). - kTypeHost = ASMJIT_ARCH_X86 ? kTypeX86 : - ASMJIT_ARCH_X64 ? kTypeX64 : - ASMJIT_ARCH_ARM32 ? kTypeA32 : - ASMJIT_ARCH_ARM64 ? kTypeA64 : kTypeNone - }; - - //! Architecture sub-type or execution mode. - ASMJIT_ENUM(SubType) { - kSubTypeNone = 0, //!< Default mode (or no specific mode). - - // X86 sub-types. 
- kSubTypeX86_AVX = 1, //!< Code generation uses AVX by default (VEC instructions). - kSubTypeX86_AVX2 = 2, //!< Code generation uses AVX2 by default (VEC instructions). - kSubTypeX86_AVX512 = 3, //!< Code generation uses AVX-512F by default (+32 vector regs). - kSubTypeX86_AVX512VL = 4, //!< Code generation uses AVX-512F-VL by default (+VL extensions). - - // ARM sub-types. - kSubTypeA32_Thumb = 8, //!< THUMB|THUMB2 sub-type (only ARM in 32-bit mode). - -#if (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX512VL__) - kSubTypeHost = kSubTypeX86_AVX512VL -#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX512F__) - kSubTypeHost = kSubTypeX86_AVX512 -#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX2__) - kSubTypeHost = kSubTypeX86_AVX2 -#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX__) - kSubTypeHost = kSubTypeX86_AVX -#elif (ASMJIT_ARCH_ARM32) && (defined(_M_ARMT) || defined(__thumb__) || defined(__thumb2__)) - kSubTypeHost = kSubTypeA32_Thumb -#else - kSubTypeHost = 0 -#endif - }; - - // -------------------------------------------------------------------------- - // [Utilities] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE bool isX86Family(uint32_t archType) noexcept { return archType >= kTypeX86 && archType <= kTypeX32; } - static ASMJIT_INLINE bool isArmFamily(uint32_t archType) noexcept { return archType >= kTypeA32 && archType <= kTypeA64; } - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE ArchInfo() noexcept : _signature(0) {} - ASMJIT_INLINE ArchInfo(const ArchInfo& other) noexcept = default; - explicit ASMJIT_INLINE ArchInfo(uint32_t type, uint32_t subType = kSubTypeNone) noexcept { init(type, subType); } - - ASMJIT_INLINE static ArchInfo host() noexcept { return ArchInfo(kTypeHost, kSubTypeHost); } - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool isInitialized() const noexcept { return _type != kTypeNone; } - - ASMJIT_API void init(uint32_t type, uint32_t subType = kSubTypeNone) noexcept; - ASMJIT_INLINE void reset() noexcept { _signature = 0; } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get if the architecture is 32-bit. - ASMJIT_INLINE bool is32Bit() const noexcept { return _gpSize == 4; } - //! Get if the architecture is 64-bit. - ASMJIT_INLINE bool is64Bit() const noexcept { return _gpSize == 8; } - - //! Get architecture type, see \ref Type. - ASMJIT_INLINE uint32_t getType() const noexcept { return _type; } - - //! Get architecture sub-type, see \ref SubType. - //! - //! X86 & X64 - //! --------- - //! - //! Architecture subtype describe the highest instruction-set level that can - //! be used. - //! - //! ARM32 - //! ----- - //! - //! Architecture mode means the instruction encoding to be used when generating - //! machine code, thus mode can be used to force generation of THUMB and THUMB2 - //! encoding or regular ARM encoding. - //! - //! ARM64 - //! ----- - //! - //! No meaning yet. - ASMJIT_INLINE uint32_t getSubType() const noexcept { return _subType; } - - //! Get if the architecture is X86, X64, or X32. 
- ASMJIT_INLINE bool isX86Family() const noexcept { return isX86Family(_type); } - //! Get if the architecture is ARM32 or ARM64. - ASMJIT_INLINE bool isArmFamily() const noexcept { return isArmFamily(_type); } - - //! Get a size of a general-purpose register. - ASMJIT_INLINE uint32_t getGpSize() const noexcept { return _gpSize; } - //! Get number of general-purpose registers. - ASMJIT_INLINE uint32_t getGpCount() const noexcept { return _gpCount; } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE ArchInfo& operator=(const ArchInfo& other) noexcept = default; - ASMJIT_INLINE bool operator==(const ArchInfo& other) const noexcept { return _signature == other._signature; } - ASMJIT_INLINE bool operator!=(const ArchInfo& other) const noexcept { return _signature != other._signature; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - union { - struct { - uint8_t _type; //!< Architecture type. - uint8_t _subType; //!< Architecture sub-type. - uint8_t _gpSize; //!< Default size of a general purpose register. - uint8_t _gpCount; //!< Count of all general purpose registers. - }; - uint32_t _signature; //!< Architecture signature (32-bit int). - }; -}; - -// ============================================================================ -// [asmjit::ArchRegs] -// ============================================================================ - -//! Information about all architecture registers. -struct ArchRegs { - //! Register information and signatures indexed by \ref Reg::Type. - RegInfo regInfo[Reg::kRegMax + 1]; - //! Count (maximum) of registers per \ref Reg::Type. - uint8_t regCount[Reg::kRegMax + 1]; - //! Converts RegType to TypeId, see \ref TypeId::Id. - uint8_t regTypeToTypeId[Reg::kRegMax + 1]; -}; - -// ============================================================================ -// [asmjit::ArchUtils] -// ============================================================================ - -struct ArchUtils { - ASMJIT_API static Error typeIdToRegInfo(uint32_t archType, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept; -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_ARCH_H diff --git a/src/asmjit/base/assembler.cpp b/src/asmjit/base/assembler.cpp deleted file mode 100644 index 79a2666..0000000 --- a/src/asmjit/base/assembler.cpp +++ /dev/null @@ -1,447 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
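
Editorial aside, not part of the patch: a condensed sketch of the signature trick ArchInfo uses above, where assigning one packed 32-bit value through the anonymous union fills _type/_subType/_gpSize/_gpCount in a single store. MiniArchInfo is a hypothetical stand-in, and the sketch assumes a little-endian host and the same union-aliasing behavior the real class relies on.

    #include <cassert>
    #include <cstdint>

    // MiniArchInfo mirrors the anonymous-union layout of ArchInfo above.
    struct MiniArchInfo {
      union {
        struct {
          uint8_t type;     // Architecture type (kTypeX86, kTypeX64, ...).
          uint8_t subType;  // Architecture sub-type (AVX level, THUMB, ...).
          uint8_t gpSize;   // Default size of a general-purpose register.
          uint8_t gpCount;  // Count of general-purpose registers.
        };
        uint32_t signature; // All four fields viewed as one 32-bit value.
      };

      // Like ArchInfo::init(): one store to `signature` fills all four fields.
      void init(uint32_t packed) { signature = packed; }
      bool is64Bit() const { return gpSize == 8; }
    };

    int main() {
      // The kTypeX64 entry of archInfoTable: type=2, subType=0, gpSize=8, gpCount=16.
      MiniArchInfo info;
      info.init(2u | (0u << 8) | (8u << 16) | (16u << 24));

      assert(info.type == 2 && info.gpSize == 8 && info.gpCount == 16);
      assert(info.is64Bit());
      return 0;
    }
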
- -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/assembler.h" -#include "../base/constpool.h" -#include "../base/utils.h" -#include "../base/vmem.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::Assembler - Construction / Destruction] -// ============================================================================ - -Assembler::Assembler() noexcept - : CodeEmitter(kTypeAssembler), - _section(nullptr), - _bufferData(nullptr), - _bufferEnd(nullptr), - _bufferPtr(nullptr), - _op4(), - _op5() {} - -Assembler::~Assembler() noexcept { - if (_code) sync(); -} - -// ============================================================================ -// [asmjit::Assembler - Events] -// ============================================================================ - -Error Assembler::onAttach(CodeHolder* code) noexcept { - // Attach to the end of the .text section. - _section = code->_sections[0]; - uint8_t* p = _section->_buffer._data; - - _bufferData = p; - _bufferEnd = p + _section->_buffer._capacity; - _bufferPtr = p + _section->_buffer._length; - - _op4.reset(); - _op5.reset(); - - return Base::onAttach(code); -} - -Error Assembler::onDetach(CodeHolder* code) noexcept { - _section = nullptr; - _bufferData = nullptr; - _bufferEnd = nullptr; - _bufferPtr = nullptr; - - _op4.reset(); - _op5.reset(); - - return Base::onDetach(code); -} - -// ============================================================================ -// [asmjit::Assembler - Code-Generation] -// ============================================================================ - -Error Assembler::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) { - _op4 = o4; - _op5 = o5; - _options |= kOptionOp4Op5Used; - return _emit(instId, o0, o1, o2, o3); -} - -Error Assembler::_emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) { - const Operand_* op = opArray; - switch (opCount) { - case 0: return _emit(instId, _none, _none, _none, _none); - case 1: return _emit(instId, op[0], _none, _none, _none); - case 2: return _emit(instId, op[0], op[1], _none, _none); - case 3: return _emit(instId, op[0], op[1], op[2], _none); - case 4: return _emit(instId, op[0], op[1], op[2], op[3]); - - case 5: - _op4 = op[4]; - _op5.reset(); - _options |= kOptionOp4Op5Used; - return _emit(instId, op[0], op[1], op[2], op[3]); - - case 6: - _op4 = op[4]; - _op5 = op[5]; - _options |= kOptionOp4Op5Used; - return _emit(instId, op[0], op[1], op[2], op[3]); - - default: - return DebugUtils::errored(kErrorInvalidArgument); - } -} - -// ============================================================================ -// [asmjit::Assembler - Sync] -// ============================================================================ - -void Assembler::sync() noexcept { - ASMJIT_ASSERT(_code != nullptr); // Only called by CodeHolder, so we must be attached. - ASMJIT_ASSERT(_section != nullptr); // One section must always be active, no matter what. - ASMJIT_ASSERT(_bufferData == _section->_buffer._data); // `_bufferStart` is a shortcut to `_section->buffer.data`. - - // Update only if the current offset is greater than the section length. 
- size_t offset = (size_t)(_bufferPtr - _bufferData); - if (_section->getBuffer().getLength() < offset) - _section->_buffer._length = offset; -} - -// ============================================================================ -// [asmjit::Assembler - Code-Buffer] -// ============================================================================ - -Error Assembler::setOffset(size_t offset) { - if (_lastError) return _lastError; - - size_t length = std::max(_section->getBuffer().getLength(), getOffset()); - if (ASMJIT_UNLIKELY(offset > length)) - return setLastError(DebugUtils::errored(kErrorInvalidArgument)); - - // If the `Assembler` generated any code the `_bufferPtr` may be higher than - // the section length stored in `CodeHolder` as it doesn't update it each - // time it generates machine code. This is the same as calling `sync()`. - if (_section->_buffer._length < length) - _section->_buffer._length = length; - - _bufferPtr = _bufferData + offset; - return kErrorOk; -} - -// ============================================================================ -// [asmjit::Assembler - Comment] -// ============================================================================ - -Error Assembler::comment(const char* s, size_t len) { - if (_lastError) return _lastError; - -#if !defined(ASMJIT_DISABLE_LOGGING) - if (_globalOptions & kOptionLoggingEnabled) { - Logger* logger = _code->getLogger(); - logger->log(s, len); - logger->log("\n", 1); - return kErrorOk; - } -#else - ASMJIT_UNUSED(s); - ASMJIT_UNUSED(len); -#endif - - return kErrorOk; -} - -// ============================================================================ -// [asmjit::Assembler - Building Blocks] -// ============================================================================ - -Label Assembler::newLabel() { - uint32_t id = 0; - if (!_lastError) { - ASMJIT_ASSERT(_code != nullptr); - Error err = _code->newLabelId(id); - if (ASMJIT_UNLIKELY(err)) setLastError(err); - } - return Label(id); -} - -Label Assembler::newNamedLabel(const char* name, size_t nameLength, uint32_t type, uint32_t parentId) { - uint32_t id = 0; - if (!_lastError) { - ASMJIT_ASSERT(_code != nullptr); - Error err = _code->newNamedLabelId(id, name, nameLength, type, parentId); - if (ASMJIT_UNLIKELY(err)) setLastError(err); - } - return Label(id); -} - -Error Assembler::bind(const Label& label) { - if (_lastError) return _lastError; - ASMJIT_ASSERT(_code != nullptr); - - LabelEntry* le = _code->getLabelEntry(label); - if (ASMJIT_UNLIKELY(!le)) - return setLastError(DebugUtils::errored(kErrorInvalidLabel)); - - // Label can be bound only once. - if (ASMJIT_UNLIKELY(le->isBound())) - return setLastError(DebugUtils::errored(kErrorLabelAlreadyBound)); - -#if !defined(ASMJIT_DISABLE_LOGGING) - if (_globalOptions & kOptionLoggingEnabled) { - StringBuilderTmp<256> sb; - if (le->hasName()) - sb.setFormat("%s:", le->getName()); - else - sb.setFormat("L%u:", Operand::unpackId(label.getId())); - - size_t binSize = 0; - if (!_code->_logger->hasOption(Logger::kOptionBinaryForm)) - binSize = Globals::kInvalidIndex; - - Logging::formatLine(sb, nullptr, binSize, 0, 0, getInlineComment()); - _code->_logger->log(sb.getData(), sb.getLength()); - } -#endif // !ASMJIT_DISABLE_LOGGING - - Error err = kErrorOk; - size_t pos = getOffset(); - - LabelLink* link = le->_links; - LabelLink* prev = nullptr; - - while (link) { - intptr_t offset = link->offset; - uint32_t relocId = link->relocId; - - if (relocId != RelocEntry::kInvalidId) { - // Adjust relocation data. 
- RelocEntry* re = _code->_relocations[relocId]; - re->_data += static_cast(pos); - } - else { - // Not using relocId, this means that we are overwriting a real - // displacement in the CodeBuffer. - int32_t patchedValue = static_cast( - static_cast(pos) - offset + link->rel); - - // Size of the value we are going to patch. Only BYTE/DWORD is allowed. - uint32_t size = _bufferData[offset]; - if (size == 4) - Utils::writeI32u(_bufferData + offset, static_cast(patchedValue)); - else if (size == 1 && Utils::isInt8(patchedValue)) - _bufferData[offset] = static_cast(patchedValue & 0xFF); - else - err = DebugUtils::errored(kErrorInvalidDisplacement); - } - - prev = link->prev; - _code->_unresolvedLabelsCount--; - _code->_baseHeap.release(link, sizeof(LabelLink)); - - link = prev; - } - - // Set as bound. - le->_sectionId = _section->getId(); - le->_offset = pos; - le->_links = nullptr; - resetInlineComment(); - - if (err != kErrorOk) - return setLastError(err); - - return kErrorOk; -} - -Error Assembler::embed(const void* data, uint32_t size) { - if (_lastError) return _lastError; - - if (getRemainingSpace() < size) { - Error err = _code->growBuffer(&_section->_buffer, size); - if (ASMJIT_UNLIKELY(err != kErrorOk)) return setLastError(err); - } - - ::memcpy(_bufferPtr, data, size); - _bufferPtr += size; - -#if !defined(ASMJIT_DISABLE_LOGGING) - if (_globalOptions & kOptionLoggingEnabled) - _code->_logger->logBinary(data, size); -#endif // !ASMJIT_DISABLE_LOGGING - - return kErrorOk; -} - -Error Assembler::embedLabel(const Label& label) { - if (_lastError) return _lastError; - ASMJIT_ASSERT(_code != nullptr); - - RelocEntry* re; - LabelEntry* le = _code->getLabelEntry(label); - - if (ASMJIT_UNLIKELY(!le)) - return setLastError(DebugUtils::errored(kErrorInvalidLabel)); - - Error err; - uint32_t gpSize = getGpSize(); - - if (getRemainingSpace() < gpSize) { - err = _code->growBuffer(&_section->_buffer, gpSize); - if (ASMJIT_UNLIKELY(err)) return setLastError(err); - } - -#if !defined(ASMJIT_DISABLE_LOGGING) - if (_globalOptions & kOptionLoggingEnabled) - _code->_logger->logf(gpSize == 4 ? ".dd L%u\n" : ".dq L%u\n", Operand::unpackId(label.getId())); -#endif // !ASMJIT_DISABLE_LOGGING - - err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs, gpSize); - if (ASMJIT_UNLIKELY(err)) return setLastError(err); - - re->_sourceSectionId = _section->getId(); - re->_sourceOffset = static_cast(getOffset()); - - if (le->isBound()) { - re->_targetSectionId = le->getSectionId(); - re->_data = static_cast(static_cast(le->getOffset())); - } - else { - LabelLink* link = _code->newLabelLink(le, _section->getId(), getOffset(), 0); - if (ASMJIT_UNLIKELY(!link)) - return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - link->relocId = re->getId(); - } - - // Emit dummy DWORD/QWORD depending on the address size. 
- ::memset(_bufferPtr, 0, gpSize); - _bufferPtr += gpSize; - - return kErrorOk; -} - -Error Assembler::embedConstPool(const Label& label, const ConstPool& pool) { - if (_lastError) return _lastError; - - if (!isLabelValid(label)) - return DebugUtils::errored(kErrorInvalidLabel); - - ASMJIT_PROPAGATE(align(kAlignData, static_cast(pool.getAlignment()))); - ASMJIT_PROPAGATE(bind(label)); - - size_t size = pool.getSize(); - if (getRemainingSpace() < size) { - Error err = _code->growBuffer(&_section->_buffer, size); - if (ASMJIT_UNLIKELY(err)) return setLastError(err); - } - - uint8_t* p = _bufferPtr; - pool.fill(p); - -#if !defined(ASMJIT_DISABLE_LOGGING) - if (_globalOptions & kOptionLoggingEnabled) - _code->_logger->logBinary(p, size); -#endif // !ASMJIT_DISABLE_LOGGING - - _bufferPtr += size; - return kErrorOk; -} - -// ============================================================================ -// [asmjit::Assembler - Emit-Helpers] -// ============================================================================ - -#if !defined(ASMJIT_DISABLE_LOGGING) -void Assembler::_emitLog( - uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, - uint32_t relSize, uint32_t imLen, uint8_t* afterCursor) { - - Logger* logger = _code->getLogger(); - ASMJIT_ASSERT(logger != nullptr); - ASMJIT_ASSERT(options & CodeEmitter::kOptionLoggingEnabled); - - StringBuilderTmp<256> sb; - uint32_t logOptions = logger->getOptions(); - - uint8_t* beforeCursor = _bufferPtr; - intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor); - - sb.appendString(logger->getIndentation()); - - Operand_ opArray[6]; - opArray[0].copyFrom(o0); - opArray[1].copyFrom(o1); - opArray[2].copyFrom(o2); - opArray[3].copyFrom(o3); - - if (options & kOptionOp4Op5Used) { - opArray[4].copyFrom(_op4); - opArray[5].copyFrom(_op5); - } - else { - opArray[4].reset(); - opArray[5].reset(); - } - - Logging::formatInstruction( - sb, logOptions, - this, getArchType(), - Inst::Detail(instId, options, _extraReg), opArray, 6); - - if ((logOptions & Logger::kOptionBinaryForm) != 0) - Logging::formatLine(sb, _bufferPtr, emittedSize, relSize, imLen, getInlineComment()); - else - Logging::formatLine(sb, nullptr, Globals::kInvalidIndex, 0, 0, getInlineComment()); - - logger->log(sb.getData(), sb.getLength()); -} - -Error Assembler::_emitFailed( - Error err, - uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) { - - StringBuilderTmp<256> sb; - sb.appendString(DebugUtils::errorAsString(err)); - sb.appendString(": "); - - Operand_ opArray[6]; - opArray[0].copyFrom(o0); - opArray[1].copyFrom(o1); - opArray[2].copyFrom(o2); - opArray[3].copyFrom(o3); - - if (options & kOptionOp4Op5Used) { - opArray[4].copyFrom(_op4); - opArray[5].copyFrom(_op5); - } - else { - opArray[4].reset(); - opArray[5].reset(); - } - - Logging::formatInstruction( - sb, 0, - this, getArchType(), - Inst::Detail(instId, options, _extraReg), opArray, 6); - - resetOptions(); - resetExtraReg(); - resetInlineComment(); - return setLastError(err, sb.getData()); -} -#endif - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/assembler.h b/src/asmjit/base/assembler.h deleted file mode 100644 index 55fbb14..0000000 --- a/src/asmjit/base/assembler.h +++ /dev/null @@ -1,154 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
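
Editorial aside, not part of the patch: Assembler::bind() above resolves forward label references by walking the label's link list and patching the displacement bytes that were emitted as placeholders. The sketch below shows that back-patching idea in isolation with hypothetical types (MiniBuffer, PendingLink - not the asmjit structures); it hard-codes a rel32-to-next-byte convention instead of the size byte and `rel` adjustment the real code uses.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // A pending forward reference: where the 32-bit placeholder was written.
    struct PendingLink { size_t offset; };

    struct MiniBuffer {
      std::vector<uint8_t> code;
      std::vector<PendingLink> links;

      // Emit a 4-byte placeholder for a displacement we cannot compute yet.
      void emitUnresolvedRel32() {
        links.push_back({code.size()});
        code.insert(code.end(), {0, 0, 0, 0});
      }

      // Bind the label at the current position: walk the pending links and
      // patch each placeholder, then drop the links (the label is now bound).
      void bind() {
        size_t pos = code.size();
        for (const PendingLink& link : links) {
          int32_t patched = int32_t(pos - (link.offset + 4)); // rel32 to next byte.
          std::memcpy(&code[link.offset], &patched, 4);
        }
        links.clear();
      }
    };

    int main() {
      MiniBuffer b;
      b.emitUnresolvedRel32();  // e.g. the imm32 of a forward jump.
      b.code.push_back(0x90);   // One byte of code between the jump and the label.
      b.bind();                 // The label lands here; displacement becomes 1.

      int32_t disp;
      std::memcpy(&disp, &b.code[0], 4);
      assert(disp == 1);
      return 0;
    }
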
- -// [Guard] -#ifndef _ASMJIT_BASE_ASSEMBLER_H -#define _ASMJIT_BASE_ASSEMBLER_H - -// [Dependencies] -#include "../base/codeemitter.h" -#include "../base/codeholder.h" -#include "../base/operand.h" -#include "../base/simdtypes.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::Assembler] -// ============================================================================ - -//! Base assembler. -//! -//! This class implements a base interface that is used by architecture -//! specific assemblers. -//! -//! \sa CodeCompiler. -class ASMJIT_VIRTAPI Assembler : public CodeEmitter { -public: - ASMJIT_NONCOPYABLE(Assembler) - typedef CodeEmitter Base; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `Assembler` instance. - ASMJIT_API Assembler() noexcept; - //! Destroy the `Assembler` instance. - ASMJIT_API virtual ~Assembler() noexcept; - - // -------------------------------------------------------------------------- - // [Events] - // -------------------------------------------------------------------------- - - ASMJIT_API Error onAttach(CodeHolder* code) noexcept override; - ASMJIT_API Error onDetach(CodeHolder* code) noexcept override; - - // -------------------------------------------------------------------------- - // [Code-Generation] - // -------------------------------------------------------------------------- - - using CodeEmitter::_emit; - - ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) override; - ASMJIT_API Error _emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) override; - - // -------------------------------------------------------------------------- - // [Code-Buffer] - // -------------------------------------------------------------------------- - - //! Called by \ref CodeHolder::sync(). - ASMJIT_API virtual void sync() noexcept; - - //! Get the capacity of the current CodeBuffer. - ASMJIT_INLINE size_t getBufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); } - //! Get the number of remaining bytes in the current CodeBuffer. - ASMJIT_INLINE size_t getRemainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); } - - //! Get the current position in the CodeBuffer. - ASMJIT_INLINE size_t getOffset() const noexcept { return (size_t)(_bufferPtr - _bufferData); } - //! Set the current position in the CodeBuffer to `offset`. - //! - //! NOTE: The `offset` cannot be outside of the buffer length (even if it's - //! within buffer's capacity). - ASMJIT_API Error setOffset(size_t offset); - - //! Get start of the CodeBuffer of the current section. - ASMJIT_INLINE uint8_t* getBufferData() const noexcept { return _bufferData; } - //! Get end (first invalid byte) of the current section. - ASMJIT_INLINE uint8_t* getBufferEnd() const noexcept { return _bufferEnd; } - //! Get pointer in the CodeBuffer of the current section. 
- ASMJIT_INLINE uint8_t* getBufferPtr() const noexcept { return _bufferPtr; } - - // -------------------------------------------------------------------------- - // [Code-Generation] - // -------------------------------------------------------------------------- - - ASMJIT_API Label newLabel() override; - ASMJIT_API Label newNamedLabel( - const char* name, - size_t nameLength = Globals::kInvalidIndex, - uint32_t type = Label::kTypeGlobal, - uint32_t parentId = 0) override; - ASMJIT_API Error bind(const Label& label) override; - ASMJIT_API Error embed(const void* data, uint32_t size) override; - ASMJIT_API Error embedLabel(const Label& label) override; - ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override; - ASMJIT_API Error comment(const char* s, size_t len = Globals::kInvalidIndex) override; - - // -------------------------------------------------------------------------- - // [Emit-Helpers] - // -------------------------------------------------------------------------- - -protected: -#if !defined(ASMJIT_DISABLE_LOGGING) - void _emitLog( - uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, - uint32_t relSize, uint32_t imLen, uint8_t* afterCursor); - - Error _emitFailed( - Error err, - uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3); -#else - ASMJIT_INLINE Error _emitFailed( - uint32_t err, - uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) { - - resetOptions(); - resetInlineComment(); - return setLastError(err); - } -#endif - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - -public: - SectionEntry* _section; //!< Current section where the assembling happens. - uint8_t* _bufferData; //!< Start of the CodeBuffer of the current section. - uint8_t* _bufferEnd; //!< End (first invalid byte) of the current section. - uint8_t* _bufferPtr; //!< Pointer in the CodeBuffer of the current section. - - Operand_ _op4; //!< 5th operand data, used only temporarily. - Operand_ _op5; //!< 6th operand data, used only temporarily. -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_ASSEMBLER_H diff --git a/src/asmjit/base/codebuilder.cpp b/src/asmjit/base/codebuilder.cpp deleted file mode 100644 index 1f00248..0000000 --- a/src/asmjit/base/codebuilder.cpp +++ /dev/null @@ -1,584 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
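
Editorial aside, not part of the patch: the Assembler above tracks its CodeBuffer with three raw pointers, so getOffset(), getRemainingSpace(), and setOffset() reduce to pointer arithmetic. MiniCursor is a hypothetical, simplified illustration of that bookkeeping, including the rule that setOffset() may rewind but never move past the code generated so far.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for the three pointers the Assembler keeps above.
    struct MiniCursor {
      uint8_t* data; // Start of the code buffer (_bufferData).
      uint8_t* end;  // First invalid byte (_bufferEnd).
      uint8_t* ptr;  // Current write position (_bufferPtr).

      size_t capacity() const { return size_t(end - data); }
      size_t offset() const { return size_t(ptr - data); }
      size_t remainingSpace() const { return size_t(end - ptr); }

      // Mirrors the contract of Assembler::setOffset(): rewinding is fine,
      // but the offset may never point past the code generated so far.
      bool setOffset(size_t newOffset, size_t generatedLength) {
        if (newOffset > generatedLength)
          return false; // kErrorInvalidArgument in asmjit terms.
        ptr = data + newOffset;
        return true;
      }
    };

    int main() {
      uint8_t buffer[64] = {};
      MiniCursor c{buffer, buffer + sizeof(buffer), buffer};

      *c.ptr++ = 0xC3; // Emit one byte.
      assert(c.capacity() == 64);
      assert(c.offset() == 1 && c.remainingSpace() == 63);

      assert(c.setOffset(0, /*generatedLength=*/1));  // Rewind: allowed.
      assert(!c.setOffset(2, /*generatedLength=*/1)); // Past generated code: rejected.
      return 0;
    }
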
- -// [Export] -#define ASMJIT_EXPORTS - -// [Guard] -#include "../asmjit_build.h" -#if !defined(ASMJIT_DISABLE_BUILDER) - -// [Dependencies] -#include "../base/codebuilder.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::CodeBuilder - Construction / Destruction] -// ============================================================================ - -CodeBuilder::CodeBuilder() noexcept - : CodeEmitter(kTypeBuilder), - _cbBaseZone(32768 - Zone::kZoneOverhead), - _cbDataZone(16384 - Zone::kZoneOverhead), - _cbPassZone(32768 - Zone::kZoneOverhead), - _cbHeap(&_cbBaseZone), - _cbPasses(), - _cbLabels(), - _firstNode(nullptr), - _lastNode(nullptr), - _cursor(nullptr), - _position(0), - _nodeFlags(0) {} -CodeBuilder::~CodeBuilder() noexcept {} - -// ============================================================================ -// [asmjit::CodeBuilder - Events] -// ============================================================================ - -Error CodeBuilder::onAttach(CodeHolder* code) noexcept { - return Base::onAttach(code); -} - -Error CodeBuilder::onDetach(CodeHolder* code) noexcept { - _cbPasses.reset(); - _cbLabels.reset(); - _cbHeap.reset(&_cbBaseZone); - - _cbBaseZone.reset(false); - _cbDataZone.reset(false); - _cbPassZone.reset(false); - - _position = 0; - _nodeFlags = 0; - - _firstNode = nullptr; - _lastNode = nullptr; - _cursor = nullptr; - - return Base::onDetach(code); -} - -// ============================================================================ -// [asmjit::CodeBuilder - Node-Factory] -// ============================================================================ - -Error CodeBuilder::getCBLabel(CBLabel** pOut, uint32_t id) noexcept { - if (_lastError) return _lastError; - ASMJIT_ASSERT(_code != nullptr); - - size_t index = Operand::unpackId(id); - if (ASMJIT_UNLIKELY(index >= _code->getLabelsCount())) - return DebugUtils::errored(kErrorInvalidLabel); - - if (index >= _cbLabels.getLength()) - ASMJIT_PROPAGATE(_cbLabels.resize(&_cbHeap, index + 1)); - - CBLabel* node = _cbLabels[index]; - if (!node) { - node = newNodeT(id); - if (ASMJIT_UNLIKELY(!node)) - return DebugUtils::errored(kErrorNoHeapMemory); - _cbLabels[index] = node; - } - - *pOut = node; - return kErrorOk; -} - -Error CodeBuilder::registerLabelNode(CBLabel* node) noexcept { - if (_lastError) return _lastError; - ASMJIT_ASSERT(_code != nullptr); - - // Don't call setLastError() from here, we are noexcept and we are called - // by `newLabelNode()` and `newFuncNode()`, which are noexcept as well. - uint32_t id; - ASMJIT_PROPAGATE(_code->newLabelId(id)); - size_t index = Operand::unpackId(id); - - // We just added one label so it must be true. 
- ASMJIT_ASSERT(_cbLabels.getLength() < index + 1); - ASMJIT_PROPAGATE(_cbLabels.resize(&_cbHeap, index + 1)); - - _cbLabels[index] = node; - node->_id = id; - return kErrorOk; -} - -CBLabel* CodeBuilder::newLabelNode() noexcept { - CBLabel* node = newNodeT(); - if (!node || registerLabelNode(node) != kErrorOk) - return nullptr; - return node; -} - -CBAlign* CodeBuilder::newAlignNode(uint32_t mode, uint32_t alignment) noexcept { - return newNodeT(mode, alignment); -} - -CBData* CodeBuilder::newDataNode(const void* data, uint32_t size) noexcept { - if (size > CBData::kInlineBufferSize) { - void* cloned = _cbDataZone.alloc(size); - if (!cloned) return nullptr; - - if (data) ::memcpy(cloned, data, size); - data = cloned; - } - - return newNodeT(const_cast(data), size); -} - -CBConstPool* CodeBuilder::newConstPool() noexcept { - CBConstPool* node = newNodeT(); - if (!node || registerLabelNode(node) != kErrorOk) - return nullptr; - return node; -} - -CBComment* CodeBuilder::newCommentNode(const char* s, size_t len) noexcept { - if (s) { - if (len == Globals::kInvalidIndex) len = ::strlen(s); - if (len > 0) { - s = static_cast(_cbDataZone.dup(s, len, true)); - if (!s) return nullptr; - } - } - - return newNodeT(s); -} - -// ============================================================================ -// [asmjit::CodeBuilder - Code-Emitter] -// ============================================================================ - -Label CodeBuilder::newLabel() { - uint32_t id = kInvalidValue; - - if (!_lastError) { - CBLabel* node = newNodeT(id); - if (ASMJIT_UNLIKELY(!node)) { - setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - } - else { - Error err = registerLabelNode(node); - if (ASMJIT_UNLIKELY(err)) - setLastError(err); - else - id = node->getId(); - } - } - - return Label(id); -} - -Label CodeBuilder::newNamedLabel(const char* name, size_t nameLength, uint32_t type, uint32_t parentId) { - uint32_t id = kInvalidValue; - - if (!_lastError) { - CBLabel* node = newNodeT(id); - if (ASMJIT_UNLIKELY(!node)) { - setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - } - else { - Error err = _code->newNamedLabelId(id, name, nameLength, type, parentId); - if (ASMJIT_UNLIKELY(err)) - setLastError(err); - else - id = node->getId(); - } - } - - return Label(id); -} - -Error CodeBuilder::bind(const Label& label) { - if (_lastError) return _lastError; - - CBLabel* node; - Error err = getCBLabel(&node, label); - if (ASMJIT_UNLIKELY(err)) - return setLastError(err); - - addNode(node); - return kErrorOk; -} - -Error CodeBuilder::align(uint32_t mode, uint32_t alignment) { - if (_lastError) return _lastError; - - CBAlign* node = newAlignNode(mode, alignment); - if (ASMJIT_UNLIKELY(!node)) - return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - - addNode(node); - return kErrorOk; -} - -Error CodeBuilder::embed(const void* data, uint32_t size) { - if (_lastError) return _lastError; - - CBData* node = newDataNode(data, size); - if (ASMJIT_UNLIKELY(!node)) - return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - - addNode(node); - return kErrorOk; -} - -Error CodeBuilder::embedLabel(const Label& label) { - if (_lastError) return _lastError; - - CBLabelData* node = newNodeT(label.getId()); - if (ASMJIT_UNLIKELY(!node)) - return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - - addNode(node); - return kErrorOk; -} - -Error CodeBuilder::embedConstPool(const Label& label, const ConstPool& pool) { - if (_lastError) return _lastError; - - if (!isLabelValid(label)) - return 
setLastError(DebugUtils::errored(kErrorInvalidLabel)); - - ASMJIT_PROPAGATE(align(kAlignData, static_cast(pool.getAlignment()))); - ASMJIT_PROPAGATE(bind(label)); - - CBData* node = newDataNode(nullptr, static_cast(pool.getSize())); - if (ASMJIT_UNLIKELY(!node)) - return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - - pool.fill(node->getData()); - addNode(node); - return kErrorOk; -} - -Error CodeBuilder::comment(const char* s, size_t len) { - if (_lastError) return _lastError; - - CBComment* node = newCommentNode(s, len); - if (ASMJIT_UNLIKELY(!node)) - return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - - addNode(node); - return kErrorOk; -} - -// ============================================================================ -// [asmjit::CodeBuilder - Node-Management] -// ============================================================================ - -CBNode* CodeBuilder::addNode(CBNode* node) noexcept { - ASMJIT_ASSERT(node); - ASMJIT_ASSERT(node->_prev == nullptr); - ASMJIT_ASSERT(node->_next == nullptr); - - if (!_cursor) { - if (!_firstNode) { - _firstNode = node; - _lastNode = node; - } - else { - node->_next = _firstNode; - _firstNode->_prev = node; - _firstNode = node; - } - } - else { - CBNode* prev = _cursor; - CBNode* next = _cursor->_next; - - node->_prev = prev; - node->_next = next; - - prev->_next = node; - if (next) - next->_prev = node; - else - _lastNode = node; - } - - _cursor = node; - return node; -} - -CBNode* CodeBuilder::addAfter(CBNode* node, CBNode* ref) noexcept { - ASMJIT_ASSERT(node); - ASMJIT_ASSERT(ref); - - ASMJIT_ASSERT(node->_prev == nullptr); - ASMJIT_ASSERT(node->_next == nullptr); - - CBNode* prev = ref; - CBNode* next = ref->_next; - - node->_prev = prev; - node->_next = next; - - prev->_next = node; - if (next) - next->_prev = node; - else - _lastNode = node; - - return node; -} - -CBNode* CodeBuilder::addBefore(CBNode* node, CBNode* ref) noexcept { - ASMJIT_ASSERT(node != nullptr); - ASMJIT_ASSERT(node->_prev == nullptr); - ASMJIT_ASSERT(node->_next == nullptr); - ASMJIT_ASSERT(ref != nullptr); - - CBNode* prev = ref->_prev; - CBNode* next = ref; - - node->_prev = prev; - node->_next = next; - - next->_prev = node; - if (prev) - prev->_next = node; - else - _firstNode = node; - - return node; -} - -static ASMJIT_INLINE void CodeBuilder_nodeRemoved(CodeBuilder* self, CBNode* node_) noexcept { - if (node_->isJmpOrJcc()) { - CBJump* node = static_cast(node_); - CBLabel* label = node->getTarget(); - - if (label) { - // Disconnect. 
- CBJump** pPrev = &label->_from; - for (;;) { - ASMJIT_ASSERT(*pPrev != nullptr); - - CBJump* current = *pPrev; - if (!current) break; - - if (current == node) { - *pPrev = node->_jumpNext; - break; - } - - pPrev = ¤t->_jumpNext; - } - - label->subNumRefs(); - } - } -} - -CBNode* CodeBuilder::removeNode(CBNode* node) noexcept { - CBNode* prev = node->_prev; - CBNode* next = node->_next; - - if (_firstNode == node) - _firstNode = next; - else - prev->_next = next; - - if (_lastNode == node) - _lastNode = prev; - else - next->_prev = prev; - - node->_prev = nullptr; - node->_next = nullptr; - - if (_cursor == node) - _cursor = prev; - CodeBuilder_nodeRemoved(this, node); - - return node; -} - -void CodeBuilder::removeNodes(CBNode* first, CBNode* last) noexcept { - if (first == last) { - removeNode(first); - return; - } - - CBNode* prev = first->_prev; - CBNode* next = last->_next; - - if (_firstNode == first) - _firstNode = next; - else - prev->_next = next; - - if (_lastNode == last) - _lastNode = prev; - else - next->_prev = prev; - - CBNode* node = first; - for (;;) { - CBNode* next = node->getNext(); - ASMJIT_ASSERT(next != nullptr); - - node->_prev = nullptr; - node->_next = nullptr; - - if (_cursor == node) - _cursor = prev; - CodeBuilder_nodeRemoved(this, node); - - if (node == last) - break; - node = next; - } -} - -CBNode* CodeBuilder::setCursor(CBNode* node) noexcept { - CBNode* old = _cursor; - _cursor = node; - return old; -} - -// ============================================================================ -// [asmjit::CodeBuilder - Passes] -// ============================================================================ - -ASMJIT_FAVOR_SIZE CBPass* CodeBuilder::getPassByName(const char* name) const noexcept { - for (size_t i = 0, len = _cbPasses.getLength(); i < len; i++) { - CBPass* pass = _cbPasses[i]; - if (::strcmp(pass->getName(), name) == 0) - return pass; - } - - return nullptr; -} - -ASMJIT_FAVOR_SIZE Error CodeBuilder::addPass(CBPass* pass) noexcept { - if (ASMJIT_UNLIKELY(pass == nullptr)) { - // Since this is directly called by `addPassT()` we treat `null` argument - // as out-of-memory condition. Otherwise it would be API misuse. - return DebugUtils::errored(kErrorNoHeapMemory); - } - else if (ASMJIT_UNLIKELY(pass->_cb)) { - // Kind of weird, but okay... 
- if (pass->_cb == this) - return kErrorOk; - return DebugUtils::errored(kErrorInvalidState); - } - - ASMJIT_PROPAGATE(_cbPasses.append(&_cbHeap, pass)); - pass->_cb = this; - return kErrorOk; -} - -ASMJIT_FAVOR_SIZE Error CodeBuilder::deletePass(CBPass* pass) noexcept { - if (ASMJIT_UNLIKELY(pass == nullptr)) - return DebugUtils::errored(kErrorInvalidArgument); - - if (pass->_cb != nullptr) { - if (pass->_cb != this) - return DebugUtils::errored(kErrorInvalidState); - - size_t index = _cbPasses.indexOf(pass); - ASMJIT_ASSERT(index != Globals::kInvalidIndex); - - pass->_cb = nullptr; - _cbPasses.removeAt(index); - } - - pass->~CBPass(); - return kErrorOk; -} - -// ============================================================================ -// [asmjit::CodeBuilder - Serialization] -// ============================================================================ - -Error CodeBuilder::serialize(CodeEmitter* dst) { - Error err = kErrorOk; - CBNode* node_ = getFirstNode(); - - do { - dst->setInlineComment(node_->getInlineComment()); - - switch (node_->getType()) { - case CBNode::kNodeAlign: { - CBAlign* node = static_cast(node_); - err = dst->align(node->getMode(), node->getAlignment()); - break; - } - - case CBNode::kNodeData: { - CBData* node = static_cast(node_); - err = dst->embed(node->getData(), node->getSize()); - break; - } - - case CBNode::kNodeFunc: - case CBNode::kNodeLabel: { - CBLabel* node = static_cast(node_); - err = dst->bind(node->getLabel()); - break; - } - - case CBNode::kNodeLabelData: { - CBLabelData* node = static_cast(node_); - err = dst->embedLabel(node->getLabel()); - break; - } - - case CBNode::kNodeConstPool: { - CBConstPool* node = static_cast(node_); - err = dst->embedConstPool(node->getLabel(), node->getConstPool()); - break; - } - - case CBNode::kNodeInst: - case CBNode::kNodeFuncCall: { - CBInst* node = node_->as(); - dst->setOptions(node->getOptions()); - dst->setExtraReg(node->getExtraReg()); - err = dst->emitOpArray(node->getInstId(), node->getOpArray(), node->getOpCount()); - break; - } - - case CBNode::kNodeComment: { - CBComment* node = static_cast(node_); - err = dst->comment(node->getInlineComment()); - break; - } - - default: - break; - } - - if (err) break; - node_ = node_->getNext(); - } while (node_); - - return err; -} - -// ============================================================================ -// [asmjit::CBPass] -// ============================================================================ - -CBPass::CBPass(const char* name) noexcept - : _cb(nullptr), - _name(name) {} -CBPass::~CBPass() noexcept {} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // !ASMJIT_DISABLE_BUILDER diff --git a/src/asmjit/base/codebuilder.h b/src/asmjit/base/codebuilder.h deleted file mode 100644 index 231dd84..0000000 --- a/src/asmjit/base/codebuilder.h +++ /dev/null @@ -1,915 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
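
Editorial aside, not part of the patch: CodeBuilder::addNode() above links every new node after the current cursor and then advances the cursor, and serialize() later replays the list front to back. The sketch below reproduces that cursor-based insertion with hypothetical minimal types (MiniBuilder, Node); it is not the asmjit API.

    #include <cassert>
    #include <string>
    #include <vector>

    // Hypothetical minimal node; the real CBNode carries much more state.
    struct Node {
      Node* prev = nullptr;
      Node* next = nullptr;
      std::string name;
      explicit Node(const char* n) : name(n) {}
    };

    struct MiniBuilder {
      Node* first = nullptr;
      Node* last = nullptr;
      Node* cursor = nullptr;

      // Same linking scheme as CodeBuilder::addNode(): insert after the cursor
      // (or prepend when there is no cursor) and make the new node the cursor.
      Node* addNode(Node* node) {
        Node* prev = cursor;
        Node* next = cursor ? cursor->next : first;

        node->prev = prev;
        node->next = next;
        if (prev) prev->next = node; else first = node;
        if (next) next->prev = node; else last = node;

        cursor = node;
        return node;
      }

      // Roughly what serialize() does: walk the list and replay every node.
      std::vector<std::string> serialize() const {
        std::vector<std::string> out;
        for (Node* n = first; n; n = n->next) out.push_back(n->name);
        return out;
      }
    };

    int main() {
      MiniBuilder b;
      Node mov("mov"), ret("ret"), label("L1:");
      b.addNode(&mov);
      b.addNode(&ret);

      b.cursor = &mov;    // Like setCursor(): the next insert goes after `mov`.
      b.addNode(&label);  // The label ends up between `mov` and `ret`.

      std::vector<std::string> order = b.serialize();
      assert(order.size() == 3 && order[1] == "L1:" && order[2] == "ret");
      return 0;
    }
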
- -// [Guard] -#ifndef _ASMJIT_BASE_CODEBUILDER_H -#define _ASMJIT_BASE_CODEBUILDER_H - -#include "../asmjit_build.h" -#if !defined(ASMJIT_DISABLE_BUILDER) - -// [Dependencies] -#include "../base/assembler.h" -#include "../base/codeholder.h" -#include "../base/constpool.h" -#include "../base/inst.h" -#include "../base/operand.h" -#include "../base/utils.h" -#include "../base/zone.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [Forward Declarations] -// ============================================================================ - -class CBNode; -class CBPass; - -class CBAlign; -class CBComment; -class CBConstPool; -class CBData; -class CBInst; -class CBJump; -class CBLabel; -class CBLabelData; -class CBSentinel; - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::CodeBuilder] -// ============================================================================ - -class ASMJIT_VIRTAPI CodeBuilder : public CodeEmitter { -public: - ASMJIT_NONCOPYABLE(CodeBuilder) - typedef CodeEmitter Base; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CodeBuilder` instance. - ASMJIT_API CodeBuilder() noexcept; - //! Destroy the `CodeBuilder` instance. - ASMJIT_API virtual ~CodeBuilder() noexcept; - - // -------------------------------------------------------------------------- - // [Events] - // -------------------------------------------------------------------------- - - ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override; - ASMJIT_API virtual Error onDetach(CodeHolder* code) noexcept override; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get a vector of CBPass objects that will be executed by `process()`. - ASMJIT_INLINE const ZoneVector& getPasses() const noexcept { return _cbPasses; } - - //! Get a vector of CBLabel nodes. - //! - //! NOTE: If a label of some index is not associated with `CodeBuilder` it - //! would be null, so always check for nulls if you iterate over the vector. - ASMJIT_INLINE const ZoneVector& getLabels() const noexcept { return _cbLabels; } - - //! Get the first node. - ASMJIT_INLINE CBNode* getFirstNode() const noexcept { return _firstNode; } - //! Get the last node. - ASMJIT_INLINE CBNode* getLastNode() const noexcept { return _lastNode; } - - // -------------------------------------------------------------------------- - // [Node-Management] - // -------------------------------------------------------------------------- - - //! \internal - template - ASMJIT_INLINE T* newNodeT() noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this); } - - //! \internal - template - ASMJIT_INLINE T* newNodeT(P0 p0) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0); } - - //! \internal - template - ASMJIT_INLINE T* newNodeT(P0 p0, P1 p1) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0, p1); } - - //! \internal - template - ASMJIT_INLINE T* newNodeT(P0 p0, P1 p1, P2 p2) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0, p1, p2); } - - ASMJIT_API Error registerLabelNode(CBLabel* node) noexcept; - //! Get `CBLabel` by `id`. 
- ASMJIT_API Error getCBLabel(CBLabel** pOut, uint32_t id) noexcept; - //! Get `CBLabel` by `label`. - ASMJIT_INLINE Error getCBLabel(CBLabel** pOut, const Label& label) noexcept { return getCBLabel(pOut, label.getId()); } - - //! Create a new \ref CBLabel node. - ASMJIT_API CBLabel* newLabelNode() noexcept; - //! Create a new \ref CBAlign node. - ASMJIT_API CBAlign* newAlignNode(uint32_t mode, uint32_t alignment) noexcept; - //! Create a new \ref CBData node. - ASMJIT_API CBData* newDataNode(const void* data, uint32_t size) noexcept; - //! Create a new \ref CBConstPool node. - ASMJIT_API CBConstPool* newConstPool() noexcept; - //! Create a new \ref CBComment node. - ASMJIT_API CBComment* newCommentNode(const char* s, size_t len) noexcept; - - // -------------------------------------------------------------------------- - // [Code-Emitter] - // -------------------------------------------------------------------------- - - ASMJIT_API virtual Label newLabel() override; - ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameLength = Globals::kInvalidIndex, uint32_t type = Label::kTypeGlobal, uint32_t parentId = kInvalidValue) override; - ASMJIT_API virtual Error bind(const Label& label) override; - ASMJIT_API virtual Error align(uint32_t mode, uint32_t alignment) override; - ASMJIT_API virtual Error embed(const void* data, uint32_t size) override; - ASMJIT_API virtual Error embedLabel(const Label& label) override; - ASMJIT_API virtual Error embedConstPool(const Label& label, const ConstPool& pool) override; - ASMJIT_API virtual Error comment(const char* s, size_t len = Globals::kInvalidIndex) override; - - // -------------------------------------------------------------------------- - // [Node-Management] - // -------------------------------------------------------------------------- - - //! Add `node` after the current and set current to `node`. - ASMJIT_API CBNode* addNode(CBNode* node) noexcept; - //! Insert `node` after `ref`. - ASMJIT_API CBNode* addAfter(CBNode* node, CBNode* ref) noexcept; - //! Insert `node` before `ref`. - ASMJIT_API CBNode* addBefore(CBNode* node, CBNode* ref) noexcept; - //! Remove `node`. - ASMJIT_API CBNode* removeNode(CBNode* node) noexcept; - //! Remove multiple nodes. - ASMJIT_API void removeNodes(CBNode* first, CBNode* last) noexcept; - - //! Get current node. - //! - //! \note If this method returns null it means that nothing has been - //! emitted yet. - ASMJIT_INLINE CBNode* getCursor() const noexcept { return _cursor; } - //! Set the current node without returning the previous node. - ASMJIT_INLINE void _setCursor(CBNode* node) noexcept { _cursor = node; } - //! Set the current node to `node` and return the previous one. 
- ASMJIT_API CBNode* setCursor(CBNode* node) noexcept; - - // -------------------------------------------------------------------------- - // [Passes] - // -------------------------------------------------------------------------- - - template - ASMJIT_INLINE T* newPassT() noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(); } - template - ASMJIT_INLINE T* newPassT(P0 p0) noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(p0); } - template - ASMJIT_INLINE T* newPassT(P0 p0, P1 p1) noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(p0, p1); } - - template - ASMJIT_INLINE Error addPassT() noexcept { return addPass(newPassT()); } - template - ASMJIT_INLINE Error addPassT(P0 p0) noexcept { return addPass(newPassT(p0)); } - template - ASMJIT_INLINE Error addPassT(P0 p0, P1 p1) noexcept { return addPass(newPassT(p0, p1)); } - - //! Get a `CBPass` by name. - ASMJIT_API CBPass* getPassByName(const char* name) const noexcept; - //! Add `pass` to the list of passes. - ASMJIT_API Error addPass(CBPass* pass) noexcept; - //! Remove `pass` from the list of passes and delete it. - ASMJIT_API Error deletePass(CBPass* pass) noexcept; - - // -------------------------------------------------------------------------- - // [Serialization] - // -------------------------------------------------------------------------- - - ASMJIT_API virtual Error serialize(CodeEmitter* dst); - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - Zone _cbBaseZone; //!< Base zone used to allocate nodes and `CBPass`. - Zone _cbDataZone; //!< Data zone used to allocate data and names. - Zone _cbPassZone; //!< Zone passed to `CBPass::process()`. - ZoneHeap _cbHeap; //!< ZoneHeap that uses `_cbBaseZone`. - - ZoneVector _cbPasses; //!< Array of `CBPass` objects. - ZoneVector _cbLabels; //!< Maps label indexes to `CBLabel` nodes. - - CBNode* _firstNode; //!< First node of the current section. - CBNode* _lastNode; //!< Last node of the current section. - CBNode* _cursor; //!< Current node (cursor). - - uint32_t _position; //!< Flow-id assigned to each new node. - uint32_t _nodeFlags; //!< Flags assigned to each new node. -}; - -// ============================================================================ -// [asmjit::CBPass] -// ============================================================================ - -//! `CodeBuilder` pass used to code transformations, analysis, and lowering. -class ASMJIT_VIRTAPI CBPass { -public: - ASMJIT_NONCOPYABLE(CBPass); - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_API CBPass(const char* name) noexcept; - ASMJIT_API virtual ~CBPass() noexcept; - - // -------------------------------------------------------------------------- - // [Interface] - // -------------------------------------------------------------------------- - - //! Process the code stored in CodeBuffer `cb`. - //! - //! This is the only function that is called by the `CodeBuilder` to process - //! the code. It passes the CodeBuilder itself (`cb`) and also a zone memory - //! allocator `zone`, which will be reset after the `process()` returns. The - //! allocator should be used for all allocations as it's fast and everything - //! it allocates will be released at once when `process()` returns. 
- virtual Error process(Zone* zone) noexcept = 0; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE const CodeBuilder* cb() const noexcept { return _cb; } - ASMJIT_INLINE const char* getName() const noexcept { return _name; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - CodeBuilder* _cb; //!< CodeBuilder this pass is assigned to. - const char* _name; //!< Name of the pass. -}; - -// ============================================================================ -// [asmjit::CBNode] -// ============================================================================ - -//! Node (CodeBuilder). -//! -//! Every node represents a building-block used by \ref CodeBuilder. It can be -//! instruction, data, label, comment, directive, or any other high-level -//! representation that can be transformed to the building blocks mentioned. -//! Every class that inherits \ref CodeBuilder can define its own nodes that it -//! can lower to basic nodes. -class CBNode { -public: - ASMJIT_NONCOPYABLE(CBNode) - - // -------------------------------------------------------------------------- - // [Type] - // -------------------------------------------------------------------------- - - //! Type of \ref CBNode. - ASMJIT_ENUM(NodeType) { - kNodeNone = 0, //!< Invalid node (internal, don't use). - - // [CodeBuilder] - kNodeInst = 1, //!< Node is \ref CBInst or \ref CBJump. - kNodeData = 2, //!< Node is \ref CBData. - kNodeAlign = 3, //!< Node is \ref CBAlign. - kNodeLabel = 4, //!< Node is \ref CBLabel. - kNodeLabelData = 5, //!< Node is \ref CBLabelData. - kNodeConstPool = 6, //!< Node is \ref CBConstPool. - kNodeComment = 7, //!< Node is \ref CBComment. - kNodeSentinel = 8, //!< Node is \ref CBSentinel. - - // [CodeCompiler] - kNodeFunc = 16, //!< Node is \ref CCFunc (considered as \ref CBLabel by \ref CodeBuilder). - kNodeFuncExit = 17, //!< Node is \ref CCFuncRet. - kNodeFuncCall = 18, //!< Node is \ref CCFuncCall. - kNodePushArg = 19, //!< Node is \ref CCPushArg. - kNodeHint = 20, //!< Node is \ref CCHint. - - // [UserDefined] - kNodeUser = 32 //!< First id of a user-defined node. - }; - - // -------------------------------------------------------------------------- - // [Flags] - // -------------------------------------------------------------------------- - - ASMJIT_ENUM(Flags) { - //! The node has been translated by the CodeCompiler. - kFlagIsTranslated = 0x0001, - //! If the node can be safely removed (has no effect). - kFlagIsRemovable = 0x0004, - //! If the node is informative only and can be safely removed. - kFlagIsInformative = 0x0008, - - //! If the `CBInst` is a jump. - kFlagIsJmp = 0x0010, - //! If the `CBInst` is a conditional jump. - kFlagIsJcc = 0x0020, - - //! If the `CBInst` is an unconditional jump or conditional jump that is - //! likely to be taken. - kFlagIsTaken = 0x0040, - - //! If the `CBNode` will return from a function. - //! - //! This flag is used by both `CBSentinel` and `CCFuncRet`. - kFlagIsRet = 0x0080, - - //! Whether the instruction is special. - kFlagIsSpecial = 0x0100, - - //! Whether the instruction is an FPU instruction. 
- kFlagIsFp = 0x0200 - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new \ref CBNode - always use \ref CodeBuilder to allocate nodes. - ASMJIT_INLINE CBNode(CodeBuilder* cb, uint32_t type) noexcept { - _prev = nullptr; - _next = nullptr; - _type = static_cast(type); - _opCount = 0; - _flags = static_cast(cb->_nodeFlags); - _position = cb->_position; - _inlineComment = nullptr; - _passData = nullptr; - } - //! Destroy the `CBNode` instance (NEVER CALLED). - ASMJIT_INLINE ~CBNode() noexcept {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - template - ASMJIT_INLINE T* as() noexcept { return static_cast(this); } - template - ASMJIT_INLINE const T* as() const noexcept { return static_cast(this); } - - //! Get previous node in the compiler stream. - ASMJIT_INLINE CBNode* getPrev() const noexcept { return _prev; } - //! Get next node in the compiler stream. - ASMJIT_INLINE CBNode* getNext() const noexcept { return _next; } - - //! Get the node type, see \ref Type. - ASMJIT_INLINE uint32_t getType() const noexcept { return _type; } - //! Get the node flags. - ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; } - - //! Get whether the instruction has flag `flag`. - ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (static_cast(_flags) & flag) != 0; } - //! Set node flags to `flags`. - ASMJIT_INLINE void setFlags(uint32_t flags) noexcept { _flags = static_cast(flags); } - //! Add instruction `flags`. - ASMJIT_INLINE void orFlags(uint32_t flags) noexcept { _flags |= static_cast(flags); } - //! And instruction `flags`. - ASMJIT_INLINE void andFlags(uint32_t flags) noexcept { _flags &= static_cast(flags); } - //! Clear instruction `flags`. - ASMJIT_INLINE void andNotFlags(uint32_t flags) noexcept { _flags &= ~static_cast(flags); } - - //! Get whether the node has been translated. - ASMJIT_INLINE bool isTranslated() const noexcept { return hasFlag(kFlagIsTranslated); } - - //! Get whether the node is removable if it's in unreachable code block. - ASMJIT_INLINE bool isRemovable() const noexcept { return hasFlag(kFlagIsRemovable); } - //! Get whether the node is informative only (comment, hint). - ASMJIT_INLINE bool isInformative() const noexcept { return hasFlag(kFlagIsInformative); } - - //! Whether the node is `CBLabel`. - ASMJIT_INLINE bool isLabel() const noexcept { return _type == kNodeLabel; } - //! Whether the `CBInst` node is an unconditional jump. - ASMJIT_INLINE bool isJmp() const noexcept { return hasFlag(kFlagIsJmp); } - //! Whether the `CBInst` node is a conditional jump. - ASMJIT_INLINE bool isJcc() const noexcept { return hasFlag(kFlagIsJcc); } - //! Whether the `CBInst` node is a conditional/unconditional jump. - ASMJIT_INLINE bool isJmpOrJcc() const noexcept { return hasFlag(kFlagIsJmp | kFlagIsJcc); } - //! Whether the `CBInst` node is a return. - ASMJIT_INLINE bool isRet() const noexcept { return hasFlag(kFlagIsRet); } - - //! Get whether the node is `CBInst` and the instruction is special. - ASMJIT_INLINE bool isSpecial() const noexcept { return hasFlag(kFlagIsSpecial); } - //! Get whether the node is `CBInst` and the instruction uses x87-FPU. 
- ASMJIT_INLINE bool isFp() const noexcept { return hasFlag(kFlagIsFp); } - - ASMJIT_INLINE bool hasPosition() const noexcept { return _position != 0; } - //! Get flow index. - ASMJIT_INLINE uint32_t getPosition() const noexcept { return _position; } - //! Set flow index. - ASMJIT_INLINE void setPosition(uint32_t position) noexcept { _position = position; } - - //! Get if the node has an inline comment. - ASMJIT_INLINE bool hasInlineComment() const noexcept { return _inlineComment != nullptr; } - //! Get an inline comment string. - ASMJIT_INLINE const char* getInlineComment() const noexcept { return _inlineComment; } - //! Set an inline comment string to `s`. - ASMJIT_INLINE void setInlineComment(const char* s) noexcept { _inlineComment = s; } - //! Set an inline comment string to null. - ASMJIT_INLINE void resetInlineComment() noexcept { _inlineComment = nullptr; } - - //! Get if the node has associated work-data. - ASMJIT_INLINE bool hasPassData() const noexcept { return _passData != nullptr; } - //! Get work-data - data used during processing & transformations. - template - ASMJIT_INLINE T* getPassData() const noexcept { return (T*)_passData; } - //! Set work-data to `data`. - template - ASMJIT_INLINE void setPassData(T* data) noexcept { _passData = (void*)data; } - //! Reset work-data to null. - ASMJIT_INLINE void resetPassData() noexcept { _passData = nullptr; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - CBNode* _prev; //!< Previous node. - CBNode* _next; //!< Next node. - - uint8_t _type; //!< Node type, see \ref NodeType. - uint8_t _opCount; //!< Count of operands or zero. - uint16_t _flags; //!< Flags, different meaning for every type of the node. - uint32_t _position; //!< Flow index. - - const char* _inlineComment; //!< Inline comment or null if not used. - void* _passData; //!< Data used exclusively by the current `CBPass`. -}; - -// ============================================================================ -// [asmjit::CBInst] -// ============================================================================ - -//! Instruction (CodeBuilder). -//! -//! Wraps an instruction with its options and operands. -class CBInst : public CBNode { -public: - ASMJIT_NONCOPYABLE(CBInst) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CBInst` instance. - ASMJIT_INLINE CBInst(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept - : CBNode(cb, kNodeInst) { - - orFlags(kFlagIsRemovable); - _instDetail.instId = static_cast(instId); - _instDetail.options = options; - - _opCount = static_cast(opCount); - _opArray = opArray; - - _updateMemOp(); - } - - //! Destroy the `CBInst` instance (NEVER CALLED). - ASMJIT_INLINE ~CBInst() noexcept {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE Inst::Detail& getInstDetail() noexcept { return _instDetail; } - ASMJIT_INLINE const Inst::Detail& getInstDetail() const noexcept { return _instDetail; } - - //! Get the instruction id, see \ref Inst::Id. - ASMJIT_INLINE uint32_t getInstId() const noexcept { return _instDetail.instId; } - //! 
Set the instruction id to `instId`, see \ref Inst::Id. - ASMJIT_INLINE void setInstId(uint32_t instId) noexcept { _instDetail.instId = instId; } - - //! Whether the instruction is either a jump or a conditional jump likely to be taken. - ASMJIT_INLINE bool isTaken() const noexcept { return hasFlag(kFlagIsTaken); } - - //! Get emit options. - ASMJIT_INLINE uint32_t getOptions() const noexcept { return _instDetail.options; } - //! Set emit options. - ASMJIT_INLINE void setOptions(uint32_t options) noexcept { _instDetail.options = options; } - //! Add emit options. - ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _instDetail.options |= options; } - //! Mask emit options. - ASMJIT_INLINE void andOptions(uint32_t options) noexcept { _instDetail.options &= options; } - //! Clear emit options. - ASMJIT_INLINE void delOptions(uint32_t options) noexcept { _instDetail.options &= ~options; } - - //! Get if the node has an extra register operand. - ASMJIT_INLINE bool hasExtraReg() const noexcept { return _instDetail.hasExtraReg(); } - //! Get extra register operand. - ASMJIT_INLINE RegOnly& getExtraReg() noexcept { return _instDetail.extraReg; } - //! \overload - ASMJIT_INLINE const RegOnly& getExtraReg() const noexcept { return _instDetail.extraReg; } - //! Set extra register operand to `reg`. - ASMJIT_INLINE void setExtraReg(const Reg& reg) noexcept { _instDetail.extraReg.init(reg); } - //! Set extra register operand to `reg`. - ASMJIT_INLINE void setExtraReg(const RegOnly& reg) noexcept { _instDetail.extraReg.init(reg); } - //! Reset extra register operand. - ASMJIT_INLINE void resetExtraReg() noexcept { _instDetail.extraReg.reset(); } - - //! Get operands count. - ASMJIT_INLINE uint32_t getOpCount() const noexcept { return _opCount; } - //! Get operands list. - ASMJIT_INLINE Operand* getOpArray() noexcept { return _opArray; } - //! \overload - ASMJIT_INLINE const Operand* getOpArray() const noexcept { return _opArray; } - - //! Get whether the instruction contains a memory operand. - ASMJIT_INLINE bool hasMemOp() const noexcept { return _memOpIndex != 0xFF; } - //! Get memory operand. - //! - //! NOTE: Can only be called if the instruction has such operand, - //! see `hasMemOp()`. - ASMJIT_INLINE Mem* getMemOp() const noexcept { - ASMJIT_ASSERT(hasMemOp()); - return static_cast(&_opArray[_memOpIndex]); - } - //! \overload - template - ASMJIT_INLINE T* getMemOp() const noexcept { - ASMJIT_ASSERT(hasMemOp()); - return static_cast(&_opArray[_memOpIndex]); - } - - //! Set memory operand index, `0xFF` means no memory operand. - ASMJIT_INLINE void setMemOpIndex(uint32_t index) noexcept { _memOpIndex = static_cast(index); } - //! Reset memory operand index to `0xFF` (no operand). - ASMJIT_INLINE void resetMemOpIndex() noexcept { _memOpIndex = 0xFF; } - - // -------------------------------------------------------------------------- - // [Utils] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void _updateMemOp() noexcept { - Operand* opArray = getOpArray(); - uint32_t opCount = getOpCount(); - - uint32_t i; - for (i = 0; i < opCount; i++) - if (opArray[i].isMem()) - goto Update; - i = 0xFF; - -Update: - setMemOpIndex(i); - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - Inst::Detail _instDetail; //!< Instruction id, options, and extra register. 
- uint8_t _memOpIndex; //!< \internal - uint8_t _reserved[7]; //!< \internal - Operand* _opArray; //!< Instruction operands. -}; - -// ============================================================================ -// [asmjit::CBInstEx] -// ============================================================================ - -struct CBInstEx : public CBInst { - Operand _op4; - Operand _op5; -}; - -// ============================================================================ -// [asmjit::CBJump] -// ============================================================================ - -//! Asm jump (conditional or direct). -//! -//! Extension of `CBInst` node, which stores more information about the jump. -class CBJump : public CBInst { -public: - ASMJIT_NONCOPYABLE(CBJump) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE CBJump(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept - : CBInst(cb, instId, options, opArray, opCount), - _target(nullptr), - _jumpNext(nullptr) {} - ASMJIT_INLINE ~CBJump() noexcept {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE CBLabel* getTarget() const noexcept { return _target; } - ASMJIT_INLINE CBJump* getJumpNext() const noexcept { return _jumpNext; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - CBLabel* _target; //!< Target node. - CBJump* _jumpNext; //!< Next jump to the same target in a single linked-list. -}; - -// ============================================================================ -// [asmjit::CBData] -// ============================================================================ - -//! Asm data (CodeBuilder). -//! -//! Wraps `.data` directive. The node contains data that will be placed at the -//! node's position in the assembler stream. The data is considered to be RAW; -//! no analysis nor byte-order conversion is performed on RAW data. -class CBData : public CBNode { -public: - ASMJIT_NONCOPYABLE(CBData) - enum { kInlineBufferSize = static_cast(64 - sizeof(CBNode) - 4) }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CBData` instance. - ASMJIT_INLINE CBData(CodeBuilder* cb, void* data, uint32_t size) noexcept : CBNode(cb, kNodeData) { - if (size <= kInlineBufferSize) { - if (data) ::memcpy(_buf, data, size); - } - else { - _externalPtr = static_cast(data); - } - _size = size; - } - - //! Destroy the `CBData` instance (NEVER CALLED). - ASMJIT_INLINE ~CBData() noexcept {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get size of the data. - uint32_t getSize() const noexcept { return _size; } - //! Get pointer to the data. - uint8_t* getData() const noexcept { return _size <= kInlineBufferSize ? 
const_cast(_buf) : _externalPtr; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - union { - struct { - uint8_t _buf[kInlineBufferSize]; //!< Embedded data buffer. - uint32_t _size; //!< Size of the data. - }; - struct { - uint8_t* _externalPtr; //!< Pointer to external data. - }; - }; -}; - -// ============================================================================ -// [asmjit::CBAlign] -// ============================================================================ - -//! Align directive (CodeBuilder). -//! -//! Wraps `.align` directive. -class CBAlign : public CBNode { -public: - ASMJIT_NONCOPYABLE(CBAlign) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CBAlign` instance. - ASMJIT_INLINE CBAlign(CodeBuilder* cb, uint32_t mode, uint32_t alignment) noexcept - : CBNode(cb, kNodeAlign), - _mode(mode), - _alignment(alignment) {} - //! Destroy the `CBAlign` instance (NEVER CALLED). - ASMJIT_INLINE ~CBAlign() noexcept {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get align mode. - ASMJIT_INLINE uint32_t getMode() const noexcept { return _mode; } - //! Set align mode. - ASMJIT_INLINE void setMode(uint32_t mode) noexcept { _mode = mode; } - - //! Get align offset in bytes. - ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; } - //! Set align offset in bytes to `offset`. - ASMJIT_INLINE void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint32_t _mode; //!< Align mode, see \ref AlignMode. - uint32_t _alignment; //!< Alignment (in bytes). -}; - -// ============================================================================ -// [asmjit::CBLabel] -// ============================================================================ - -//! Label (CodeBuilder). -class CBLabel : public CBNode { -public: - ASMJIT_NONCOPYABLE(CBLabel) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CBLabel` instance. - ASMJIT_INLINE CBLabel(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept - : CBNode(cb, kNodeLabel), - _id(id), - _numRefs(0), - _from(nullptr) {} - //! Destroy the `CBLabel` instance (NEVER CALLED). - ASMJIT_INLINE ~CBLabel() noexcept {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the label id. - ASMJIT_INLINE uint32_t getId() const noexcept { return _id; } - //! Get the label as `Label` operand. - ASMJIT_INLINE Label getLabel() const noexcept { return Label(_id); } - - //! Get first jmp instruction. - ASMJIT_INLINE CBJump* getFrom() const noexcept { return _from; } - - //! Get number of jumps to this target. - ASMJIT_INLINE uint32_t getNumRefs() const noexcept { return _numRefs; } - //! Set number of jumps to this target. 
- ASMJIT_INLINE void setNumRefs(uint32_t i) noexcept { _numRefs = i; } - - //! Add number of jumps to this target. - ASMJIT_INLINE void addNumRefs(uint32_t i = 1) noexcept { _numRefs += i; } - //! Subtract number of jumps to this target. - ASMJIT_INLINE void subNumRefs(uint32_t i = 1) noexcept { _numRefs -= i; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint32_t _id; //!< Label id. - uint32_t _numRefs; //!< Count of jumps here. - CBJump* _from; //!< Linked-list of nodes that can jump here. -}; - -// ============================================================================ -// [asmjit::CBLabelData] -// ============================================================================ - -class CBLabelData : public CBNode { -public: - ASMJIT_NONCOPYABLE(CBLabelData) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CBLabelData` instance. - ASMJIT_INLINE CBLabelData(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept - : CBNode(cb, kNodeLabelData), - _id(id) {} - - //! Destroy the `CBLabelData` instance (NEVER CALLED). - ASMJIT_INLINE ~CBLabelData() noexcept {} - - // -------------------------------------------------------------------------- - // [Interface] - // -------------------------------------------------------------------------- - - //! Get the label id. - ASMJIT_INLINE uint32_t getId() const noexcept { return _id; } - //! Get the label as `Label` operand. - ASMJIT_INLINE Label getLabel() const noexcept { return Label(_id); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint32_t _id; -}; - -// ============================================================================ -// [asmjit::CBConstPool] -// ============================================================================ - -class CBConstPool : public CBLabel { -public: - ASMJIT_NONCOPYABLE(CBConstPool) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CBConstPool` instance. - ASMJIT_INLINE CBConstPool(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept - : CBLabel(cb, id), - _constPool(&cb->_cbBaseZone) { _type = kNodeConstPool; } - - //! Destroy the `CBConstPool` instance (NEVER CALLED). - ASMJIT_INLINE ~CBConstPool() noexcept {} - - // -------------------------------------------------------------------------- - // [Interface] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE ConstPool& getConstPool() noexcept { return _constPool; } - ASMJIT_INLINE const ConstPool& getConstPool() const noexcept { return _constPool; } - - //! Get whether the constant-pool is empty. - ASMJIT_INLINE bool isEmpty() const noexcept { return _constPool.isEmpty(); } - //! Get the size of the constant-pool in bytes. - ASMJIT_INLINE size_t getSize() const noexcept { return _constPool.getSize(); } - //! Get minimum alignment. - ASMJIT_INLINE size_t getAlignment() const noexcept { return _constPool.getAlignment(); } - - //! See \ref ConstPool::add(). 
-  ASMJIT_INLINE Error add(const void* data, size_t size, size_t& dstOffset) noexcept {
-    return _constPool.add(data, size, dstOffset);
-  }
-
-  // --------------------------------------------------------------------------
-  // [Members]
-  // --------------------------------------------------------------------------
-
-  ConstPool _constPool;
-};
-
-// ============================================================================
-// [asmjit::CBComment]
-// ============================================================================
-
-//! Comment (CodeBuilder).
-class CBComment : public CBNode {
-public:
-  ASMJIT_NONCOPYABLE(CBComment)
-
-  // --------------------------------------------------------------------------
-  // [Construction / Destruction]
-  // --------------------------------------------------------------------------
-
-  //! Create a new `CBComment` instance.
-  ASMJIT_INLINE CBComment(CodeBuilder* cb, const char* comment) noexcept : CBNode(cb, kNodeComment) {
-    orFlags(kFlagIsRemovable | kFlagIsInformative);
-    _inlineComment = comment;
-  }
-
-  //! Destroy the `CBComment` instance (NEVER CALLED).
-  ASMJIT_INLINE ~CBComment() noexcept {}
-};
-
-// ============================================================================
-// [asmjit::CBSentinel]
-// ============================================================================
-
-//! Sentinel (CodeBuilder).
-//!
-//! Sentinel is a marker that is completely ignored by the code builder. It's
-//! used to remember a position in the code, as it never gets removed by any pass.
-class CBSentinel : public CBNode {
-public:
-  ASMJIT_NONCOPYABLE(CBSentinel)
-
-  // --------------------------------------------------------------------------
-  // [Construction / Destruction]
-  // --------------------------------------------------------------------------
-
-  //! Create a new `CBSentinel` instance.
-  ASMJIT_INLINE CBSentinel(CodeBuilder* cb) noexcept : CBNode(cb, kNodeSentinel) {}
-  //! Destroy the `CBSentinel` instance (NEVER CALLED).
-  ASMJIT_INLINE ~CBSentinel() noexcept {}
-};
-
-//! \}
-
-} // asmjit namespace
-
-// [Api-End]
-#include "../asmjit_apiend.h"
-
-// [Guard]
-#endif // !ASMJIT_DISABLE_BUILDER
-#endif // _ASMJIT_BASE_CODEBUILDER_H
diff --git a/src/asmjit/base/codecompiler.cpp b/src/asmjit/base/codecompiler.cpp
deleted file mode 100644
index 582e94a..0000000
--- a/src/asmjit/base/codecompiler.cpp
+++ /dev/null
@@ -1,573 +0,0 @@
-// [AsmJit]
-// Complete x86/x64 JIT and Remote Assembler for C++.
-//
-// [License]
-// Zlib - See LICENSE.md file in the package.
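To make the pass interface removed above concrete, here is a minimal sketch (not part of this patch) of a custom pass written against the deleted `CBPass` API. It merely counts instruction nodes; it assumes the usual `CodeBuilder::getFirstNode()` accessor, which is declared earlier in the header and not visible in this hunk, and it uses only the members and node types shown above.

class CountInstsPass : public CBPass {
public:
  CountInstsPass() noexcept : CBPass("CountInsts"), _count(0) {}

  virtual Error process(Zone* zone) noexcept override {
    (void)zone;                                  // Scratch allocator, reset after process() returns.
    _count = 0;
    for (CBNode* node = _cb->getFirstNode(); node; node = node->getNext())
      if (node->getType() == CBNode::kNodeInst)
        _count++;
    return kErrorOk;
  }

  uint32_t _count;
};

// Registered through the templated helper shown above:
//   builder.addPassT<CountInstsPass>();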
- -// [Export] -#define ASMJIT_EXPORTS - -// [Guard] -#include "../asmjit_build.h" -#if !defined(ASMJIT_DISABLE_COMPILER) - -// [Dependencies] -#include "../base/assembler.h" -#include "../base/codecompiler.h" -#include "../base/cpuinfo.h" -#include "../base/logging.h" -#include "../base/regalloc_p.h" -#include "../base/utils.h" -#include - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [Constants] -// ============================================================================ - -static const char noName[1] = { '\0' }; - -// ============================================================================ -// [asmjit::CCFuncCall - Arg / Ret] -// ============================================================================ - -bool CCFuncCall::_setArg(uint32_t i, const Operand_& op) noexcept { - if ((i & ~kFuncArgHi) >= _funcDetail.getArgCount()) - return false; - - _args[i] = op; - return true; -} - -bool CCFuncCall::_setRet(uint32_t i, const Operand_& op) noexcept { - if (i >= 2) - return false; - - _ret[i] = op; - return true; -} - -// ============================================================================ -// [asmjit::CodeCompiler - Construction / Destruction] -// ============================================================================ - -CodeCompiler::CodeCompiler() noexcept - : CodeBuilder(), - _func(nullptr), - _vRegZone(4096 - Zone::kZoneOverhead), - _vRegArray(), - _localConstPool(nullptr), - _globalConstPool(nullptr) { - - _type = kTypeCompiler; -} -CodeCompiler::~CodeCompiler() noexcept {} - -// ============================================================================ -// [asmjit::CodeCompiler - Events] -// ============================================================================ - -Error CodeCompiler::onAttach(CodeHolder* code) noexcept { - return Base::onAttach(code); -} - -Error CodeCompiler::onDetach(CodeHolder* code) noexcept { - _func = nullptr; - - _localConstPool = nullptr; - _globalConstPool = nullptr; - - _vRegArray.reset(); - _vRegZone.reset(false); - - return Base::onDetach(code); -} - -// ============================================================================ -// [asmjit::CodeCompiler - Node-Factory] -// ============================================================================ - -CCHint* CodeCompiler::newHintNode(Reg& r, uint32_t hint, uint32_t value) noexcept { - if (!r.isVirtReg()) return nullptr; - - VirtReg* vr = getVirtReg(r); - return newNodeT(vr, hint, value); -} - -// ============================================================================ -// [asmjit::CodeCompiler - Func] -// ============================================================================ - -CCFunc* CodeCompiler::newFunc(const FuncSignature& sign) noexcept { - Error err; - - CCFunc* func = newNodeT(); - if (!func) goto _NoMemory; - - err = registerLabelNode(func); - if (ASMJIT_UNLIKELY(err)) { - // TODO: Calls setLastError, maybe rethink noexcept? - setLastError(err); - return nullptr; - } - - // Create helper nodes. - func->_exitNode = newLabelNode(); - func->_end = newNodeT(); - - if (!func->_exitNode || !func->_end) - goto _NoMemory; - - // Function prototype. - err = func->getDetail().init(sign); - if (err != kErrorOk) { - setLastError(err); - return nullptr; - } - - // If the CodeInfo guarantees higher alignment honor it. 
- if (_codeInfo.getStackAlignment() > func->_funcDetail._callConv.getNaturalStackAlignment()) - func->_funcDetail._callConv.setNaturalStackAlignment(_codeInfo.getStackAlignment()); - - // Allocate space for function arguments. - func->_args = nullptr; - if (func->getArgCount() != 0) { - func->_args = _cbHeap.allocT(func->getArgCount() * sizeof(VirtReg*)); - if (!func->_args) goto _NoMemory; - - ::memset(func->_args, 0, func->getArgCount() * sizeof(VirtReg*)); - } - - return func; - -_NoMemory: - setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - return nullptr; -} - -CCFunc* CodeCompiler::addFunc(CCFunc* func) { - ASMJIT_ASSERT(_func == nullptr); - _func = func; - - addNode(func); // Function node. - CBNode* cursor = getCursor(); // {CURSOR}. - addNode(func->getExitNode()); // Function exit label. - addNode(func->getEnd()); // Function end marker. - - _setCursor(cursor); - return func; -} - -CCFunc* CodeCompiler::addFunc(const FuncSignature& sign) { - CCFunc* func = newFunc(sign); - - if (!func) { - setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - return nullptr; - } - - return addFunc(func); -} - -CBSentinel* CodeCompiler::endFunc() { - CCFunc* func = getFunc(); - if (!func) { - // TODO: - return nullptr; - } - - // Add the local constant pool at the end of the function (if exists). - if (_localConstPool) { - setCursor(func->getEnd()->getPrev()); - addNode(_localConstPool); - _localConstPool = nullptr; - } - - // Mark as finished. - func->_isFinished = true; - _func = nullptr; - - CBSentinel* end = func->getEnd(); - setCursor(end); - return end; -} - -// ============================================================================ -// [asmjit::CodeCompiler - Ret] -// ============================================================================ - -CCFuncRet* CodeCompiler::newRet(const Operand_& o0, const Operand_& o1) noexcept { - CCFuncRet* node = newNodeT(o0, o1); - if (!node) { - setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - return nullptr; - } - return node; -} - -CCFuncRet* CodeCompiler::addRet(const Operand_& o0, const Operand_& o1) noexcept { - CCFuncRet* node = newRet(o0, o1); - if (!node) return nullptr; - return static_cast(addNode(node)); -} - -// ============================================================================ -// [asmjit::CodeCompiler - Call] -// ============================================================================ - -CCFuncCall* CodeCompiler::newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept { - Error err; - uint32_t nArgs; - - CCFuncCall* node = _cbHeap.allocT(sizeof(CCFuncCall) + sizeof(Operand)); - Operand* opArray = reinterpret_cast(reinterpret_cast(node) + sizeof(CCFuncCall)); - - if (ASMJIT_UNLIKELY(!node)) - goto _NoMemory; - - opArray[0].copyFrom(o0); - new (node) CCFuncCall(this, instId, 0, opArray, 1); - - if ((err = node->getDetail().init(sign)) != kErrorOk) { - setLastError(err); - return nullptr; - } - - // If there are no arguments skip the allocation. 
- if ((nArgs = sign.getArgCount()) == 0) - return node; - - node->_args = static_cast(_cbHeap.alloc(nArgs * sizeof(Operand))); - if (!node->_args) goto _NoMemory; - - ::memset(node->_args, 0, nArgs * sizeof(Operand)); - return node; - -_NoMemory: - setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - return nullptr; -} - -CCFuncCall* CodeCompiler::addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept { - CCFuncCall* node = newCall(instId, o0, sign); - if (!node) return nullptr; - return static_cast(addNode(node)); -} - -// ============================================================================ -// [asmjit::CodeCompiler - Vars] -// ============================================================================ - -Error CodeCompiler::setArg(uint32_t argIndex, const Reg& r) { - CCFunc* func = getFunc(); - - if (!func) - return setLastError(DebugUtils::errored(kErrorInvalidState)); - - if (!isVirtRegValid(r)) - return setLastError(DebugUtils::errored(kErrorInvalidVirtId)); - - VirtReg* vr = getVirtReg(r); - func->setArg(argIndex, vr); - - return kErrorOk; -} - -// ============================================================================ -// [asmjit::CodeCompiler - Hint] -// ============================================================================ - -Error CodeCompiler::_hint(Reg& r, uint32_t hint, uint32_t value) { - if (!r.isVirtReg()) return kErrorOk; - - CCHint* node = newHintNode(r, hint, value); - if (!node) return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - - addNode(node); - return kErrorOk; -} - -// ============================================================================ -// [asmjit::CodeCompiler - Vars] -// ============================================================================ - -VirtReg* CodeCompiler::newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept { - size_t index = _vRegArray.getLength(); - if (ASMJIT_UNLIKELY(index > Operand::kPackedIdCount)) - return nullptr; - - VirtReg* vreg; - if (_vRegArray.willGrow(&_cbHeap, 1) != kErrorOk || !(vreg = _vRegZone.allocZeroedT())) - return nullptr; - - vreg->_id = Operand::packId(static_cast(index)); - vreg->_regInfo._signature = signature; - vreg->_name = noName; - -#if !defined(ASMJIT_DISABLE_LOGGING) - if (name && name[0] != '\0') - vreg->_name = static_cast(_cbDataZone.dup(name, ::strlen(name), true)); -#endif // !ASMJIT_DISABLE_LOGGING - - vreg->_size = TypeId::sizeOf(typeId); - vreg->_typeId = typeId; - vreg->_alignment = static_cast(std::min(vreg->_size, 64)); - vreg->_priority = 10; - - // The following are only used by `RAPass`. 
- vreg->_raId = kInvalidValue; - vreg->_state = VirtReg::kStateNone; - vreg->_physId = Globals::kInvalidRegId; - - _vRegArray.appendUnsafe(vreg); - return vreg; -} - -Error CodeCompiler::_newReg(Reg& out, uint32_t typeId, const char* name) { - RegInfo regInfo; - - Error err = ArchUtils::typeIdToRegInfo(getArchType(), typeId, regInfo); - if (ASMJIT_UNLIKELY(err)) return setLastError(err); - - VirtReg* vReg = newVirtReg(typeId, regInfo.getSignature(), name); - if (ASMJIT_UNLIKELY(!vReg)) { - out.reset(); - return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - } - - out._initReg(regInfo.getSignature(), vReg->getId()); - return kErrorOk; -} - -Error CodeCompiler::_newReg(Reg& out, uint32_t typeId, const char* nameFmt, va_list ap) { - StringBuilderTmp<256> sb; - sb.appendFormatVA(nameFmt, ap); - return _newReg(out, typeId, sb.getData()); -} - -Error CodeCompiler::_newReg(Reg& out, const Reg& ref, const char* name) { - RegInfo regInfo; - uint32_t typeId; - - if (isVirtRegValid(ref)) { - VirtReg* vRef = getVirtReg(ref); - typeId = vRef->getTypeId(); - - // NOTE: It's possible to cast one register type to another if it's the - // same register kind. However, VirtReg always contains the TypeId that - // was used to create the register. This means that in some cases we may - // end up having different size of `ref` and `vRef`. In such case we - // adjust the TypeId to match the `ref` register type instead of the - // original register type, which should be the expected behavior. - uint32_t typeSize = TypeId::sizeOf(typeId); - uint32_t refSize = ref.getSize(); - - if (typeSize != refSize) { - if (TypeId::isInt(typeId)) { - // GP register - change TypeId to match `ref`, but keep sign of `vRef`. - switch (refSize) { - case 1: typeId = TypeId::kI8 | (typeId & 1); break; - case 2: typeId = TypeId::kI16 | (typeId & 1); break; - case 4: typeId = TypeId::kI32 | (typeId & 1); break; - case 8: typeId = TypeId::kI64 | (typeId & 1); break; - default: typeId = TypeId::kVoid; break; - } - } - else if (TypeId::isMmx(typeId)) { - // MMX register - always use 64-bit. - typeId = TypeId::kMmx64; - } - else if (TypeId::isMask(typeId)) { - // Mask register - change TypeId to match `ref` size. - switch (refSize) { - case 1: typeId = TypeId::kMask8; break; - case 2: typeId = TypeId::kMask16; break; - case 4: typeId = TypeId::kMask32; break; - case 8: typeId = TypeId::kMask64; break; - default: typeId = TypeId::kVoid; break; - } - } - else { - // VEC register - change TypeId to match `ref` size, keep vector metadata. 
- uint32_t elementTypeId = TypeId::elementOf(typeId); - - switch (refSize) { - case 16: typeId = TypeId::_kVec128Start + (elementTypeId - TypeId::kI8); break; - case 32: typeId = TypeId::_kVec256Start + (elementTypeId - TypeId::kI8); break; - case 64: typeId = TypeId::_kVec512Start + (elementTypeId - TypeId::kI8); break; - default: typeId = TypeId::kVoid; break; - } - } - - if (typeId == TypeId::kVoid) - return setLastError(DebugUtils::errored(kErrorInvalidState)); - } - } - else { - typeId = ref.getType(); - } - - Error err = ArchUtils::typeIdToRegInfo(getArchType(), typeId, regInfo); - if (ASMJIT_UNLIKELY(err)) return setLastError(err); - - VirtReg* vReg = newVirtReg(typeId, regInfo.getSignature(), name); - if (ASMJIT_UNLIKELY(!vReg)) { - out.reset(); - return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - } - - out._initReg(regInfo.getSignature(), vReg->getId()); - return kErrorOk; -} - -Error CodeCompiler::_newReg(Reg& out, const Reg& ref, const char* nameFmt, va_list ap) { - StringBuilderTmp<256> sb; - sb.appendFormatVA(nameFmt, ap); - return _newReg(out, ref, sb.getData()); -} - -Error CodeCompiler::_newStack(Mem& out, uint32_t size, uint32_t alignment, const char* name) { - if (size == 0) - return setLastError(DebugUtils::errored(kErrorInvalidArgument)); - - if (alignment == 0) alignment = 1; - if (!Utils::isPowerOf2(alignment)) - return setLastError(DebugUtils::errored(kErrorInvalidArgument)); - - if (alignment > 64) alignment = 64; - - VirtReg* vReg = newVirtReg(0, 0, name); - if (ASMJIT_UNLIKELY(!vReg)) { - out.reset(); - return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - } - - vReg->_size = size; - vReg->_isStack = true; - vReg->_alignment = static_cast(alignment); - - // Set the memory operand to GPD/GPQ and its id to VirtReg. - out = Mem(Init, _nativeGpReg.getType(), vReg->getId(), Reg::kRegNone, kInvalidValue, 0, 0, Mem::kSignatureMemRegHomeFlag); - return kErrorOk; -} - -Error CodeCompiler::_newConst(Mem& out, uint32_t scope, const void* data, size_t size) { - CBConstPool** pPool; - if (scope == kConstScopeLocal) - pPool = &_localConstPool; - else if (scope == kConstScopeGlobal) - pPool = &_globalConstPool; - else - return setLastError(DebugUtils::errored(kErrorInvalidArgument)); - - if (!*pPool && !(*pPool = newConstPool())) - return setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - - CBConstPool* pool = *pPool; - size_t off; - - Error err = pool->add(data, size, off); - if (ASMJIT_UNLIKELY(err)) return setLastError(err); - - out = Mem(Init, - Label::kLabelTag, // Base type. - pool->getId(), // Base id. - 0, // Index type. - kInvalidValue, // Index id. - static_cast(off), // Offset. - static_cast(size), // Size. - 0); // Flags. 
- return kErrorOk; -} - -Error CodeCompiler::alloc(Reg& reg) { - if (!reg.isVirtReg()) return kErrorOk; - return _hint(reg, CCHint::kHintAlloc, kInvalidValue); -} - -Error CodeCompiler::alloc(Reg& reg, uint32_t physId) { - if (!reg.isVirtReg()) return kErrorOk; - return _hint(reg, CCHint::kHintAlloc, physId); -} - -Error CodeCompiler::alloc(Reg& reg, const Reg& physReg) { - if (!reg.isVirtReg()) return kErrorOk; - return _hint(reg, CCHint::kHintAlloc, physReg.getId()); -} - -Error CodeCompiler::save(Reg& reg) { - if (!reg.isVirtReg()) return kErrorOk; - return _hint(reg, CCHint::kHintSave, kInvalidValue); -} - -Error CodeCompiler::spill(Reg& reg) { - if (!reg.isVirtReg()) return kErrorOk; - return _hint(reg, CCHint::kHintSpill, kInvalidValue); -} - -Error CodeCompiler::unuse(Reg& reg) { - if (!reg.isVirtReg()) return kErrorOk; - return _hint(reg, CCHint::kHintUnuse, kInvalidValue); -} - -uint32_t CodeCompiler::getPriority(Reg& reg) const { - if (!reg.isVirtReg()) return 0; - return getVirtRegById(reg.getId())->getPriority(); -} - -void CodeCompiler::setPriority(Reg& reg, uint32_t priority) { - if (!reg.isVirtReg()) return; - if (priority > 255) priority = 255; - - VirtReg* vreg = getVirtRegById(reg.getId()); - if (vreg) vreg->_priority = static_cast(priority); -} - -bool CodeCompiler::getSaveOnUnuse(Reg& reg) const { - if (!reg.isVirtReg()) return false; - - VirtReg* vreg = getVirtRegById(reg.getId()); - return static_cast(vreg->_saveOnUnuse); -} - -void CodeCompiler::setSaveOnUnuse(Reg& reg, bool value) { - if (!reg.isVirtReg()) return; - - VirtReg* vreg = getVirtRegById(reg.getId()); - if (!vreg) return; - - vreg->_saveOnUnuse = value; -} - -void CodeCompiler::rename(Reg& reg, const char* fmt, ...) { - if (!reg.isVirtReg()) return; - - VirtReg* vreg = getVirtRegById(reg.getId()); - if (!vreg) return; - - vreg->_name = noName; - if (fmt && fmt[0] != '\0') { - char buf[64]; - - va_list ap; - va_start(ap, fmt); - - vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap); - buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0'; - - vreg->_name = static_cast(_cbDataZone.dup(buf, ::strlen(buf), true)); - va_end(ap); - } -} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // !ASMJIT_DISABLE_COMPILER diff --git a/src/asmjit/base/codecompiler.h b/src/asmjit/base/codecompiler.h deleted file mode 100644 index 44b9644..0000000 --- a/src/asmjit/base/codecompiler.h +++ /dev/null @@ -1,738 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_CODECOMPILER_H -#define _ASMJIT_BASE_CODECOMPILER_H - -#include "../asmjit_build.h" -#if !defined(ASMJIT_DISABLE_COMPILER) - -// [Dependencies] -#include "../base/assembler.h" -#include "../base/codebuilder.h" -#include "../base/constpool.h" -#include "../base/func.h" -#include "../base/operand.h" -#include "../base/utils.h" -#include "../base/zone.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [Forward Declarations] -// ============================================================================ - -struct VirtReg; -struct TiedReg; -struct RAState; -struct RACell; - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::ConstScope] -// ============================================================================ - -//! 
Scope of the constant. -ASMJIT_ENUM(ConstScope) { - //! Local constant, always embedded right after the current function. - kConstScopeLocal = 0, - //! Global constant, embedded at the end of the currently compiled code. - kConstScopeGlobal = 1 -}; - -// ============================================================================ -// [asmjit::VirtReg] -// ============================================================================ - -//! Virtual register data (CodeCompiler). -struct VirtReg { - //! A state of a virtual register (used during register allocation). - ASMJIT_ENUM(State) { - kStateNone = 0, //!< Not allocated, not used. - kStateReg = 1, //!< Allocated in register. - kStateMem = 2 //!< Allocated in memory or spilled. - }; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the virtual-register id. - ASMJIT_INLINE uint32_t getId() const noexcept { return _id; } - //! Get virtual-register's name. - ASMJIT_INLINE const char* getName() const noexcept { return _name; } - - //! Get a physical register type. - ASMJIT_INLINE uint32_t getType() const noexcept { return _regInfo.getType(); } - //! Get a physical register kind. - ASMJIT_INLINE uint32_t getKind() const noexcept { return _regInfo.getKind(); } - //! Get a physical register size. - ASMJIT_INLINE uint32_t getRegSize() const noexcept { return _regInfo.getSize(); } - //! Get a register signature of this virtual register. - ASMJIT_INLINE uint32_t getSignature() const noexcept { return _regInfo.getSignature(); } - - //! Get a register's type-id, see \ref TypeId. - ASMJIT_INLINE uint32_t getTypeId() const noexcept { return _typeId; } - - //! Get virtual-register's size. - ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; } - //! Get virtual-register's alignment. - ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; } - - //! Get the virtual-register priority, used by compiler to decide which variable to spill. - ASMJIT_INLINE uint32_t getPriority() const noexcept { return _priority; } - //! Set the virtual-register priority. - ASMJIT_INLINE void setPriority(uint32_t priority) noexcept { - ASMJIT_ASSERT(priority <= 0xFF); - _priority = static_cast(priority); - } - - //! Get variable state, only used by `RAPass`. - ASMJIT_INLINE uint32_t getState() const noexcept { return _state; } - //! Set variable state, only used by `RAPass`. - ASMJIT_INLINE void setState(uint32_t state) { - ASMJIT_ASSERT(state <= 0xFF); - _state = static_cast(state); - } - - //! Get register index. - ASMJIT_INLINE uint32_t getPhysId() const noexcept { return _physId; } - //! Set register index. - ASMJIT_INLINE void setPhysId(uint32_t physId) { - ASMJIT_ASSERT(physId <= Globals::kInvalidRegId); - _physId = static_cast(physId); - } - //! Reset register index. - ASMJIT_INLINE void resetPhysId() { - _physId = static_cast(Globals::kInvalidRegId); - } - - //! Get home registers mask. - ASMJIT_INLINE uint32_t getHomeMask() const { return _homeMask; } - //! Add a home register index to the home registers mask. - ASMJIT_INLINE void addHomeId(uint32_t physId) { _homeMask |= Utils::mask(physId); } - - ASMJIT_INLINE bool isFixed() const noexcept { return static_cast(_isFixed); } - - //! Get whether the VirtReg is only memory allocated on the stack. - ASMJIT_INLINE bool isStack() const noexcept { return static_cast(_isStack); } - - //! Get whether to save variable when it's unused (spill). 
- ASMJIT_INLINE bool saveOnUnuse() const noexcept { return static_cast(_saveOnUnuse); } - - //! Get whether the variable was changed. - ASMJIT_INLINE bool isModified() const noexcept { return static_cast(_modified); } - //! Set whether the variable was changed. - ASMJIT_INLINE void setModified(bool modified) noexcept { _modified = modified; } - - //! Get home memory offset. - ASMJIT_INLINE int32_t getMemOffset() const noexcept { return _memOffset; } - //! Set home memory offset. - ASMJIT_INLINE void setMemOffset(int32_t offset) noexcept { _memOffset = offset; } - - //! Get home memory cell. - ASMJIT_INLINE RACell* getMemCell() const noexcept { return _memCell; } - //! Set home memory cell. - ASMJIT_INLINE void setMemCell(RACell* cell) noexcept { _memCell = cell; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint32_t _id; //!< Virtual register id. - RegInfo _regInfo; //!< Physical register info & signature. - const char* _name; //!< Virtual name (user provided). - uint32_t _size; //!< Virtual size (can be smaller than `regInfo._size`). - uint8_t _typeId; //!< Type-id. - uint8_t _alignment; //!< Register's natural alignment (for spilling). - uint8_t _priority; //!< Allocation priority (hint for RAPass that can be ignored). - uint8_t _isFixed : 1; //!< True if this is a fixed register, never reallocated. - uint8_t _isStack : 1; //!< True if the virtual register is only used as a stack. - uint8_t _isMaterialized : 1; //!< Register is constant that is easily created by a single instruction. - uint8_t _saveOnUnuse : 1; //!< Save on unuse (at end of the variable scope). - - // ------------------------------------------------------------------------- - // The following members are used exclusively by RAPass. They are initialized - // when the VirtReg is created and then changed during RAPass. - // ------------------------------------------------------------------------- - - uint32_t _raId; //!< Register allocator work-id (used by RAPass). - int32_t _memOffset; //!< Home memory offset. - uint32_t _homeMask; //!< Mask of all registers variable has been allocated to. - - uint8_t _state; //!< Variable state (connected with actual `RAState)`. - uint8_t _physId; //!< Actual register index (only used by `RAPass)`, during translate. - uint8_t _modified; //!< Whether variable was changed (connected with actual `RAState)`. - - RACell* _memCell; //!< Home memory cell, used by `RAPass` (initially nullptr). - - //! Temporary link to TiedReg* used by the `RAPass` used in - //! various phases, but always set back to nullptr when finished. - //! - //! This temporary data is designed to be used by algorithms that need to - //! store some data into variables themselves during compilation. But it's - //! expected that after variable is compiled & translated the data is set - //! back to zero/null. Initial value is nullptr. - TiedReg* _tied; -}; - -// ============================================================================ -// [asmjit::CCHint] -// ============================================================================ - -//! Hint for register allocator (CodeCompiler). -class CCHint : public CBNode { -public: - ASMJIT_NONCOPYABLE(CCHint) - - //! Hint type. - ASMJIT_ENUM(Hint) { - //! Alloc to physical reg. - kHintAlloc = 0, - //! Spill to memory. - kHintSpill = 1, - //! Save if modified. - kHintSave = 2, - //! Save if modified and mark it as unused. 
-    kHintSaveAndUnuse = 3,
-    //! Mark as unused.
-    kHintUnuse = 4
-  };
-
-  // --------------------------------------------------------------------------
-  // [Construction / Destruction]
-  // --------------------------------------------------------------------------
-
-  //! Create a new `CCHint` instance.
-  ASMJIT_INLINE CCHint(CodeBuilder* cb, VirtReg* vreg, uint32_t hint, uint32_t value) noexcept : CBNode(cb, kNodeHint) {
-    orFlags(kFlagIsRemovable | kFlagIsInformative);
-    _vreg = vreg;
-    _hint = hint;
-    _value = value;
-  }
-
-  //! Destroy the `CCHint` instance (NEVER CALLED).
-  ASMJIT_INLINE ~CCHint() noexcept {}
-
-  // --------------------------------------------------------------------------
-  // [Accessors]
-  // --------------------------------------------------------------------------
-
-  //! Get variable.
-  ASMJIT_INLINE VirtReg* getVReg() const noexcept { return _vreg; }
-
-  //! Get hint id, see \ref Hint.
-  ASMJIT_INLINE uint32_t getHint() const noexcept { return _hint; }
-  //! Set hint id, see \ref Hint.
-  ASMJIT_INLINE void setHint(uint32_t hint) noexcept { _hint = hint; }
-
-  //! Get hint value.
-  ASMJIT_INLINE uint32_t getValue() const noexcept { return _value; }
-  //! Set hint value.
-  ASMJIT_INLINE void setValue(uint32_t value) noexcept { _value = value; }
-
-  // --------------------------------------------------------------------------
-  // [Members]
-  // --------------------------------------------------------------------------
-
-  //! Variable.
-  VirtReg* _vreg;
-  //! Hint id.
-  uint32_t _hint;
-  //! Value.
-  uint32_t _value;
-};
-
-// ============================================================================
-// [asmjit::CCFunc]
-// ============================================================================
-
-//! Function entry (CodeCompiler).
-class CCFunc : public CBLabel {
-public:
-  ASMJIT_NONCOPYABLE(CCFunc)
-
-  // --------------------------------------------------------------------------
-  // [Construction / Destruction]
-  // --------------------------------------------------------------------------
-
-  //! Create a new `CCFunc` instance.
-  //!
-  //! Always use `CodeCompiler::addFunc()` to create \ref CCFunc.
-  ASMJIT_INLINE CCFunc(CodeBuilder* cb) noexcept
-    : CBLabel(cb),
-      _funcDetail(),
-      _frameInfo(),
-      _exitNode(nullptr),
-      _end(nullptr),
-      _args(nullptr),
-      _isFinished(false) {
-
-    _type = kNodeFunc;
-  }
-
-  //! Destroy the `CCFunc` instance (NEVER CALLED).
-  ASMJIT_INLINE ~CCFunc() noexcept {}
-
-  // --------------------------------------------------------------------------
-  // [Accessors]
-  // --------------------------------------------------------------------------
-
-  //! Get function exit `CBLabel`.
-  ASMJIT_INLINE CBLabel* getExitNode() const noexcept { return _exitNode; }
-  //! Get function exit label.
-  ASMJIT_INLINE Label getExitLabel() const noexcept { return _exitNode->getLabel(); }
-
-  //! Get "End of Func" sentinel.
-  ASMJIT_INLINE CBSentinel* getEnd() const noexcept { return _end; }
-
-  //! Get function declaration.
-  ASMJIT_INLINE FuncDetail& getDetail() noexcept { return _funcDetail; }
-  //! Get function declaration.
-  ASMJIT_INLINE const FuncDetail& getDetail() const noexcept { return _funcDetail; }
-
-  //! Get function frame information.
-  ASMJIT_INLINE FuncFrameInfo& getFrameInfo() noexcept { return _frameInfo; }
-  //! Get function frame information.
-  ASMJIT_INLINE const FuncFrameInfo& getFrameInfo() const noexcept { return _frameInfo; }
-
-  //! Get arguments count.
- ASMJIT_INLINE uint32_t getArgCount() const noexcept { return _funcDetail.getArgCount(); } - //! Get returns count. - ASMJIT_INLINE uint32_t getRetCount() const noexcept { return _funcDetail.getRetCount(); } - - //! Get arguments list. - ASMJIT_INLINE VirtReg** getArgs() const noexcept { return _args; } - - //! Get argument at `i`. - ASMJIT_INLINE VirtReg* getArg(uint32_t i) const noexcept { - ASMJIT_ASSERT(i < getArgCount()); - return _args[i]; - } - - //! Set argument at `i`. - ASMJIT_INLINE void setArg(uint32_t i, VirtReg* vreg) noexcept { - ASMJIT_ASSERT(i < getArgCount()); - _args[i] = vreg; - } - - //! Reset argument at `i`. - ASMJIT_INLINE void resetArg(uint32_t i) noexcept { - ASMJIT_ASSERT(i < getArgCount()); - _args[i] = nullptr; - } - - ASMJIT_INLINE uint32_t getAttributes() const noexcept { return _frameInfo.getAttributes(); } - ASMJIT_INLINE void addAttributes(uint32_t attrs) noexcept { _frameInfo.addAttributes(attrs); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - FuncDetail _funcDetail; //!< Function detail. - FuncFrameInfo _frameInfo; //!< Function frame information. - - CBLabel* _exitNode; //!< Function exit. - CBSentinel* _end; //!< Function end. - - VirtReg** _args; //!< Arguments array as `VirtReg`. - - //! Function was finished by `Compiler::endFunc()`. - uint8_t _isFinished; -}; - -// ============================================================================ -// [asmjit::CCFuncRet] -// ============================================================================ - -//! Function return (CodeCompiler). -class CCFuncRet : public CBNode { -public: - ASMJIT_NONCOPYABLE(CCFuncRet) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CCFuncRet` instance. - ASMJIT_INLINE CCFuncRet(CodeBuilder* cb, const Operand_& o0, const Operand_& o1) noexcept : CBNode(cb, kNodeFuncExit) { - orFlags(kFlagIsRet); - _ret[0].copyFrom(o0); - _ret[1].copyFrom(o1); - } - - //! Destroy the `CCFuncRet` instance (NEVER CALLED). - ASMJIT_INLINE ~CCFuncRet() noexcept {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the first return operand. - ASMJIT_INLINE Operand& getFirst() noexcept { return static_cast(_ret[0]); } - //! \overload - ASMJIT_INLINE const Operand& getFirst() const noexcept { return static_cast(_ret[0]); } - - //! Get the second return operand. - ASMJIT_INLINE Operand& getSecond() noexcept { return static_cast(_ret[1]); } - //! \overload - ASMJIT_INLINE const Operand& getSecond() const noexcept { return static_cast(_ret[1]); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Return operands. - Operand_ _ret[2]; -}; - -// ============================================================================ -// [asmjit::CCFuncCall] -// ============================================================================ - -//! Function call (CodeCompiler). 
-class CCFuncCall : public CBInst { -public: - ASMJIT_NONCOPYABLE(CCFuncCall) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CCFuncCall` instance. - ASMJIT_INLINE CCFuncCall(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept - : CBInst(cb, instId, options, opArray, opCount), - _funcDetail(), - _args(nullptr) { - - _type = kNodeFuncCall; - _ret[0].reset(); - _ret[1].reset(); - orFlags(kFlagIsRemovable); - } - - //! Destroy the `CCFuncCall` instance (NEVER CALLED). - ASMJIT_INLINE ~CCFuncCall() noexcept {} - - // -------------------------------------------------------------------------- - // [Signature] - // -------------------------------------------------------------------------- - - //! Set function signature. - ASMJIT_INLINE Error setSignature(const FuncSignature& sign) noexcept { - return _funcDetail.init(sign); - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get function declaration. - ASMJIT_INLINE FuncDetail& getDetail() noexcept { return _funcDetail; } - //! Get function declaration. - ASMJIT_INLINE const FuncDetail& getDetail() const noexcept { return _funcDetail; } - - //! Get target operand. - ASMJIT_INLINE Operand& getTarget() noexcept { return static_cast(_opArray[0]); } - //! \overload - ASMJIT_INLINE const Operand& getTarget() const noexcept { return static_cast(_opArray[0]); } - - //! Get return at `i`. - ASMJIT_INLINE Operand& getRet(uint32_t i = 0) noexcept { - ASMJIT_ASSERT(i < 2); - return static_cast(_ret[i]); - } - //! \overload - ASMJIT_INLINE const Operand& getRet(uint32_t i = 0) const noexcept { - ASMJIT_ASSERT(i < 2); - return static_cast(_ret[i]); - } - - //! Get argument at `i`. - ASMJIT_INLINE Operand& getArg(uint32_t i) noexcept { - ASMJIT_ASSERT(i < kFuncArgCountLoHi); - return static_cast(_args[i]); - } - //! \overload - ASMJIT_INLINE const Operand& getArg(uint32_t i) const noexcept { - ASMJIT_ASSERT(i < kFuncArgCountLoHi); - return static_cast(_args[i]); - } - - //! Set argument at `i` to `op`. - ASMJIT_API bool _setArg(uint32_t i, const Operand_& op) noexcept; - //! Set return at `i` to `op`. - ASMJIT_API bool _setRet(uint32_t i, const Operand_& op) noexcept; - - //! Set argument at `i` to `reg`. - ASMJIT_INLINE bool setArg(uint32_t i, const Reg& reg) noexcept { return _setArg(i, reg); } - //! Set argument at `i` to `imm`. - ASMJIT_INLINE bool setArg(uint32_t i, const Imm& imm) noexcept { return _setArg(i, imm); } - - //! Set return at `i` to `var`. - ASMJIT_INLINE bool setRet(uint32_t i, const Reg& reg) noexcept { return _setRet(i, reg); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - FuncDetail _funcDetail; //!< Function detail. - Operand_ _ret[2]; //!< Return. - Operand_* _args; //!< Arguments. -}; - -// ============================================================================ -// [asmjit::CCPushArg] -// ============================================================================ - -//! Push argument before a function call (CodeCompiler). 
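CCFuncCall nodes are likewise created through CodeCompiler::addCall() (declared later in this header); the setSignature(), setArg() and setRet() members above wire up the call's operands. A hedged sketch, assuming a compiler instance `cc`, a backend call-instruction id `callInstId`, a callable `target` operand, a `FuncSignature` `sign`, and virtual registers `x` and `result` supplied by the backend. The CCPushArg node declared next is an internal helper emitted for arguments that must be pushed or converted before such a call.

CCFuncCall* call = cc.addCall(callInstId, target, sign);
if (call) {
  call->setArg(0, x);        // Passes virtual register `x` as the first argument.
  call->setRet(0, result);   // Receives the return value into virtual register `result`.
}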
-class CCPushArg : public CBNode { -public: - ASMJIT_NONCOPYABLE(CCPushArg) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CCPushArg` instance. - ASMJIT_INLINE CCPushArg(CodeBuilder* cb, CCFuncCall* call, VirtReg* src, VirtReg* cvt) noexcept - : CBNode(cb, kNodePushArg), - _call(call), - _src(src), - _cvt(cvt), - _args(0) { - orFlags(kFlagIsRemovable); - } - - //! Destroy the `CCPushArg` instance. - ASMJIT_INLINE ~CCPushArg() noexcept {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the associated function-call. - ASMJIT_INLINE CCFuncCall* getCall() const noexcept { return _call; } - //! Get source variable. - ASMJIT_INLINE VirtReg* getSrcReg() const noexcept { return _src; } - //! Get conversion variable. - ASMJIT_INLINE VirtReg* getCvtReg() const noexcept { return _cvt; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - CCFuncCall* _call; //!< Associated `CCFuncCall`. - VirtReg* _src; //!< Source variable. - VirtReg* _cvt; //!< Temporary variable used for conversion (or null). - uint32_t _args; //!< Affected arguments bit-array. -}; - -// ============================================================================ -// [asmjit::CodeCompiler] -// ============================================================================ - -//! Code emitter that uses virtual registers and performs register allocation. -//! -//! Compiler is a high-level code-generation tool that provides register -//! allocation and automatic handling of function calling conventions. It was -//! primarily designed for merging multiple parts of code into a function -//! without worrying about registers and function calling conventions. -//! -//! CodeCompiler can be used, with a minimum effort, to handle 32-bit and 64-bit -//! code at the same time. -//! -//! CodeCompiler is based on CodeBuilder and contains all the features it -//! provides. It means that the code it stores can be modified (removed, added, -//! injected) and analyzed. When the code is finalized the compiler can emit -//! the code into an Assembler to translate the abstract representation into a -//! machine code. -class ASMJIT_VIRTAPI CodeCompiler : public CodeBuilder { -public: - ASMJIT_NONCOPYABLE(CodeCompiler) - typedef CodeBuilder Base; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CodeCompiler` instance. - ASMJIT_API CodeCompiler() noexcept; - //! Destroy the `CodeCompiler` instance. - ASMJIT_API virtual ~CodeCompiler() noexcept; - - // -------------------------------------------------------------------------- - // [Events] - // -------------------------------------------------------------------------- - - ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override; - ASMJIT_API virtual Error onDetach(CodeHolder* code) noexcept override; - - // -------------------------------------------------------------------------- - // [Node-Factory] - // -------------------------------------------------------------------------- - - //! 
\internal - //! - //! Create a new `CCHint`. - ASMJIT_API CCHint* newHintNode(Reg& reg, uint32_t hint, uint32_t value) noexcept; - - // -------------------------------------------------------------------------- - // [Func] - // -------------------------------------------------------------------------- - - //! Get the current function. - ASMJIT_INLINE CCFunc* getFunc() const noexcept { return _func; } - - //! Create a new `CCFunc`. - ASMJIT_API CCFunc* newFunc(const FuncSignature& sign) noexcept; - //! Add a function `node` to the stream. - ASMJIT_API CCFunc* addFunc(CCFunc* func); - //! Add a new function. - ASMJIT_API CCFunc* addFunc(const FuncSignature& sign); - //! Emit a sentinel that marks the end of the current function. - ASMJIT_API CBSentinel* endFunc(); - - // -------------------------------------------------------------------------- - // [Ret] - // -------------------------------------------------------------------------- - - //! Create a new `CCFuncRet`. - ASMJIT_API CCFuncRet* newRet(const Operand_& o0, const Operand_& o1) noexcept; - //! Add a new `CCFuncRet`. - ASMJIT_API CCFuncRet* addRet(const Operand_& o0, const Operand_& o1) noexcept; - - // -------------------------------------------------------------------------- - // [Call] - // -------------------------------------------------------------------------- - - //! Create a new `CCFuncCall`. - ASMJIT_API CCFuncCall* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept; - //! Add a new `CCFuncCall`. - ASMJIT_API CCFuncCall* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept; - - // -------------------------------------------------------------------------- - // [Args] - // -------------------------------------------------------------------------- - - //! Set a function argument at `argIndex` to `reg`. - ASMJIT_API Error setArg(uint32_t argIndex, const Reg& reg); - - // -------------------------------------------------------------------------- - // [Hint] - // -------------------------------------------------------------------------- - - //! Emit a new hint (purely informational node). - ASMJIT_API Error _hint(Reg& reg, uint32_t hint, uint32_t value); - - // -------------------------------------------------------------------------- - // [VirtReg / Stack] - // -------------------------------------------------------------------------- - - //! Create a new virtual register representing the given `vti` and `signature`. - //! - //! This function accepts either register type representing a machine-specific - //! register, like `X86Reg`, or RegTag representation, which represents - //! machine independent register, and from the machine-specific register - //! is deduced. - ASMJIT_API VirtReg* newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept; - - ASMJIT_API Error _newReg(Reg& out, uint32_t typeId, const char* name); - ASMJIT_API Error _newReg(Reg& out, uint32_t typeId, const char* nameFmt, va_list ap); - - ASMJIT_API Error _newReg(Reg& out, const Reg& ref, const char* name); - ASMJIT_API Error _newReg(Reg& out, const Reg& ref, const char* nameFmt, va_list ap); - - ASMJIT_API Error _newStack(Mem& out, uint32_t size, uint32_t alignment, const char* name); - ASMJIT_API Error _newConst(Mem& out, uint32_t scope, const void* data, size_t size); - - // -------------------------------------------------------------------------- - // [VirtReg] - // -------------------------------------------------------------------------- - - //! 
Get whether the virtual register `r` is valid. - ASMJIT_INLINE bool isVirtRegValid(const Reg& reg) const noexcept { - return isVirtRegValid(reg.getId()); - } - //! \overload - ASMJIT_INLINE bool isVirtRegValid(uint32_t id) const noexcept { - size_t index = Operand::unpackId(id); - return index < _vRegArray.getLength(); - } - - //! Get \ref VirtReg associated with the given `r`. - ASMJIT_INLINE VirtReg* getVirtReg(const Reg& reg) const noexcept { - return getVirtRegById(reg.getId()); - } - //! Get \ref VirtReg associated with the given `id`. - ASMJIT_INLINE VirtReg* getVirtRegById(uint32_t id) const noexcept { - ASMJIT_ASSERT(id != kInvalidValue); - size_t index = Operand::unpackId(id); - - ASMJIT_ASSERT(index < _vRegArray.getLength()); - return _vRegArray[index]; - } - - //! Get an array of all virtual registers managed by CodeCompiler. - ASMJIT_INLINE const ZoneVector& getVirtRegArray() const noexcept { return _vRegArray; } - - //! Alloc a virtual register `reg`. - ASMJIT_API Error alloc(Reg& reg); - //! Alloc a virtual register `reg` using `physId` as a register id. - ASMJIT_API Error alloc(Reg& reg, uint32_t physId); - //! Alloc a virtual register `reg` using `ref` as a register operand. - ASMJIT_API Error alloc(Reg& reg, const Reg& ref); - //! Spill a virtual register `reg`. - ASMJIT_API Error spill(Reg& reg); - //! Save a virtual register `reg` if the status is `modified` at this point. - ASMJIT_API Error save(Reg& reg); - //! Unuse a virtual register `reg`. - ASMJIT_API Error unuse(Reg& reg); - - //! Get priority of a virtual register `reg`. - ASMJIT_API uint32_t getPriority(Reg& reg) const; - //! Set priority of variable `reg` to `priority`. - ASMJIT_API void setPriority(Reg& reg, uint32_t priority); - - //! Get save-on-unuse `reg` property. - ASMJIT_API bool getSaveOnUnuse(Reg& reg) const; - //! Set save-on-unuse `reg` property to `value`. - ASMJIT_API void setSaveOnUnuse(Reg& reg, bool value); - - //! Rename variable `reg` to `name`. - //! - //! NOTE: Only new name will appear in the logger. - ASMJIT_API void rename(Reg& reg, const char* fmt, ...); - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - CCFunc* _func; //!< Current function. - - Zone _vRegZone; //!< Allocates \ref VirtReg objects. - ZoneVector _vRegArray; //!< Stores array of \ref VirtReg pointers. - - CBConstPool* _localConstPool; //!< Local constant pool, flushed at the end of each function. - CBConstPool* _globalConstPool; //!< Global constant pool, flushed at the end of the compilation. -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // !ASMJIT_DISABLE_COMPILER -#endif // _ASMJIT_BASE_CODECOMPILER_H diff --git a/src/asmjit/base/codeemitter.cpp b/src/asmjit/base/codeemitter.cpp deleted file mode 100644 index 48a4c9a..0000000 --- a/src/asmjit/base/codeemitter.cpp +++ /dev/null @@ -1,236 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
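Before the CodeEmitter sources below, a short sketch of the virtual-register management API that closes codecompiler.h above (alloc(), spill(), save(), unuse(), setPriority(), rename()). The calls are independent hints rather than a required sequence, and `cc` plus the virtual register `tmp` are assumed to come from an architecture-specific compiler that is not part of these base sources.

cc.alloc(tmp);                 // Asks the allocator to give `tmp` a physical register at this point.
cc.spill(tmp);                 // Forces `tmp` out of its physical register.
cc.save(tmp);                  // Writes `tmp` back only if it is marked as modified here.
cc.unuse(tmp);                 // Marks `tmp` as unused from this point on.
cc.rename(tmp, "counter");     // Renames `tmp`; only the new name appears in the logger.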
- -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/assembler.h" -#include "../base/utils.h" -#include "../base/vmem.h" - -#if defined(ASMJIT_BUILD_X86) -#include "../x86/x86inst.h" -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) -#include "../arm/arminst.h" -#endif // ASMJIT_BUILD_ARM - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::CodeEmitter - Construction / Destruction] -// ============================================================================ - -CodeEmitter::CodeEmitter(uint32_t type) noexcept - : _codeInfo(), - _code(nullptr), - _nextEmitter(nullptr), - _type(static_cast(type)), - _destroyed(false), - _finalized(false), - _reserved(false), - _lastError(kErrorNotInitialized), - _privateData(0), - _globalHints(0), - _globalOptions(kOptionMaybeFailureCase), - _options(0), - _extraReg(), - _inlineComment(nullptr), - _none(), - _nativeGpReg(), - _nativeGpArray(nullptr) {} - -CodeEmitter::~CodeEmitter() noexcept { - if (_code) { - _destroyed = true; - _code->detach(this); - } -} - -// ============================================================================ -// [asmjit::CodeEmitter - Events] -// ============================================================================ - -Error CodeEmitter::onAttach(CodeHolder* code) noexcept { - _codeInfo = code->getCodeInfo(); - _lastError = kErrorOk; - - _globalHints = code->getGlobalHints(); - _globalOptions = code->getGlobalOptions(); - - return kErrorOk; -} - -Error CodeEmitter::onDetach(CodeHolder* code) noexcept { - _codeInfo.reset(); - _finalized = false; - _lastError = kErrorNotInitialized; - - _privateData = 0; - _globalHints = 0; - _globalOptions = kOptionMaybeFailureCase; - - _options = 0; - _extraReg.reset(); - _inlineComment = nullptr; - - _nativeGpReg.reset(); - _nativeGpArray = nullptr; - - return kErrorOk; -} - -// ============================================================================ -// [asmjit::CodeEmitter - Code-Generation] -// ============================================================================ - -Error CodeEmitter::_emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) { - const Operand_* op = opArray; - switch (opCount) { - case 0: return _emit(instId, _none, _none, _none, _none); - case 1: return _emit(instId, op[0], _none, _none, _none); - case 2: return _emit(instId, op[0], op[1], _none, _none); - case 3: return _emit(instId, op[0], op[1], op[2], _none); - case 4: return _emit(instId, op[0], op[1], op[2], op[3]); - case 5: return _emit(instId, op[0], op[1], op[2], op[3], op[4], _none); - case 6: return _emit(instId, op[0], op[1], op[2], op[3], op[4], op[5]); - - default: - return DebugUtils::errored(kErrorInvalidArgument); - } -} - -// ============================================================================ -// [asmjit::CodeEmitter - Finalize] -// ============================================================================ - -Label CodeEmitter::getLabelByName(const char* name, size_t nameLength, uint32_t parentId) noexcept { - return Label(_code ? _code->getLabelIdByName(name, nameLength, parentId) : static_cast(0)); -} - -// ============================================================================ -// [asmjit::CodeEmitter - Finalize] -// ============================================================================ - -Error CodeEmitter::finalize() { - // Finalization does nothing by default, overridden by `CodeBuilder`. 
- return kErrorOk; -} - -// ============================================================================ -// [asmjit::CodeEmitter - Error Handling] -// ============================================================================ - -Error CodeEmitter::setLastError(Error error, const char* message) { - // This is fatal, CodeEmitter can't set error without being attached to `CodeHolder`. - ASMJIT_ASSERT(_code != nullptr); - - // Special case used to reset the last error. - if (error == kErrorOk) { - _lastError = kErrorOk; - _globalOptions &= ~kOptionMaybeFailureCase; - return kErrorOk; - } - - if (!message) - message = DebugUtils::errorAsString(error); - - // Logging is skipped if the error is handled by `ErrorHandler`. - ErrorHandler* handler = _code->_errorHandler; - if (handler && handler->handleError(error, message, this)) - return error; - - // The handler->handleError() function may throw an exception or longjmp() - // to terminate the execution of `setLastError()`. This is the reason why - // we have delayed changing the `_error` member until now. - _lastError = error; - _globalOptions |= kOptionMaybeFailureCase; - - return error; -} - -// ============================================================================ -// [asmjit::CodeEmitter - Helpers] -// ============================================================================ - -bool CodeEmitter::isLabelValid(uint32_t id) const noexcept { - size_t index = Operand::unpackId(id); - return _code && index < _code->_labels.getLength(); -} - -Error CodeEmitter::commentf(const char* fmt, ...) { - Error err = _lastError; - if (err) return err; - -#if !defined(ASMJIT_DISABLE_LOGGING) - if (_globalOptions & kOptionLoggingEnabled) { - va_list ap; - va_start(ap, fmt); - err = _code->_logger->logv(fmt, ap); - va_end(ap); - } -#else - ASMJIT_UNUSED(fmt); -#endif - - return err; -} - -Error CodeEmitter::commentv(const char* fmt, va_list ap) { - Error err = _lastError; - if (err) return err; - -#if !defined(ASMJIT_DISABLE_LOGGING) - if (_globalOptions & kOptionLoggingEnabled) - err = _code->_logger->logv(fmt, ap); -#else - ASMJIT_UNUSED(fmt); - ASMJIT_UNUSED(ap); -#endif - - return err; -} - -// ============================================================================ -// [asmjit::CodeEmitter - Emit] -// ============================================================================ - -#define OP const Operand_& - -Error CodeEmitter::emit(uint32_t instId) { return _emit(instId, _none, _none, _none, _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0) { return _emit(instId, o0, _none, _none, _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1) { return _emit(instId, o0, o1, _none, _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2) { return _emit(instId, o0, o1, o2, _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3) { return _emit(instId, o0, o1, o2, o3); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4) { return _emit(instId, o0, o1, o2, o3, o4, _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, OP o5) { return _emit(instId, o0, o1, o2, o3, o4, o5); } - -Error CodeEmitter::emit(uint32_t instId, int o0) { return _emit(instId, Imm(o0), _none, _none, _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, int o1) { return _emit(instId, o0, Imm(o1), _none, _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, int o2) { return _emit(instId, o0, o1, Imm(o2), _none); } -Error CodeEmitter::emit(uint32_t 
instId, OP o0, OP o1, OP o2, int o3) { return _emit(instId, o0, o1, o2, Imm(o3)); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int o4) { return _emit(instId, o0, o1, o2, o3, Imm(o4), _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int o5) { return _emit(instId, o0, o1, o2, o3, o4, Imm(o5)); } - -Error CodeEmitter::emit(uint32_t instId, int64_t o0) { return _emit(instId, Imm(o0), _none, _none, _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, int64_t o1) { return _emit(instId, o0, Imm(o1), _none, _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, int64_t o2) { return _emit(instId, o0, o1, Imm(o2), _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, int64_t o3) { return _emit(instId, o0, o1, o2, Imm(o3)); } - -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int64_t o4) { return _emit(instId, o0, o1, o2, o3, Imm(o4), _none); } -Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int64_t o5) { return _emit(instId, o0, o1, o2, o3, o4, Imm(o5)); } - -#undef OP - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/codeemitter.h b/src/asmjit/base/codeemitter.h deleted file mode 100644 index 93a2de3..0000000 --- a/src/asmjit/base/codeemitter.h +++ /dev/null @@ -1,499 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_CODEEMITTER_H -#define _ASMJIT_BASE_CODEEMITTER_H - -// [Dependencies] -#include "../base/arch.h" -#include "../base/codeholder.h" -#include "../base/operand.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [Forward Declarations] -// ============================================================================ - -class ConstPool; - -// ============================================================================ -// [asmjit::CodeEmitter] -// ============================================================================ - -//! Provides a base foundation to emit code - specialized by \ref Assembler and -//! \ref CodeBuilder. -class ASMJIT_VIRTAPI CodeEmitter { -public: - //! CodeEmitter type. - ASMJIT_ENUM(Type) { - kTypeNone = 0, - kTypeAssembler = 1, - kTypeBuilder = 2, - kTypeCompiler = 3, - kTypeCount = 4 - }; - - //! CodeEmitter hints - global settings that affect machine-code generation. - ASMJIT_ENUM(Hints) { - //! Emit optimized code-alignment sequences. - //! - //! Default `true`. - //! - //! X86/X64 Specific - //! ---------------- - //! - //! Default align sequence used by X86/X64 architecture is one-byte (0x90) - //! opcode that is often shown by disassemblers as nop. However there are - //! more optimized align sequences for 2-11 bytes that may execute faster. - //! If this feature is enabled AsmJit will generate specialized sequences - //! for alignment between 2 to 11 bytes. - kHintOptimizedAlign = 0x00000001U, - - //! Emit jump-prediction hints. - //! - //! Default `false`. - //! - //! X86/X64 Specific - //! ---------------- - //! - //! Jump prediction is usually based on the direction of the jump. If the - //! jump is backward it is usually predicted as taken; and if the jump is - //! forward it is usually predicted as not-taken. The reason is that loops - //! 
generally use backward jumps and conditions usually use forward jumps. - //! However this behavior can be overridden by using instruction prefixes. - //! If this option is enabled these hints will be emitted. - //! - //! This feature is disabled by default, because the only processor that - //! used to take prediction hints into consideration was the P4. Newer processors - //! implement heuristics for branch prediction that ignore any static hints. - kHintPredictedJumps = 0x00000002U - }; - - //! CodeEmitter options that are merged with instruction options. - ASMJIT_ENUM(Options) { - //! Reserved, used to check for errors in `Assembler::_emit()`. In addition, - //! if an emitter is in an error state it will have `kOptionMaybeFailureCase` - //! set. - kOptionMaybeFailureCase = 0x00000001U, - - //! Perform a strict validation before the instruction is emitted. - kOptionStrictValidation = 0x00000002U, - - //! Logging is enabled and `CodeHolder::getLogger()` should return a valid - //! \ref Logger pointer. - kOptionLoggingEnabled = 0x00000004U, - - //! Mask of all internal options that are not used to represent instruction - //! options, but are used to instrument Assembler and CodeBuilder. These - //! options are internal and should not be used outside of AsmJit itself. - //! - //! NOTE: Reserved options should never appear in `CBInst` options. - kOptionReservedMask = 0x00000007U, - - //! Used only by Assembler to mark that `_op4` and `_op5` are used. - kOptionOp4Op5Used = 0x00000008U, - - //! Prevents following a jump during compilation (CodeCompiler). - kOptionUnfollow = 0x00000010U, - - //! Overwrite the destination operand (CodeCompiler). - //! - //! Hint that is important for register liveness analysis. It tells the - //! compiler that the destination operand will be overwritten now or by - //! adjacent instructions. CodeCompiler knows when a register is completely - //! overwritten by a single instruction, for example you don't have to - //! mark "movaps" or "pxor x, x"; however, if a pair of instructions is - //! used and the first of them doesn't completely overwrite the content - //! of the destination, CodeCompiler fails to mark that register as dead. - //! - //! X86/X64 Specific - //! ---------------- - //! - //! - All instructions that always overwrite at least the size of the - //! register the virtual-register uses, for example "mov", "movq", - //! "movaps", don't need the overwrite option to be used - conversion, - //! shuffle, and other miscellaneous instructions included. - //! - //! - All instructions that clear the destination register if all operands - //! are the same, for example "xor x, x", "pcmpeqb x, x", etc... - //! - //! - Consecutive instructions that partially overwrite the variable until - //! there is no old content require `overwrite()` to be used. Some - //! examples (not always the best use cases though): - //! - //! - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa - //! - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa - //! - `mov al, ?` followed by `and ax, 0xFF` - //! - `mov al, ?` followed by `mov ah, al` - //! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1` - //! - //! - If an allocated variable is used temporarily for scalar operations. For - //! example if you allocate a full vector like `X86Compiler::newXmm()` - //! and then use that vector for scalar operations you should use the - //! `overwrite()` directive: - //! - //! - `sqrtss x, y` - only the LO element of `x` is changed; if you don't use - //!

HI elements, use `X86Compiler.overwrite().sqrtss(x, y)`. - kOptionOverwrite = 0x00000020U - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_API CodeEmitter(uint32_t type) noexcept; - ASMJIT_API virtual ~CodeEmitter() noexcept; - - // -------------------------------------------------------------------------- - // [Events] - // -------------------------------------------------------------------------- - - //! Called after the \ref CodeEmitter was attached to the \ref CodeHolder. - virtual Error onAttach(CodeHolder* code) noexcept = 0; - //! Called after the \ref CodeEmitter was detached from the \ref CodeHolder. - virtual Error onDetach(CodeHolder* code) noexcept = 0; - - // -------------------------------------------------------------------------- - // [Code-Generation] - // -------------------------------------------------------------------------- - - //! Emit instruction having max 4 operands. - virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) = 0; - //! Emit instruction having max 6 operands. - virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) = 0; - //! Emit instruction having operands stored in array. - virtual Error _emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount); - - //! Create a new label. - virtual Label newLabel() = 0; - //! Create a new named label. - virtual Label newNamedLabel( - const char* name, - size_t nameLength = Globals::kInvalidIndex, - uint32_t type = Label::kTypeGlobal, - uint32_t parentId = 0) = 0; - - //! Get a label by name. - //! - //! Returns invalid Label in case that the name is invalid or label was not found. - //! - //! NOTE: This function doesn't trigger ErrorHandler in case the name is - //! invalid or no such label exist. You must always check the validity of the - //! \ref Label returned. - ASMJIT_API Label getLabelByName( - const char* name, - size_t nameLength = Globals::kInvalidIndex, - uint32_t parentId = 0) noexcept; - - //! Bind the `label` to the current position of the current section. - //! - //! NOTE: Attempt to bind the same label multiple times will return an error. - virtual Error bind(const Label& label) = 0; - - //! Align to the `alignment` specified. - //! - //! The sequence that is used to fill the gap between the aligned location - //! and the current location depends on the align `mode`, see \ref AlignMode. - virtual Error align(uint32_t mode, uint32_t alignment) = 0; - - //! Embed raw data into the code-buffer. - virtual Error embed(const void* data, uint32_t size) = 0; - - //! Embed absolute label address as data (4 or 8 bytes). - virtual Error embedLabel(const Label& label) = 0; - - //! Embed a constant pool into the code-buffer in the following steps: - //! 1. Align by using kAlignData to the minimum `pool` alignment. - //! 2. Bind `label` so it's bound to an aligned location. - //! 3. Emit constant pool data. - virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0; - - //! Emit a comment string `s` with an optional `len` parameter. 
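The label and embedding interface above is shared by all emitters. A hedged sketch of typical use, assuming a concrete emitter instance `emitter` (Assembler, CodeBuilder, or CodeCompiler); the `table` buffer is a caller-provided detail introduced only for illustration. The comment() hook declared next works on the same principle of emitting informative content into the stream.

static const uint8_t table[64] = {};            // Caller-provided data (assumed here).

Label entry = emitter.newNamedLabel("entry");   // Global named label, length deduced from the string.
emitter.bind(entry);                            // Binds it at the current position of the current section.
// ... code ...
Label data = emitter.newLabel();                // Anonymous label placed in front of embedded data.
emitter.bind(data);
emitter.embed(table, 64);                       // Copies 64 bytes of raw data into the code-buffer.

Label found = emitter.getLabelByName("entry");  // Lookup never triggers the error handler...
if (!emitter.isLabelValid(found)) {             // ...so the result must be validated explicitly.
  // The label does not exist.
}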
- virtual Error comment(const char* s, size_t len = Globals::kInvalidIndex) = 0; - - // -------------------------------------------------------------------------- - // [Code-Generation Status] - // -------------------------------------------------------------------------- - - //! Get if the CodeEmitter is initialized (i.e. attached to a \ref CodeHolder). - ASMJIT_INLINE bool isInitialized() const noexcept { return _code != nullptr; } - - ASMJIT_API virtual Error finalize(); - - // -------------------------------------------------------------------------- - // [Code Information] - // -------------------------------------------------------------------------- - - //! Get information about the code, see \ref CodeInfo. - ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; } - //! Get \ref CodeHolder this CodeEmitter is attached to. - ASMJIT_INLINE CodeHolder* getCode() const noexcept { return _code; } - - //! Get information about the architecture, see \ref ArchInfo. - ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _codeInfo.getArchInfo(); } - - //! Get if the target architecture is 32-bit. - ASMJIT_INLINE bool is32Bit() const noexcept { return getArchInfo().is32Bit(); } - //! Get if the target architecture is 64-bit. - ASMJIT_INLINE bool is64Bit() const noexcept { return getArchInfo().is64Bit(); } - - //! Get the target architecture type. - ASMJIT_INLINE uint32_t getArchType() const noexcept { return getArchInfo().getType(); } - //! Get the target architecture sub-type. - ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return getArchInfo().getSubType(); } - //! Get the target architecture's GP register size (4 or 8 bytes). - ASMJIT_INLINE uint32_t getGpSize() const noexcept { return getArchInfo().getGpSize(); } - //! Get the number of target GP registers. - ASMJIT_INLINE uint32_t getGpCount() const noexcept { return getArchInfo().getGpCount(); } - - // -------------------------------------------------------------------------- - // [Code-Emitter Type] - // -------------------------------------------------------------------------- - - //! Get the type of this CodeEmitter, see \ref Type. - ASMJIT_INLINE uint32_t getType() const noexcept { return _type; } - - ASMJIT_INLINE bool isAssembler() const noexcept { return _type == kTypeAssembler; } - ASMJIT_INLINE bool isCodeBuilder() const noexcept { return _type == kTypeBuilder; } - ASMJIT_INLINE bool isCodeCompiler() const noexcept { return _type == kTypeCompiler; } - - // -------------------------------------------------------------------------- - // [Global Information] - // -------------------------------------------------------------------------- - - //! Get global hints. - ASMJIT_INLINE uint32_t getGlobalHints() const noexcept { return _globalHints; } - - //! Get global options. - //! - //! Global options are merged with instruction options before the instruction - //! is encoded. These options have some bits reserved that are used for error - //! checking, logging, and strict validation. Other options are globals that - //! affect each instruction, for example if VEX3 is set globally, it will all - //! instructions, even those that don't have such option set. - ASMJIT_INLINE uint32_t getGlobalOptions() const noexcept { return _globalOptions; } - - // -------------------------------------------------------------------------- - // [Error Handling] - // -------------------------------------------------------------------------- - - //! Get if the object is in error state. - //! - //! 
Error state means that it does not consume anything unless the error - //! state is reset by calling `resetLastError()`. Use `getLastError()` to - //! get the last error that put the object into the error state. - ASMJIT_INLINE bool isInErrorState() const noexcept { return _lastError != kErrorOk; } - - //! Get the last error code. - ASMJIT_INLINE Error getLastError() const noexcept { return _lastError; } - //! Set the last error code and propagate it through the error handler. - ASMJIT_API Error setLastError(Error error, const char* message = nullptr); - //! Clear the last error code and return `kErrorOk`. - ASMJIT_INLINE Error resetLastError() noexcept { return setLastError(kErrorOk); } - - // -------------------------------------------------------------------------- - // [Accessors That Affect the Next Instruction] - // -------------------------------------------------------------------------- - - //! Get options of the next instruction. - ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; } - //! Set options of the next instruction. - ASMJIT_INLINE void setOptions(uint32_t options) noexcept { _options = options; } - //! Add options of the next instruction. - ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; } - //! Reset options of the next instruction. - ASMJIT_INLINE void resetOptions() noexcept { _options = 0; } - - //! Get if the extra register operand is valid. - ASMJIT_INLINE bool hasExtraReg() const noexcept { return _extraReg.isValid(); } - //! Get an extra operand that will be used by the next instruction (architecture specific). - ASMJIT_INLINE const RegOnly& getExtraReg() const noexcept { return _extraReg; } - //! Set an extra operand that will be used by the next instruction (architecture specific). - ASMJIT_INLINE void setExtraReg(const Reg& reg) noexcept { _extraReg.init(reg); } - //! Set an extra operand that will be used by the next instruction (architecture specific). - ASMJIT_INLINE void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); } - //! Reset an extra operand that will be used by the next instruction (architecture specific). - ASMJIT_INLINE void resetExtraReg() noexcept { _extraReg.reset(); } - - //! Get annotation of the next instruction. - ASMJIT_INLINE const char* getInlineComment() const noexcept { return _inlineComment; } - //! Set annotation of the next instruction. - //! - //! NOTE: This string is set back to null by `_emit()`, but until that it has - //! to remain valid as `CodeEmitter` is not required to make a copy of it (and - //! it would be slow to do that for each instruction). - ASMJIT_INLINE void setInlineComment(const char* s) noexcept { _inlineComment = s; } - //! Reset annotation of the next instruction to null. - ASMJIT_INLINE void resetInlineComment() noexcept { _inlineComment = nullptr; } - - // -------------------------------------------------------------------------- - // [Helpers] - // -------------------------------------------------------------------------- - - //! Get if the `label` is valid (i.e. registered). - ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept { - return isLabelValid(label.getId()); - } - - //! Get if the label `id` is valid (i.e. registered). - ASMJIT_API bool isLabelValid(uint32_t id) const noexcept; - - //! Emit a formatted string `fmt`. - ASMJIT_API Error commentf(const char* fmt, ...); - //! Emit a formatted string `fmt` (va_list version). 
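The error-handling and next-instruction state above is sticky: once a call fails, the emitter stays in the error state until resetLastError() is called, and modifiers such as setInlineComment() apply only to the instruction emitted right after them. A hedged sketch, assuming a concrete emitter `emitter`, a backend instruction id `instId`, and operands `op0`/`op1` that are not defined in this header. The commentv() helper declared next is the va_list counterpart of commentf() used above.

emitter.setInlineComment("hot path");   // Annotates only the next emitted instruction.
emitter.emit(instId, op0, op1);         // A failed emit latches the error state.
if (emitter.isInErrorState()) {
  Error err = emitter.getLastError();   // Inspect the sticky error...
  // ... report `err` through the application's own channel ...
  emitter.resetLastError();             // ...then clear it to resume emitting.
}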
- ASMJIT_API Error commentv(const char* fmt, va_list ap); - - // -------------------------------------------------------------------------- - // [Emit] - // -------------------------------------------------------------------------- - - // NOTE: These `emit()` helpers are designed to address a code-bloat generated - // by C++ compilers to call a function having many arguments. Each parameter to - // `_emit()` requires code to pass it, which means that if we default to 4 - // operand parameters in `_emit()` and instId the C++ compiler would have to - // generate a virtual function call having 5 parameters, which is quite a lot. - // Since by default asm instructions have 2 to 3 operands it's better to - // introduce helpers that pass those and fill all the remaining with `_none`. - - //! Emit an instruction. - ASMJIT_API Error emit(uint32_t instId); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5); - - //! Emit an instruction that has a 32-bit signed immediate operand. - ASMJIT_API Error emit(uint32_t instId, int o0); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, int o1); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, int o2); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, int o3); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, int o4); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, int o5); - - //! Emit an instruction that has a 64-bit signed immediate operand. - ASMJIT_API Error emit(uint32_t instId, int64_t o0); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, int64_t o1); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, int64_t o2); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, int64_t o3); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, int64_t o4); - //! \overload - ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, int64_t o5); - - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, unsigned int o0) { - return emit(instId, static_cast(o0)); - } - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, unsigned int o1) { - return emit(instId, o0, static_cast(o1)); - } - //! 
\overload - ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, unsigned int o2) { - return emit(instId, o0, o1, static_cast(o2)); - } - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, unsigned int o3) { - return emit(instId, o0, o1, o2, static_cast(o3)); - } - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, unsigned int o4) { - return emit(instId, o0, o1, o2, o3, static_cast(o4)); - } - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, unsigned int o5) { - return emit(instId, o0, o1, o2, o3, o4, static_cast(o5)); - } - - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, uint64_t o0) { - return emit(instId, static_cast(o0)); - } - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, uint64_t o1) { - return emit(instId, o0, static_cast(o1)); - } - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, uint64_t o2) { - return emit(instId, o0, o1, static_cast(o2)); - } - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, uint64_t o3) { - return emit(instId, o0, o1, o2, static_cast(o3)); - } - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, uint64_t o4) { - return emit(instId, o0, o1, o2, o3, static_cast(o4)); - } - //! \overload - ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, uint64_t o5) { - return emit(instId, o0, o1, o2, o3, o4, static_cast(o5)); - } - - ASMJIT_INLINE Error emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) { - return _emitOpArray(instId, opArray, opCount); - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - CodeInfo _codeInfo; //!< Basic information about the code (matches CodeHolder::_codeInfo). - CodeHolder* _code; //!< CodeHolder the CodeEmitter is attached to. - CodeEmitter* _nextEmitter; //!< Linked list of `CodeEmitter`s attached to the same \ref CodeHolder. - - uint8_t _type; //!< See CodeEmitter::Type. - uint8_t _destroyed; //!< Set by ~CodeEmitter() before calling `_code->detach()`. - uint8_t _finalized; //!< True if the CodeEmitter is finalized (CodeBuilder & CodeCompiler). - uint8_t _reserved; //!< \internal - Error _lastError; //!< Last error code. - - uint32_t _privateData; //!< Internal private data used freely by any CodeEmitter. - uint32_t _globalHints; //!< Global hints, always in sync with CodeHolder. - uint32_t _globalOptions; //!< Global options, combined with `_options` before used by each instruction. - - uint32_t _options; //!< Used to pass instruction options (affects the next instruction). - RegOnly _extraReg; //!< Extra register (op-mask {k} on AVX-512) (affects the next instruction). - const char* _inlineComment; //!< Inline comment of the next instruction (affects the next instruction). - - Operand_ _none; //!< Used to pass unused operands to `_emit()` instead of passing null. - Reg _nativeGpReg; //!< Native GP register with zero id. 
- const Reg* _nativeGpArray; //!< Array of native registers indexed from zero. -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_CODEEMITTER_H diff --git a/src/asmjit/base/codeholder.cpp b/src/asmjit/base/codeholder.cpp deleted file mode 100644 index b8fe4d1..0000000 --- a/src/asmjit/base/codeholder.cpp +++ /dev/null @@ -1,697 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/assembler.h" -#include "../base/utils.h" -#include "../base/vmem.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::ErrorHandler] -// ============================================================================ - -ErrorHandler::ErrorHandler() noexcept {} -ErrorHandler::~ErrorHandler() noexcept {} - -// ============================================================================ -// [asmjit::CodeHolder - Utilities] -// ============================================================================ - -static void CodeHolder_setGlobalOption(CodeHolder* self, uint32_t clear, uint32_t add) noexcept { - // Modify global options of `CodeHolder` itself. - self->_globalOptions = (self->_globalOptions & ~clear) | add; - - // Modify all global options of all `CodeEmitter`s attached. - CodeEmitter* emitter = self->_emitters; - while (emitter) { - emitter->_globalOptions = (emitter->_globalOptions & ~clear) | add; - emitter = emitter->_nextEmitter; - } -} - -static void CodeHolder_resetInternal(CodeHolder* self, bool releaseMemory) noexcept { - // Detach all `CodeEmitter`s. - while (self->_emitters) - self->detach(self->_emitters); - - // Reset everything into its construction state. - self->_codeInfo.reset(); - self->_globalHints = 0; - self->_globalOptions = 0; - self->_logger = nullptr; - self->_errorHandler = nullptr; - - self->_unresolvedLabelsCount = 0; - self->_trampolinesSize = 0; - - // Reset all sections. - size_t numSections = self->_sections.getLength(); - for (size_t i = 0; i < numSections; i++) { - SectionEntry* section = self->_sections[i]; - if (section->_buffer.hasData() && !section->_buffer.isExternal()) - Internal::releaseMemory(section->_buffer._data); - section->_buffer._data = nullptr; - section->_buffer._capacity = 0; - } - - // Reset zone allocator and all containers using it. 
- ZoneHeap* heap = &self->_baseHeap; - - self->_namedLabels.reset(heap); - self->_relocations.reset(); - self->_labels.reset(); - self->_sections.reset(); - - heap->reset(&self->_baseZone); - self->_baseZone.reset(releaseMemory); -} - -// ============================================================================ -// [asmjit::CodeHolder - Construction / Destruction] -// ============================================================================ - -CodeHolder::CodeHolder() noexcept - : _codeInfo(), - _globalHints(0), - _globalOptions(0), - _emitters(nullptr), - _cgAsm(nullptr), - _logger(nullptr), - _errorHandler(nullptr), - _unresolvedLabelsCount(0), - _trampolinesSize(0), - _baseZone(16384 - Zone::kZoneOverhead), - _dataZone(16384 - Zone::kZoneOverhead), - _baseHeap(&_baseZone), - _namedLabels(&_baseHeap) {} - -CodeHolder::~CodeHolder() noexcept { - CodeHolder_resetInternal(this, true); -} - -// ============================================================================ -// [asmjit::CodeHolder - Init / Reset] -// ============================================================================ - -Error CodeHolder::init(const CodeInfo& info) noexcept { - // Cannot reinitialize if it's locked or there is one or more CodeEmitter - // attached. - if (isInitialized()) - return DebugUtils::errored(kErrorAlreadyInitialized); - - // If we are just initializing there should be no emitters attached). - ASMJIT_ASSERT(_emitters == nullptr); - - // Create the default section and insert it to the `_sections` array. - Error err = _sections.willGrow(&_baseHeap); - if (err == kErrorOk) { - SectionEntry* se = _baseZone.allocZeroedT(); - if (ASMJIT_LIKELY(se)) { - se->_flags = SectionEntry::kFlagExec | SectionEntry::kFlagConst; - se->_setDefaultName('.', 't', 'e', 'x', 't'); - _sections.appendUnsafe(se); - } - else { - err = DebugUtils::errored(kErrorNoHeapMemory); - } - } - - if (ASMJIT_UNLIKELY(err)) { - _baseZone.reset(false); - return err; - } - else { - _codeInfo = info; - return kErrorOk; - } -} - -void CodeHolder::reset(bool releaseMemory) noexcept { - CodeHolder_resetInternal(this, releaseMemory); -} - -// ============================================================================ -// [asmjit::CodeHolder - Attach / Detach] -// ============================================================================ - -Error CodeHolder::attach(CodeEmitter* emitter) noexcept { - // Catch a possible misuse of the API. - if (!emitter) - return DebugUtils::errored(kErrorInvalidArgument); - - uint32_t type = emitter->getType(); - if (type == CodeEmitter::kTypeNone || type >= CodeEmitter::kTypeCount) - return DebugUtils::errored(kErrorInvalidState); - - // This is suspicious, but don't fail if `emitter` matches. - if (emitter->_code != nullptr) { - if (emitter->_code == this) return kErrorOk; - return DebugUtils::errored(kErrorInvalidState); - } - - // Special case - attach `Assembler`. - CodeEmitter** pSlot = nullptr; - if (type == CodeEmitter::kTypeAssembler) { - if (_cgAsm) - return DebugUtils::errored(kErrorSlotOccupied); - pSlot = reinterpret_cast(&_cgAsm); - } - - Error err = emitter->onAttach(this); - if (err != kErrorOk) return err; - - // Add to a single-linked list of `CodeEmitter`s. - emitter->_nextEmitter = _emitters; - _emitters = emitter; - if (pSlot) *pSlot = emitter; - - // Establish the connection. 
- emitter->_code = this; - return kErrorOk; -} - -Error CodeHolder::detach(CodeEmitter* emitter) noexcept { - if (!emitter) - return DebugUtils::errored(kErrorInvalidArgument); - - if (emitter->_code != this) - return DebugUtils::errored(kErrorInvalidState); - - uint32_t type = emitter->getType(); - Error err = kErrorOk; - - // NOTE: We always detach if we were asked to, if error happens during - // `emitter->onDetach()` we just propagate it, but the CodeEmitter will - // be detached. - if (!emitter->_destroyed) { - if (type == CodeEmitter::kTypeAssembler) - static_cast(emitter)->sync(); - err = emitter->onDetach(this); - } - - // Special case - detach `Assembler`. - if (type == CodeEmitter::kTypeAssembler) - _cgAsm = nullptr; - - // Remove from a single-linked list of `CodeEmitter`s. - CodeEmitter** pPrev = &_emitters; - for (;;) { - ASMJIT_ASSERT(*pPrev != nullptr); - CodeEmitter* cur = *pPrev; - - if (cur == emitter) { - *pPrev = emitter->_nextEmitter; - break; - } - - pPrev = &cur->_nextEmitter; - } - - emitter->_code = nullptr; - emitter->_nextEmitter = nullptr; - - return err; -} - -// ============================================================================ -// [asmjit::CodeHolder - Sync] -// ============================================================================ - -void CodeHolder::sync() noexcept { - if (_cgAsm) _cgAsm->sync(); -} - -// ============================================================================ -// [asmjit::CodeHolder - Result Information] -// ============================================================================ - -size_t CodeHolder::getCodeSize() const noexcept { - // Reflect all changes first. - const_cast(this)->sync(); - - // TODO: Support sections. - return _sections[0]->_buffer._length + getTrampolinesSize(); -} - -// ============================================================================ -// [asmjit::CodeHolder - Logging & Error Handling] -// ============================================================================ - -#if !defined(ASMJIT_DISABLE_LOGGING) -void CodeHolder::setLogger(Logger* logger) noexcept { - uint32_t opt = 0; - if (logger) opt = CodeEmitter::kOptionLoggingEnabled; - - _logger = logger; - CodeHolder_setGlobalOption(this, CodeEmitter::kOptionLoggingEnabled, opt); -} -#endif // !ASMJIT_DISABLE_LOGGING - -Error CodeHolder::setErrorHandler(ErrorHandler* handler) noexcept { - _errorHandler = handler; - return kErrorOk; -} - -// ============================================================================ -// [asmjit::CodeHolder - Sections] -// ============================================================================ - -static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t n) noexcept { - uint8_t* oldData = cb->_data; - uint8_t* newData; - - if (oldData && !cb->isExternal()) - newData = static_cast(Internal::reallocMemory(oldData, n)); - else - newData = static_cast(Internal::allocMemory(n)); - - if (ASMJIT_UNLIKELY(!newData)) - return DebugUtils::errored(kErrorNoHeapMemory); - - cb->_data = newData; - cb->_capacity = n; - - // Update the `Assembler` pointers if attached. Maybe we should introduce an - // event for this, but since only one Assembler can be attached at a time it - // should not matter how these pointers are updated. 
- Assembler* a = self->_cgAsm; - if (a && &a->_section->_buffer == cb) { - size_t offset = a->getOffset(); - - a->_bufferData = newData; - a->_bufferEnd = newData + n; - a->_bufferPtr = newData + offset; - } - - return kErrorOk; -} - -Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept { - // This is most likely called by `Assembler` so `sync()` shouldn't be needed, - // however, if this is called by the user and the currently attached Assembler - // did generate some code we could lose that, so sync now and make sure the - // section length is updated. - if (_cgAsm) _cgAsm->sync(); - - // Now the length of the section must be valid. - size_t length = cb->getLength(); - if (ASMJIT_UNLIKELY(n > IntTraits::maxValue() - length)) - return DebugUtils::errored(kErrorNoHeapMemory); - - // We can now check if growing the buffer is really necessary. It's unlikely - // that this function is called while there is still room for `n` bytes. - size_t capacity = cb->getCapacity(); - size_t required = cb->getLength() + n; - if (ASMJIT_UNLIKELY(required <= capacity)) return kErrorOk; - - if (cb->isFixedSize()) - return DebugUtils::errored(kErrorCodeTooLarge); - - if (capacity < 8096) - capacity = 8096; - else - capacity += Globals::kAllocOverhead; - - do { - size_t old = capacity; - if (capacity < Globals::kAllocThreshold) - capacity *= 2; - else - capacity += Globals::kAllocThreshold; - - if (capacity < Globals::kAllocThreshold) - capacity *= 2; - else - capacity += Globals::kAllocThreshold; - - // Overflow. - if (ASMJIT_UNLIKELY(old > capacity)) - return DebugUtils::errored(kErrorNoHeapMemory); - } while (capacity - Globals::kAllocOverhead < required); - - return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead); -} - -Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept { - size_t capacity = cb->getCapacity(); - if (n <= capacity) return kErrorOk; - - if (cb->isFixedSize()) - return DebugUtils::errored(kErrorCodeTooLarge); - - // We must sync, as mentioned in `growBuffer()` as well. - if (_cgAsm) _cgAsm->sync(); - - return CodeHolder_reserveInternal(this, cb, n); -} - -// ============================================================================ -// [asmjit::CodeHolder - Labels & Symbols] -// ============================================================================ - -namespace { - -//! \internal -//! -//! Only used to lookup a label from `_namedLabels`. -class LabelByName { -public: - ASMJIT_INLINE LabelByName(const char* name, size_t nameLength, uint32_t hVal) noexcept - : name(name), - nameLength(static_cast(nameLength)), - hVal(hVal) {} - - ASMJIT_INLINE bool matches(const LabelEntry* entry) const noexcept { - return static_cast(entry->getNameLength()) == nameLength && - ::memcmp(entry->getName(), name, nameLength) == 0; - } - - const char* name; - uint32_t nameLength; - uint32_t hVal; -}; - -// Returns a hash of `name` and fixes `nameLength` if it's `Globals::kInvalidIndex`. 
-static uint32_t CodeHolder_hashNameAndFixLen(const char* name, size_t& nameLength) noexcept { - uint32_t hVal = 0; - if (nameLength == Globals::kInvalidIndex) { - size_t i = 0; - for (;;) { - uint8_t c = static_cast<uint8_t>(name[i]); - if (!c) break; - hVal = Utils::hashRound(hVal, c); - i++; - } - nameLength = i; - } - else { - for (size_t i = 0; i < nameLength; i++) { - uint8_t c = static_cast<uint8_t>(name[i]); - if (ASMJIT_UNLIKELY(!c)) return DebugUtils::errored(kErrorInvalidLabelName); - hVal = Utils::hashRound(hVal, c); - } - } - return hVal; -} - -} // anonymous namespace - -LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept { - LabelLink* link = _baseHeap.allocT<LabelLink>(); - if (ASMJIT_UNLIKELY(!link)) return nullptr; - - link->prev = le->_links; - le->_links = link; - - link->sectionId = sectionId; - link->relocId = RelocEntry::kInvalidId; - link->offset = offset; - link->rel = rel; - - _unresolvedLabelsCount++; - return link; -} - -Error CodeHolder::newLabelId(uint32_t& idOut) noexcept { - idOut = 0; - - size_t index = _labels.getLength(); - if (ASMJIT_UNLIKELY(index >= Operand::kPackedIdCount)) - return DebugUtils::errored(kErrorLabelIndexOverflow); - - ASMJIT_PROPAGATE(_labels.willGrow(&_baseHeap)); - LabelEntry* le = _baseHeap.allocZeroedT<LabelEntry>(); - - if (ASMJIT_UNLIKELY(!le)) - return DebugUtils::errored(kErrorNoHeapMemory); - - uint32_t id = Operand::packId(static_cast<uint32_t>(index)); - le->_setId(id); - le->_parentId = 0; - le->_sectionId = SectionEntry::kInvalidId; - le->_offset = 0; - - _labels.appendUnsafe(le); - idOut = id; - return kErrorOk; -} - -Error CodeHolder::newNamedLabelId(uint32_t& idOut, const char* name, size_t nameLength, uint32_t type, uint32_t parentId) noexcept { - idOut = 0; - uint32_t hVal = CodeHolder_hashNameAndFixLen(name, nameLength); - - if (ASMJIT_UNLIKELY(nameLength == 0)) - return DebugUtils::errored(kErrorInvalidLabelName); - - if (ASMJIT_UNLIKELY(nameLength > Globals::kMaxLabelLength)) - return DebugUtils::errored(kErrorLabelNameTooLong); - - switch (type) { - case Label::kTypeLocal: - if (ASMJIT_UNLIKELY(Operand::unpackId(parentId) >= _labels.getLength())) - return DebugUtils::errored(kErrorInvalidParentLabel); - - hVal ^= parentId; - break; - - case Label::kTypeGlobal: - if (ASMJIT_UNLIKELY(parentId != 0)) - return DebugUtils::errored(kErrorNonLocalLabelCantHaveParent); - - break; - - default: - return DebugUtils::errored(kErrorInvalidArgument); - } - - // Don't allow to insert duplicates. Local labels allow duplicates that have - // different ids; this is already accomplished by having different hashes - // for the same label names under different parent labels.
- LabelEntry* le = _namedLabels.get(LabelByName(name, nameLength, hVal)); - if (ASMJIT_UNLIKELY(le)) - return DebugUtils::errored(kErrorLabelAlreadyDefined); - - Error err = kErrorOk; - size_t index = _labels.getLength(); - - if (ASMJIT_UNLIKELY(index >= Operand::kPackedIdCount)) - return DebugUtils::errored(kErrorLabelIndexOverflow); - - ASMJIT_PROPAGATE(_labels.willGrow(&_baseHeap)); - le = _baseHeap.allocZeroedT(); - - if (ASMJIT_UNLIKELY(!le)) - return DebugUtils::errored(kErrorNoHeapMemory); - - uint32_t id = Operand::packId(static_cast(index)); - le->_hVal = hVal; - le->_setId(id); - le->_type = static_cast(type); - le->_parentId = 0; - le->_sectionId = SectionEntry::kInvalidId; - le->_offset = 0; - - if (le->_name.mustEmbed(nameLength)) { - le->_name.setEmbedded(name, nameLength); - } - else { - char* nameExternal = static_cast(_dataZone.dup(name, nameLength, true)); - if (ASMJIT_UNLIKELY(!nameExternal)) - return DebugUtils::errored(kErrorNoHeapMemory); - le->_name.setExternal(nameExternal, nameLength); - } - - _labels.appendUnsafe(le); - _namedLabels.put(le); - - idOut = id; - return err; -} - -uint32_t CodeHolder::getLabelIdByName(const char* name, size_t nameLength, uint32_t parentId) noexcept { - uint32_t hVal = CodeHolder_hashNameAndFixLen(name, nameLength); - if (ASMJIT_UNLIKELY(!nameLength)) return 0; - - LabelEntry* le = _namedLabels.get(LabelByName(name, nameLength, hVal)); - return le ? le->getId() : static_cast(0); -} - -// ============================================================================ -// [asmjit::CodeEmitter - Relocations] -// ============================================================================ - -//! Encode MOD byte. -static ASMJIT_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept { - return (m << 6) | (o << 3) | rm; -} - -Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t type, uint32_t size) noexcept { - ASMJIT_PROPAGATE(_relocations.willGrow(&_baseHeap)); - - size_t index = _relocations.getLength(); - if (ASMJIT_UNLIKELY(index > size_t(0xFFFFFFFFU))) - return DebugUtils::errored(kErrorRelocIndexOverflow); - - RelocEntry* re = _baseHeap.allocZeroedT(); - if (ASMJIT_UNLIKELY(!re)) - return DebugUtils::errored(kErrorNoHeapMemory); - - re->_id = static_cast(index); - re->_type = static_cast(type); - re->_size = static_cast(size); - re->_sourceSectionId = SectionEntry::kInvalidId; - re->_targetSectionId = SectionEntry::kInvalidId; - _relocations.appendUnsafe(re); - - *dst = re; - return kErrorOk; -} - -// TODO: Support multiple sections, this only relocates the first. -// TODO: This should go to Runtime as it's responsible for relocating the -// code, CodeHolder should just hold it. -size_t CodeHolder::relocate(void* _dst, uint64_t baseAddress) const noexcept { - SectionEntry* section = _sections[0]; - ASMJIT_ASSERT(section != nullptr); - - uint8_t* dst = static_cast(_dst); - if (baseAddress == Globals::kNoBaseAddress) - baseAddress = static_cast((uintptr_t)dst); - -#if !defined(ASMJIT_DISABLE_LOGGING) - Logger* logger = getLogger(); -#endif // ASMJIT_DISABLE_LOGGING - - size_t minCodeSize = section->getBuffer().getLength(); // Minimum code size. - size_t maxCodeSize = getCodeSize(); // Includes all possible trampolines. - - // We will copy the exact size of the generated code. Extra code for trampolines - // is generated on-the-fly by the relocator (this code doesn't exist at the moment). - ::memcpy(dst, section->_buffer._data, minCodeSize); - - // Trampoline offset from the beginning of dst/baseAddress. 
- size_t trampOffset = minCodeSize; - - // Relocate all recorded locations. - size_t numRelocs = _relocations.getLength(); - const RelocEntry* const* reArray = _relocations.getData(); - - for (size_t i = 0; i < numRelocs; i++) { - const RelocEntry* re = reArray[i]; - - // Possibly deleted or optimized out relocation entry. - if (re->getType() == RelocEntry::kTypeNone) - continue; - - uint64_t ptr = re->getData(); - size_t codeOffset = static_cast(re->getSourceOffset()); - - // Make sure that the `RelocEntry` is correct, we don't want to write - // out of bounds in `dst`. - if (ASMJIT_UNLIKELY(codeOffset + re->getSize() > maxCodeSize)) - return DebugUtils::errored(kErrorInvalidRelocEntry); - - // Whether to use trampoline, can be only used if relocation type is `kRelocTrampoline`. - bool useTrampoline = false; - - switch (re->getType()) { - case RelocEntry::kTypeAbsToAbs: { - break; - } - - case RelocEntry::kTypeRelToAbs: { - ptr += baseAddress; - break; - } - - case RelocEntry::kTypeAbsToRel: { - ptr -= baseAddress + re->getSourceOffset() + re->getSize(); - break; - } - - case RelocEntry::kTypeTrampoline: { - if (re->getSize() != 4) - return DebugUtils::errored(kErrorInvalidRelocEntry); - - ptr -= baseAddress + re->getSourceOffset() + re->getSize(); - if (!Utils::isInt32(static_cast(ptr))) { - ptr = (uint64_t)trampOffset - re->getSourceOffset() - re->getSize(); - useTrampoline = true; - } - break; - } - - default: - return DebugUtils::errored(kErrorInvalidRelocEntry); - } - - switch (re->getSize()) { - case 1: - Utils::writeU8(dst + codeOffset, static_cast(ptr & 0xFFU)); - break; - - case 4: - Utils::writeU32u(dst + codeOffset, static_cast(ptr & 0xFFFFFFFFU)); - break; - - case 8: - Utils::writeU64u(dst + codeOffset, ptr); - break; - - default: - return DebugUtils::errored(kErrorInvalidRelocEntry); - } - - // Handle the trampoline case. - if (useTrampoline) { - // Bytes that replace [REX, OPCODE] bytes. - uint32_t byte0 = 0xFF; - uint32_t byte1 = dst[codeOffset - 1]; - - if (byte1 == 0xE8) { - // Patch CALL/MOD byte to FF/2 (-> 0x15). - byte1 = x86EncodeMod(0, 2, 5); - } - else if (byte1 == 0xE9) { - // Patch JMP/MOD byte to FF/4 (-> 0x25). - byte1 = x86EncodeMod(0, 4, 5); - } - else { - return DebugUtils::errored(kErrorInvalidRelocEntry); - } - - // Patch `jmp/call` instruction. - ASMJIT_ASSERT(codeOffset >= 2); - dst[codeOffset - 2] = static_cast(byte0); - dst[codeOffset - 1] = static_cast(byte1); - - // Store absolute address and advance the trampoline pointer. - Utils::writeU64u(dst + trampOffset, re->getData()); - trampOffset += 8; - -#if !defined(ASMJIT_DISABLE_LOGGING) - if (logger) - logger->logf("[reloc] dq 0x%016llX ; Trampoline\n", re->getData()); -#endif // !ASMJIT_DISABLE_LOGGING - } - } - - // If there are no trampolines this is the same as `minCodeSize`. - return trampOffset; -} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/codeholder.h b/src/asmjit/base/codeholder.h deleted file mode 100644 index f753ecc..0000000 --- a/src/asmjit/base/codeholder.h +++ /dev/null @@ -1,748 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
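The trampoline branch of `relocate()` above rewrites a rel32 `call`/`jmp` whose absolute target no longer fits into 32 bits so that it goes through an 8-byte address slot appended after the code. The sketch below only reproduces the ModRM arithmetic behind `x86EncodeMod()` to show why the patched second byte becomes 0x15 (FF /2, `call [rip+disp32]`) or 0x25 (FF /4, `jmp [rip+disp32]`); it is not the library's relocation code and the helper name is made up for the example:

  #include <cstdint>
  #include <cstdio>

  // Same formula as x86EncodeMod(): mod (2 bits) | reg/opcode (3 bits) | r/m (3 bits).
  static uint32_t encodeMod(uint32_t m, uint32_t o, uint32_t rm) {
    return (m << 6) | (o << 3) | rm;
  }

  int main() {
    // mod=00 with r/m=101 selects [RIP+disp32] in 64-bit mode, so the original
    // E8/E9 (call/jmp rel32) encoding is replaced by FF /2 or FF /4.
    std::printf("call via memory: FF %02X\n", encodeMod(0, 2, 5)); // prints FF 15
    std::printf("jmp  via memory: FF %02X\n", encodeMod(0, 4, 5)); // prints FF 25
    return 0;
  }

The disp32 written by the relocation then points at the trampoline slot holding the full 64-bit target, which is what the logger reports as `[reloc] dq ... ; Trampoline` above.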
- -// [Guard] -#ifndef _ASMJIT_BASE_CODEHOLDER_H -#define _ASMJIT_BASE_CODEHOLDER_H - -// [Dependencies] -#include "../base/arch.h" -#include "../base/func.h" -#include "../base/logging.h" -#include "../base/operand.h" -#include "../base/simdtypes.h" -#include "../base/utils.h" -#include "../base/zone.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [Forward Declarations] -// ============================================================================ - -class Assembler; -class CodeEmitter; -class CodeHolder; - -// ============================================================================ -// [asmjit::AlignMode] -// ============================================================================ - -//! Align mode. -ASMJIT_ENUM(AlignMode) { - kAlignCode = 0, //!< Align executable code. - kAlignData = 1, //!< Align non-executable code. - kAlignZero = 2, //!< Align by a sequence of zeros. - kAlignCount //!< Count of alignment modes. -}; - -// ============================================================================ -// [asmjit::ErrorHandler] -// ============================================================================ - -//! Error handler can be used to override the default behavior of error handling -//! available to all classes that inherit \ref CodeEmitter. See \ref handleError(). -class ASMJIT_VIRTAPI ErrorHandler { -public: - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `ErrorHandler` instance. - ASMJIT_API ErrorHandler() noexcept; - //! Destroy the `ErrorHandler` instance. - ASMJIT_API virtual ~ErrorHandler() noexcept; - - // -------------------------------------------------------------------------- - // [Handle Error] - // -------------------------------------------------------------------------- - - //! Error handler (abstract). - //! - //! Error handler is called after an error happened and before it's propagated - //! to the caller. There are multiple ways how the error handler can be used: - //! - //! 1. Returning `true` or `false` from `handleError()`. If `true` is returned - //! it means that the error was reported and AsmJit can continue execution. - //! The reported error still be propagated to the caller, but won't put the - //! CodeEmitter into an error state (it won't set last-error). However, - //! returning `false` means that the error cannot be handled - in such case - //! it stores the error, which can be then retrieved by using `getLastError()`. - //! Returning `false` is the default behavior when no error handler is present. - //! To put the assembler into a non-error state again a `resetLastError()` must - //! be called. - //! - //! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely - //! exception-safe, but you can throw exception from your error handler if - //! this way is the preferred way of handling errors in your project. Throwing - //! an exception acts virtually as returning `true` as AsmJit won't be able - //! to store the error because the exception changes execution path. - //! - //! 3. Using plain old C's `setjmp()` and `longjmp()`. Asmjit always puts - //! `CodeEmitter` to a consistent state before calling the `handleError()` - //! so `longjmp()` can be used without any issues to cancel the code - //! 
generation if an error occurred. There is no difference between - //! exceptions and longjmp() from AsmJit's perspective. - virtual bool handleError(Error err, const char* message, CodeEmitter* origin) = 0; -}; - -// ============================================================================ -// [asmjit::CodeInfo] -// ============================================================================ - -//! Basic information about a code (or target). It describes its architecture, -//! code generation mode (or optimization level), and base address. -class CodeInfo { -public: - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE CodeInfo() noexcept - : _archInfo(), - _stackAlignment(0), - _cdeclCallConv(CallConv::kIdNone), - _stdCallConv(CallConv::kIdNone), - _fastCallConv(CallConv::kIdNone), - _baseAddress(Globals::kNoBaseAddress) {} - ASMJIT_INLINE CodeInfo(const CodeInfo& other) noexcept { init(other); } - - explicit ASMJIT_INLINE CodeInfo(uint32_t archType, uint32_t archMode = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept - : _archInfo(archType, archMode), - _packedMiscInfo(0), - _baseAddress(baseAddress) {} - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool isInitialized() const noexcept { - return _archInfo._type != ArchInfo::kTypeNone; - } - - ASMJIT_INLINE void init(const CodeInfo& other) noexcept { - _archInfo = other._archInfo; - _packedMiscInfo = other._packedMiscInfo; - _baseAddress = other._baseAddress; - } - - ASMJIT_INLINE void init(uint32_t archType, uint32_t archMode = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept { - _archInfo.init(archType, archMode); - _packedMiscInfo = 0; - _baseAddress = baseAddress; - } - - ASMJIT_INLINE void reset() noexcept { - _archInfo.reset(); - _stackAlignment = 0; - _cdeclCallConv = CallConv::kIdNone; - _stdCallConv = CallConv::kIdNone; - _fastCallConv = CallConv::kIdNone; - _baseAddress = Globals::kNoBaseAddress; - } - - // -------------------------------------------------------------------------- - // [Architecture Information] - // -------------------------------------------------------------------------- - - //! Get architecture information, see \ref ArchInfo. - ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _archInfo; } - - //! Get architecture type, see \ref ArchInfo::Type. - ASMJIT_INLINE uint32_t getArchType() const noexcept { return _archInfo.getType(); } - //! Get architecture sub-type, see \ref ArchInfo::SubType. - ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _archInfo.getSubType(); } - //! Get a size of a GP register of the architecture the code is using. - ASMJIT_INLINE uint32_t getGpSize() const noexcept { return _archInfo.getGpSize(); } - //! Get number of GP registers available of the architecture the code is using. - ASMJIT_INLINE uint32_t getGpCount() const noexcept { return _archInfo.getGpCount(); } - - // -------------------------------------------------------------------------- - // [High-Level Information] - // -------------------------------------------------------------------------- - - //! Get a natural stack alignment that must be honored (or 0 if not known). - ASMJIT_INLINE uint32_t getStackAlignment() const noexcept { return _stackAlignment; } - //! 
Set a natural stack alignment that must be honored. - ASMJIT_INLINE void setStackAlignment(uint8_t sa) noexcept { _stackAlignment = static_cast(sa); } - - ASMJIT_INLINE uint32_t getCdeclCallConv() const noexcept { return _cdeclCallConv; } - ASMJIT_INLINE void setCdeclCallConv(uint32_t cc) noexcept { _cdeclCallConv = static_cast(cc); } - - ASMJIT_INLINE uint32_t getStdCallConv() const noexcept { return _stdCallConv; } - ASMJIT_INLINE void setStdCallConv(uint32_t cc) noexcept { _stdCallConv = static_cast(cc); } - - ASMJIT_INLINE uint32_t getFastCallConv() const noexcept { return _fastCallConv; } - ASMJIT_INLINE void setFastCallConv(uint32_t cc) noexcept { _fastCallConv = static_cast(cc); } - - // -------------------------------------------------------------------------- - // [Addressing Information] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; } - ASMJIT_INLINE uint64_t getBaseAddress() const noexcept { return _baseAddress; } - ASMJIT_INLINE void setBaseAddress(uint64_t p) noexcept { _baseAddress = p; } - ASMJIT_INLINE void resetBaseAddress() noexcept { _baseAddress = Globals::kNoBaseAddress; } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE CodeInfo& operator=(const CodeInfo& other) noexcept { init(other); return *this; } - ASMJIT_INLINE bool operator==(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) == 0; } - ASMJIT_INLINE bool operator!=(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) != 0; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - ArchInfo _archInfo; //!< Architecture information. - - union { - struct { - uint8_t _stackAlignment; //!< Natural stack alignment (ARCH+OS). - uint8_t _cdeclCallConv; //!< Default CDECL calling convention. - uint8_t _stdCallConv; //!< Default STDCALL calling convention. - uint8_t _fastCallConv; //!< Default FASTCALL calling convention. - }; - uint32_t _packedMiscInfo; //!< \internal - }; - - uint64_t _baseAddress; //!< Base address. -}; - -// ============================================================================ -// [asmjit::CodeBuffer] -// ============================================================================ - -//! Code or data buffer. 
-struct CodeBuffer { - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool hasData() const noexcept { return _data != nullptr; } - ASMJIT_INLINE uint8_t* getData() noexcept { return _data; } - ASMJIT_INLINE const uint8_t* getData() const noexcept { return _data; } - - ASMJIT_INLINE size_t getLength() const noexcept { return _length; } - ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; } - - ASMJIT_INLINE bool isExternal() const noexcept { return _isExternal; } - ASMJIT_INLINE bool isFixedSize() const noexcept { return _isFixedSize; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint8_t* _data; //!< The content of the buffer (data). - size_t _length; //!< Number of bytes of `data` used. - size_t _capacity; //!< Buffer capacity (in bytes). - bool _isExternal; //!< True if this is external buffer. - bool _isFixedSize; //!< True if this buffer cannot grow. -}; - -// ============================================================================ -// [asmjit::SectionEntry] -// ============================================================================ - -//! Section entry. -class SectionEntry { -public: - ASMJIT_ENUM(Id) { - kInvalidId = 0xFFFFFFFFU //!< Invalid section id. - }; - - //! Section flags. - ASMJIT_ENUM(Flags) { - kFlagExec = 0x00000001U, //!< Executable (.text sections). - kFlagConst = 0x00000002U, //!< Read-only (.text and .data sections). - kFlagZero = 0x00000004U, //!< Zero initialized by the loader (BSS). - kFlagInfo = 0x00000008U, //!< Info / comment flag. - kFlagImplicit = 0x80000000U //!< Section created implicitly (can be deleted by the Runtime). 
- }; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE uint32_t getId() const noexcept { return _id; } - ASMJIT_INLINE const char* getName() const noexcept { return _name; } - - ASMJIT_INLINE void _setDefaultName( - char c0 = 0, char c1 = 0, char c2 = 0, char c3 = 0, - char c4 = 0, char c5 = 0, char c6 = 0, char c7 = 0) noexcept { - _nameAsU32[0] = Utils::pack32_4x8(c0, c1, c2, c3); - _nameAsU32[1] = Utils::pack32_4x8(c4, c5, c6, c7); - } - - ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; } - ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; } - ASMJIT_INLINE void addFlags(uint32_t flags) noexcept { _flags |= flags; } - ASMJIT_INLINE void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; } - - ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; } - ASMJIT_INLINE void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; } - - ASMJIT_INLINE size_t getPhysicalSize() const noexcept { return _buffer.getLength(); } - - ASMJIT_INLINE size_t getVirtualSize() const noexcept { return _virtualSize; } - ASMJIT_INLINE void setVirtualSize(uint32_t size) noexcept { _virtualSize = size; } - - ASMJIT_INLINE CodeBuffer& getBuffer() noexcept { return _buffer; } - ASMJIT_INLINE const CodeBuffer& getBuffer() const noexcept { return _buffer; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint32_t _id; //!< Section id. - uint32_t _flags; //!< Section flags. - uint32_t _alignment; //!< Section alignment requirements (0 if no requirements). - uint32_t _virtualSize; //!< Virtual size of the section (zero initialized mostly). - union { - char _name[36]; //!< Section name (max 35 characters, PE allows max 8). - uint32_t _nameAsU32[36 / 4]; //!< Section name as `uint32_t[]` (only optimization). - }; - CodeBuffer _buffer; //!< Code or data buffer. -}; - -// ============================================================================ -// [asmjit::LabelLink] -// ============================================================================ - -//! Data structure used to link labels. -struct LabelLink { - LabelLink* prev; //!< Previous link (single-linked list). - uint32_t sectionId; //!< Section id. - uint32_t relocId; //!< Relocation id or RelocEntry::kInvalidId. - size_t offset; //!< Label offset relative to the start of the section. - intptr_t rel; //!< Inlined rel8/rel32. -}; - -// ============================================================================ -// [asmjit::LabelEntry] -// ============================================================================ - -//! Label entry. -//! -//! Contains the following properties: -//! * Label id - This is the only thing that is set to the `Label` operand. -//! * Label name - Optional, used mostly to create executables and libraries. -//! * Label type - Type of the label, default `Label::kTypeAnonymous`. -//! * Label parent id - Derived from many assemblers that allow to define a -//! local label that falls under a global label. This allows to define -//! many labels of the same name that have different parent (global) label. -//! * Offset - offset of the label bound by `Assembler`. -//! * Links - single-linked list that contains locations of code that has -//! 
to be patched when the label gets bound. Every use of unbound label -//! adds one link to `_links` list. -//! * HVal - Hash value of label's name and optionally parentId. -//! * HashNext - Hash-table implementation detail. -class LabelEntry : public ZoneHashNode { -public: - // NOTE: Label id is stored in `_customData`, which is provided by ZoneHashNode - // to fill a padding that a C++ compiler targeting 64-bit CPU will add to align - // the structure to 64-bits. - - //! Get label id. - ASMJIT_INLINE uint32_t getId() const noexcept { return _customData; } - //! Set label id (internal, used only by \ref CodeHolder). - ASMJIT_INLINE void _setId(uint32_t id) noexcept { _customData = id; } - - //! Get label type, see \ref Label::Type. - ASMJIT_INLINE uint32_t getType() const noexcept { return _type; } - //! Get label flags, returns 0 at the moment. - ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; } - - ASMJIT_INLINE bool hasParent() const noexcept { return _parentId != 0; } - //! Get label's parent id. - ASMJIT_INLINE uint32_t getParentId() const noexcept { return _parentId; } - - //! Get label's section id where it's bound to (or `SectionEntry::kInvalidId` if it's not bound yet). - ASMJIT_INLINE uint32_t getSectionId() const noexcept { return _sectionId; } - - //! Get if the label has name. - ASMJIT_INLINE bool hasName() const noexcept { return !_name.isEmpty(); } - - //! Get the label's name. - //! - //! NOTE: Local labels will return their local name without their parent - //! part, for example ".L1". - ASMJIT_INLINE const char* getName() const noexcept { return _name.getData(); } - - //! Get length of label's name. - //! - //! NOTE: Label name is always null terminated, so you can use `strlen()` to - //! get it, however, it's also cached in `LabelEntry`, so if you want to know - //! the length the easiest way is to use `LabelEntry::getNameLength()`. - ASMJIT_INLINE size_t getNameLength() const noexcept { return _name.getLength(); } - - //! Get if the label is bound. - ASMJIT_INLINE bool isBound() const noexcept { return _sectionId != SectionEntry::kInvalidId; } - //! Get the label offset (only useful if the label is bound). - ASMJIT_INLINE intptr_t getOffset() const noexcept { return _offset; } - - //! Get the hash-value of label's name and its parent label (if any). - //! - //! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function - //! is implemented in `Utils::hashString()` and `Utils::hashRound()`. - ASMJIT_INLINE uint32_t getHVal() const noexcept { return _hVal; } - - // ------------------------------------------------------------------------ - // [Members] - // ------------------------------------------------------------------------ - - // Let's round the size of `LabelEntry` to 64 bytes (as ZoneHeap has 32 - // bytes granularity anyway). This gives `_name` the remaining space, which - // is roughly 16 bytes on 64-bit and 28 bytes on 32-bit architectures. - enum { kNameBytes = 64 - (sizeof(ZoneHashNode) + 16 + sizeof(intptr_t) + sizeof(LabelLink*)) }; - - uint8_t _type; //!< Label type, see Label::Type. - uint8_t _flags; //!< Must be zero. - uint16_t _reserved16; //!< Reserved. - uint32_t _parentId; //!< Label parent id or zero. - uint32_t _sectionId; //!< Section id or `SectionEntry::kInvalidId`. - uint32_t _reserved32; //!< Reserved. - intptr_t _offset; //!< Label offset. - LabelLink* _links; //!< Label links. - SmallString _name; //!< Label name. 
-}; - -// ============================================================================ -// [asmjit::RelocEntry] -// ============================================================================ - -//! Relocation entry. -struct RelocEntry { - ASMJIT_ENUM(Id) { - kInvalidId = 0xFFFFFFFFU //!< Invalid relocation id. - }; - - //! Relocation type. - ASMJIT_ENUM(Type) { - kTypeNone = 0, //!< Deleted entry (no relocation). - kTypeAbsToAbs = 1, //!< Relocate absolute to absolute. - kTypeRelToAbs = 2, //!< Relocate relative to absolute. - kTypeAbsToRel = 3, //!< Relocate absolute to relative. - kTypeTrampoline = 4 //!< Relocate absolute to relative or use trampoline. - }; - - // ------------------------------------------------------------------------ - // [Accessors] - // ------------------------------------------------------------------------ - - ASMJIT_INLINE uint32_t getId() const noexcept { return _id; } - - ASMJIT_INLINE uint32_t getType() const noexcept { return _type; } - ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; } - - ASMJIT_INLINE uint32_t getSourceSectionId() const noexcept { return _sourceSectionId; } - ASMJIT_INLINE uint32_t getTargetSectionId() const noexcept { return _targetSectionId; } - - ASMJIT_INLINE uint64_t getSourceOffset() const noexcept { return _sourceOffset; } - ASMJIT_INLINE uint64_t getData() const noexcept { return _data; } - - // ------------------------------------------------------------------------ - // [Members] - // ------------------------------------------------------------------------ - - uint32_t _id; //!< Relocation id. - uint8_t _type; //!< Type of the relocation. - uint8_t _size; //!< Size of the relocation (1, 2, 4 or 8 bytes). - uint8_t _reserved[2]; //!< Reserved. - uint32_t _sourceSectionId; //!< Source section id. - uint32_t _targetSectionId; //!< Destination section id. - uint64_t _sourceOffset; //!< Source offset (relative to start of the section). - uint64_t _data; //!< Relocation data (target offset, target address, etc). -}; - -// ============================================================================ -// [asmjit::CodeHolder] -// ============================================================================ - -//! Contains basic information about the target architecture plus its settings, -//! and holds code & data (including sections, labels, and relocation information). -//! CodeHolder can store both binary and intermediate representation of assembly, -//! which can be generated by \ref Assembler and/or \ref CodeBuilder. -//! -//! NOTE: CodeHolder has ability to attach an \ref ErrorHandler, however, this -//! error handler is not triggered by CodeHolder itself, it's only used by the -//! attached code generators. -class CodeHolder { -public: - ASMJIT_NONCOPYABLE(CodeHolder) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create an uninitialized CodeHolder (you must init() it before it can be used). - ASMJIT_API CodeHolder() noexcept; - //! Destroy the CodeHolder. - ASMJIT_API ~CodeHolder() noexcept; - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool isInitialized() const noexcept { return _codeInfo.isInitialized(); } - - //! Initialize to CodeHolder to hold code described by `codeInfo`. 
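Because the class comment above describes CodeHolder only in terms of init, attach and relocate, a rough usage sketch may help; it is written from memory of the legacy interface being removed here, so `JitRuntime::getCodeInfo()`, `X86Assembler`, `x86::eax` and the `<asmjit/asmjit.h>` include path are assumptions rather than guaranteed API:

  #include <asmjit/asmjit.h>
  using namespace asmjit;

  int main() {
    JitRuntime rt;                       // supplies CodeInfo and executable memory
    CodeHolder code;
    code.init(rt.getCodeInfo());         // init() with the target description

    X86Assembler a(&code);               // the emitter constructor calls attach()
    a.mov(x86::eax, 1);
    a.ret();

    typedef int (*Func)(void);
    Func fn;
    if (rt.add(&fn, &code) != kErrorOk)  // copies and relocates the code
      return 1;

    int result = fn();                   // returns 1
    rt.release(fn);
    return result == 1 ? 0 : 1;
  }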
- ASMJIT_API Error init(const CodeInfo& info) noexcept; - //! Detach all code-generators attached and reset the \ref CodeHolder. - ASMJIT_API void reset(bool releaseMemory = false) noexcept; - - // -------------------------------------------------------------------------- - // [Attach / Detach] - // -------------------------------------------------------------------------- - - //! Attach a \ref CodeEmitter to this \ref CodeHolder. - ASMJIT_API Error attach(CodeEmitter* emitter) noexcept; - //! Detach a \ref CodeEmitter from this \ref CodeHolder. - ASMJIT_API Error detach(CodeEmitter* emitter) noexcept; - - // -------------------------------------------------------------------------- - // [Sync] - // -------------------------------------------------------------------------- - - //! Synchronize all states of all `CodeEmitter`s associated with the CodeHolder. - //! This is required as some code generators don't sync every time they do - //! something - for example \ref Assembler generally syncs when it needs to - //! reallocate the \ref CodeBuffer, but not each time it encodes instruction - //! or directive. - ASMJIT_API void sync() noexcept; - - // -------------------------------------------------------------------------- - // [Code-Information] - // -------------------------------------------------------------------------- - - //! Get code/target information, see \ref CodeInfo. - ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; } - //! Get architecture information, see \ref ArchInfo. - ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _codeInfo.getArchInfo(); } - - //! Get the target's architecture type. - ASMJIT_INLINE uint32_t getArchType() const noexcept { return getArchInfo().getType(); } - //! Get the target's architecture sub-type. - ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return getArchInfo().getSubType(); } - - //! Get if a static base-address is set. - ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _codeInfo.hasBaseAddress(); } - //! Get a static base-address (uint64_t). - ASMJIT_INLINE uint64_t getBaseAddress() const noexcept { return _codeInfo.getBaseAddress(); } - - // -------------------------------------------------------------------------- - // [Global Information] - // -------------------------------------------------------------------------- - - //! Get global hints, internally propagated to all `CodeEmitter`s attached. - ASMJIT_INLINE uint32_t getGlobalHints() const noexcept { return _globalHints; } - //! Get global options, internally propagated to all `CodeEmitter`s attached. - ASMJIT_INLINE uint32_t getGlobalOptions() const noexcept { return _globalOptions; } - - // -------------------------------------------------------------------------- - // [Result Information] - // -------------------------------------------------------------------------- - - //! Get the size code & data of all sections. - ASMJIT_API size_t getCodeSize() const noexcept; - - //! Get size of all possible trampolines. - //! - //! Trampolines are needed to successfully generate relative jumps to absolute - //! addresses. This value is only non-zero if jmp of call instructions were - //! used with immediate operand (this means jumping or calling an absolute - //! address directly). 
- ASMJIT_INLINE size_t getTrampolinesSize() const noexcept { return _trampolinesSize; } - - // -------------------------------------------------------------------------- - // [Logging & Error Handling] - // -------------------------------------------------------------------------- - -#if !defined(ASMJIT_DISABLE_LOGGING) - //! Get if a logger attached. - ASMJIT_INLINE bool hasLogger() const noexcept { return _logger != nullptr; } - //! Get the attached logger. - ASMJIT_INLINE Logger* getLogger() const noexcept { return _logger; } - //! Attach a `logger` to CodeHolder and propagate it to all attached `CodeEmitter`s. - ASMJIT_API void setLogger(Logger* logger) noexcept; - //! Reset the logger (does nothing if not attached). - ASMJIT_INLINE void resetLogger() noexcept { setLogger(nullptr); } -#endif // !ASMJIT_DISABLE_LOGGING - - //! Get if error-handler is attached. - ASMJIT_INLINE bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; } - //! Get the error-handler. - ASMJIT_INLINE ErrorHandler* getErrorHandler() const noexcept { return _errorHandler; } - //! Set the error handler, will affect all attached `CodeEmitter`s. - ASMJIT_API Error setErrorHandler(ErrorHandler* handler) noexcept; - //! Reset the error handler (does nothing if not attached). - ASMJIT_INLINE void resetErrorHandler() noexcept { setErrorHandler(nullptr); } - - // -------------------------------------------------------------------------- - // [Sections] - // -------------------------------------------------------------------------- - - //! Get array of `SectionEntry*` records. - ASMJIT_INLINE const ZoneVector& getSections() const noexcept { return _sections; } - - //! Get a section entry of the given index. - ASMJIT_INLINE SectionEntry* getSectionEntry(size_t index) const noexcept { return _sections[index]; } - - ASMJIT_API Error growBuffer(CodeBuffer* cb, size_t n) noexcept; - ASMJIT_API Error reserveBuffer(CodeBuffer* cb, size_t n) noexcept; - - // -------------------------------------------------------------------------- - // [Labels & Symbols] - // -------------------------------------------------------------------------- - - //! Create a new anonymous label and return its id in `idOut`. - //! - //! Returns `Error`, does not report error to \ref ErrorHandler. - ASMJIT_API Error newLabelId(uint32_t& idOut) noexcept; - - //! Create a new named label label-type `type`. - //! - //! Returns `Error`, does not report error to \ref ErrorHandler. - ASMJIT_API Error newNamedLabelId(uint32_t& idOut, const char* name, size_t nameLength, uint32_t type, uint32_t parentId) noexcept; - - //! Get a label id by name. - ASMJIT_API uint32_t getLabelIdByName(const char* name, size_t nameLength = Globals::kInvalidIndex, uint32_t parentId = 0) noexcept; - - //! Create a new label-link used to store information about yet unbound labels. - //! - //! Returns `null` if the allocation failed. - ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept; - - //! Get array of `LabelEntry*` records. - ASMJIT_INLINE const ZoneVector& getLabelEntries() const noexcept { return _labels; } - - //! Get number of labels created. - ASMJIT_INLINE size_t getLabelsCount() const noexcept { return _labels.getLength(); } - - //! Get number of label references, which are unresolved at the moment. - ASMJIT_INLINE size_t getUnresolvedLabelsCount() const noexcept { return _unresolvedLabelsCount; } - - //! Get if the `label` is valid (i.e. created by `newLabelId()`). 
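The named-label functions declared above can be exercised directly on a CodeHolder; the sketch below uses the `newNamedLabelId()` and `getLabelIdByName()` signatures as declared here, while the emitter type, the `Label(id)` constructor, the include path and the helper name are assumptions of the example:

  #include <asmjit/asmjit.h>
  using namespace asmjit;

  // `code` is an initialized CodeHolder and `a` an emitter attached to it.
  static Error bindNamedEntry(CodeHolder& code, X86Assembler& a) {
    uint32_t labelId;
    // Arguments: idOut, name, nameLength, type, parentId (as declared above).
    ASMJIT_PROPAGATE(code.newNamedLabelId(labelId, "entry", 5, Label::kTypeGlobal, 0));

    Label entry(labelId);                // wrap the id in a Label operand (assumed ctor)
    a.bind(entry);                       // binding resolves the LabelLinks recorded so far
    a.ret();

    // Lookup by name; an id of 0 means the label does not exist.
    uint32_t found = code.getLabelIdByName("entry");
    return found == labelId ? kErrorOk : DebugUtils::errored(kErrorInvalidState);
  }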
- ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept { - return isLabelValid(label.getId()); - } - //! Get if the label having `id` is valid (i.e. created by `newLabelId()`). - ASMJIT_INLINE bool isLabelValid(uint32_t labelId) const noexcept { - size_t index = Operand::unpackId(labelId); - return index < _labels.getLength(); - } - - //! Get if the `label` is already bound. - //! - //! Returns `false` if the `label` is not valid. - ASMJIT_INLINE bool isLabelBound(const Label& label) const noexcept { - return isLabelBound(label.getId()); - } - //! \overload - ASMJIT_INLINE bool isLabelBound(uint32_t id) const noexcept { - size_t index = Operand::unpackId(id); - return index < _labels.getLength() && _labels[index]->isBound(); - } - - //! Get a `label` offset or -1 if the label is not yet bound. - ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const noexcept { - return getLabelOffset(label.getId()); - } - //! \overload - ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const noexcept { - ASMJIT_ASSERT(isLabelValid(id)); - return _labels[Operand::unpackId(id)]->getOffset(); - } - - //! Get information about the given `label`. - ASMJIT_INLINE LabelEntry* getLabelEntry(const Label& label) const noexcept { - return getLabelEntry(label.getId()); - } - //! Get information about a label having the given `id`. - ASMJIT_INLINE LabelEntry* getLabelEntry(uint32_t id) const noexcept { - size_t index = static_cast(Operand::unpackId(id)); - return index < _labels.getLength() ? _labels[index] : static_cast(nullptr); - } - - // -------------------------------------------------------------------------- - // [Relocations] - // -------------------------------------------------------------------------- - - //! Create a new relocation entry of type `type` and size `size`. - //! - //! Additional fields can be set after the relocation entry was created. - ASMJIT_API Error newRelocEntry(RelocEntry** dst, uint32_t type, uint32_t size) noexcept; - - //! Get if the code contains relocations. - ASMJIT_INLINE bool hasRelocations() const noexcept { return !_relocations.isEmpty(); } - //! Get array of `RelocEntry*` records. - ASMJIT_INLINE const ZoneVector& getRelocEntries() const noexcept { return _relocations; } - - ASMJIT_INLINE RelocEntry* getRelocEntry(uint32_t id) const noexcept { return _relocations[id]; } - - //! Relocate the code to `baseAddress` and copy it to `dst`. - //! - //! \param dst Contains the location where the relocated code should be - //! copied. The pointer can be address returned by virtual memory allocator - //! or any other address that has sufficient space. - //! - //! \param baseAddress Base address used for relocation. `JitRuntime` always - //! sets the `baseAddress` to be the same as `dst`. - //! - //! \return The number bytes actually used. If the code emitter reserved - //! space for possible trampolines, but didn't use it, the number of bytes - //! used can actually be less than the expected worst case. Virtual memory - //! allocator can shrink the memory it allocated initially. - //! - //! A given buffer will be overwritten, to get the number of bytes required, - //! use `getCodeSize()`. - ASMJIT_API size_t relocate(void* dst, uint64_t baseAddress = Globals::kNoBaseAddress) const noexcept; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - CodeInfo _codeInfo; //!< Basic information about the code (architecture and other info). 
- - uint32_t _globalHints; //!< Global hints, propagated to all `CodeEmitter`s. - uint32_t _globalOptions; //!< Global options, propagated to all `CodeEmitter`s. - - CodeEmitter* _emitters; //!< Linked-list of all attached `CodeEmitter`s. - Assembler* _cgAsm; //!< Attached \ref Assembler (only one at a time). - - Logger* _logger; //!< Attached \ref Logger, used by all consumers. - ErrorHandler* _errorHandler; //!< Attached \ref ErrorHandler. - - uint32_t _unresolvedLabelsCount; //!< Count of label references which were not resolved. - uint32_t _trampolinesSize; //!< Size of all possible trampolines. - - Zone _baseZone; //!< Base zone (used to allocate core structures). - Zone _dataZone; //!< Data zone (used to allocate extra data like label names). - ZoneHeap _baseHeap; //!< Zone allocator, used to manage internal containers. - - ZoneVector _sections; //!< Section entries. - ZoneVector _labels; //!< Label entries (each label is stored here). - ZoneVector _relocations; //!< Relocation entries. - ZoneHash _namedLabels; //!< Label name -> LabelEntry (only named labels). -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_CODEHOLDER_H diff --git a/src/asmjit/base/constpool.cpp b/src/asmjit/base/constpool.cpp deleted file mode 100644 index 799abd1..0000000 --- a/src/asmjit/base/constpool.cpp +++ /dev/null @@ -1,511 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/constpool.h" -#include "../base/utils.h" - -#include - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// Binary tree code is based on Julienne Walker's "Andersson Binary Trees" -// article and implementation. However, only three operations are implemented - -// get, insert and traverse. - -// ============================================================================ -// [asmjit::ConstPool::Tree - Ops] -// ============================================================================ - -//! \internal -//! -//! Remove left horizontal links. -static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* node) noexcept { - ConstPool::Node* link = node->_link[0]; - uint32_t level = node->_level; - - if (level != 0 && link && link->_level == level) { - node->_link[0] = link->_link[1]; - link->_link[1] = node; - - node = link; - } - - return node; -} - -//! \internal -//! -//! Remove consecutive horizontal links. 
-static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* node) noexcept { - ConstPool::Node* link = node->_link[1]; - uint32_t level = node->_level; - - if (level != 0 && link && link->_link[1] && link->_link[1]->_level == level) { - node->_link[1] = link->_link[0]; - link->_link[0] = node; - - node = link; - node->_level++; - } - - return node; -} - -ConstPool::Node* ConstPool::Tree::get(const void* data) noexcept { - ConstPool::Node* node = _root; - size_t dataSize = _dataSize; - - while (node) { - int c = ::memcmp(node->getData(), data, dataSize); - if (c == 0) - return node; - node = node->_link[c < 0]; - } - - return nullptr; -} - -void ConstPool::Tree::put(ConstPool::Node* newNode) noexcept { - size_t dataSize = _dataSize; - _length++; - - if (!_root) { - _root = newNode; - return; - } - - ConstPool::Node* node = _root; - ConstPool::Node* stack[kHeightLimit]; - - unsigned int top = 0; - unsigned int dir; - - // Find a spot and save the stack. - for (;;) { - stack[top++] = node; - dir = ::memcmp(node->getData(), newNode->getData(), dataSize) < 0; - - ConstPool::Node* link = node->_link[dir]; - if (!link) break; - - node = link; - } - - // Link and rebalance. - node->_link[dir] = newNode; - - while (top > 0) { - // Which child? - node = stack[--top]; - - if (top != 0) { - dir = stack[top - 1]->_link[1] == node; - } - - node = ConstPoolTree_skewNode(node); - node = ConstPoolTree_splitNode(node); - - // Fix the parent. - if (top != 0) - stack[top - 1]->_link[dir] = node; - else - _root = node; - } -} - -// ============================================================================ -// [asmjit::ConstPool - Construction / Destruction] -// ============================================================================ - -ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); } -ConstPool::~ConstPool() noexcept {} - -// ============================================================================ -// [asmjit::ConstPool - Reset] -// ============================================================================ - -void ConstPool::reset(Zone* zone) noexcept { - _zone = zone; - - size_t dataSize = 1; - for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) { - _tree[i].reset(); - _tree[i].setDataSize(dataSize); - _gaps[i] = nullptr; - dataSize <<= 1; - } - - _gapPool = nullptr; - _size = 0; - _alignment = 0; -} - -// ============================================================================ -// [asmjit::ConstPool - Ops] -// ============================================================================ - -static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept { - ConstPool::Gap* gap = self->_gapPool; - if (!gap) return self->_zone->allocT(); - - self->_gapPool = gap->_next; - return gap; -} - -static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept { - gap->_next = self->_gapPool; - self->_gapPool = gap; -} - -static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) noexcept { - ASMJIT_ASSERT(length > 0); - - while (length > 0) { - size_t gapIndex; - size_t gapLength; - - gapIndex = ConstPool::kIndex16; - if (length >= 16 && Utils::isAligned(offset, 16)) { - gapLength = 16; - } - else if (length >= 8 && Utils::isAligned(offset, 8)) { - gapIndex = ConstPool::kIndex8; - gapLength = 8; - } - else if (length >= 4 && Utils::isAligned(offset, 4)) { - gapIndex = ConstPool::kIndex4; - gapLength = 4; - } - else if (length >= 2 && Utils::isAligned(offset, 2)) { - gapIndex = ConstPool::kIndex2; - gapLength = 
2; - } - else { - gapIndex = ConstPool::kIndex1; - gapLength = 1; - } - - // We don't have to check for errors here, if this failed nothing really - // happened (just the gap won't be visible) and it will fail again at - // place where checking will cause kErrorNoHeapMemory. - ConstPool::Gap* gap = ConstPool_allocGap(self); - if (!gap) return; - - gap->_next = self->_gaps[gapIndex]; - self->_gaps[gapIndex] = gap; - - gap->_offset = offset; - gap->_length = gapLength; - - offset += gapLength; - length -= gapLength; - } -} - -Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept { - size_t treeIndex; - - if (size == 32) - treeIndex = kIndex32; - else if (size == 16) - treeIndex = kIndex16; - else if (size == 8) - treeIndex = kIndex8; - else if (size == 4) - treeIndex = kIndex4; - else if (size == 2) - treeIndex = kIndex2; - else if (size == 1) - treeIndex = kIndex1; - else - return DebugUtils::errored(kErrorInvalidArgument); - - ConstPool::Node* node = _tree[treeIndex].get(data); - if (node) { - dstOffset = node->_offset; - return kErrorOk; - } - - // Before incrementing the current offset try if there is a gap that can - // be used for the requested data. - size_t offset = ~static_cast(0); - size_t gapIndex = treeIndex; - - while (gapIndex != kIndexCount - 1) { - ConstPool::Gap* gap = _gaps[treeIndex]; - - // Check if there is a gap. - if (gap) { - size_t gapOffset = gap->_offset; - size_t gapLength = gap->_length; - - // Destroy the gap for now. - _gaps[treeIndex] = gap->_next; - ConstPool_freeGap(this, gap); - - offset = gapOffset; - ASMJIT_ASSERT(Utils::isAligned(offset, size)); - - gapLength -= size; - if (gapLength > 0) - ConstPool_addGap(this, gapOffset, gapLength); - } - - gapIndex++; - } - - if (offset == ~static_cast(0)) { - // Get how many bytes have to be skipped so the address is aligned accordingly - // to the 'size'. - size_t diff = Utils::alignDiff(_size, size); - - if (diff != 0) { - ConstPool_addGap(this, _size, diff); - _size += diff; - } - - offset = _size; - _size += size; - } - - // Add the initial node to the right index. - node = ConstPool::Tree::_newNode(_zone, data, size, offset, false); - if (!node) return DebugUtils::errored(kErrorNoHeapMemory); - - _tree[treeIndex].put(node); - _alignment = std::max(_alignment, size); - - dstOffset = offset; - - // Now create a bunch of shared constants that are based on the data pattern. - // We stop at size 4, it probably doesn't make sense to split constants down - // to 1 byte. 
- size_t pCount = 1; - while (size > 4) { - size >>= 1; - pCount <<= 1; - - ASMJIT_ASSERT(treeIndex != 0); - treeIndex--; - - const uint8_t* pData = static_cast(data); - for (size_t i = 0; i < pCount; i++, pData += size) { - node = _tree[treeIndex].get(pData); - if (node) continue; - - node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true); - _tree[treeIndex].put(node); - } - } - - return kErrorOk; -} - -// ============================================================================ -// [asmjit::ConstPool - Reset] -// ============================================================================ - -struct ConstPoolFill { - ASMJIT_INLINE ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept : - _dst(dst), - _dataSize(dataSize) {} - - ASMJIT_INLINE void visit(const ConstPool::Node* node) noexcept { - if (!node->_shared) - ::memcpy(_dst + node->_offset, node->getData(), _dataSize); - } - - uint8_t* _dst; - size_t _dataSize; -}; - -void ConstPool::fill(void* dst) const noexcept { - // Clears possible gaps, asmjit should never emit garbage to the output. - ::memset(dst, 0, _size); - - ConstPoolFill filler(static_cast(dst), 1); - for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) { - _tree[i].iterate(filler); - filler._dataSize <<= 1; - } -} - -// ============================================================================ -// [asmjit::ConstPool - Test] -// ============================================================================ - -#if defined(ASMJIT_TEST) -UNIT(base_constpool) { - Zone zone(32384 - Zone::kZoneOverhead); - ConstPool pool(&zone); - - uint32_t i; - uint32_t kCount = 1000000; - - INFO("Adding %u constants to the pool.", kCount); - { - size_t prevOffset; - size_t curOffset; - uint64_t c = ASMJIT_UINT64_C(0x0101010101010101); - - EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk, - "pool.add() - Returned error"); - EXPECT(prevOffset == 0, - "pool.add() - First constant should have zero offset"); - - for (i = 1; i < kCount; i++) { - c++; - EXPECT(pool.add(&c, 8, curOffset) == kErrorOk, - "pool.add() - Returned error"); - EXPECT(prevOffset + 8 == curOffset, - "pool.add() - Returned incorrect curOffset"); - EXPECT(pool.getSize() == (i + 1) * 8, - "pool.getSize() - Reported incorrect size"); - prevOffset = curOffset; - } - - EXPECT(pool.getAlignment() == 8, - "pool.getAlignment() - Expected 8-byte alignment"); - } - - INFO("Retrieving %u constants from the pool.", kCount); - { - uint64_t c = ASMJIT_UINT64_C(0x0101010101010101); - - for (i = 0; i < kCount; i++) { - size_t offset; - EXPECT(pool.add(&c, 8, offset) == kErrorOk, - "pool.add() - Returned error"); - EXPECT(offset == i * 8, - "pool.add() - Should have reused constant"); - c++; - } - } - - INFO("Checking if the constants were split into 4-byte patterns"); - { - uint32_t c = 0x01010101; - for (i = 0; i < kCount; i++) { - size_t offset; - EXPECT(pool.add(&c, 4, offset) == kErrorOk, - "pool.add() - Returned error"); - EXPECT(offset == i * 8, - "pool.add() - Should reuse existing constant"); - c++; - } - } - - INFO("Adding 2 byte constant to misalign the current offset"); - { - uint16_t c = 0xFFFF; - size_t offset; - - EXPECT(pool.add(&c, 2, offset) == kErrorOk, - "pool.add() - Returned error"); - EXPECT(offset == kCount * 8, - "pool.add() - Didn't return expected position"); - EXPECT(pool.getAlignment() == 8, - "pool.getAlignment() - Expected 8-byte alignment"); - } - - INFO("Adding 8 byte constant to check if pool gets aligned again"); - { - uint64_t c = ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF); - size_t 
offset; - - EXPECT(pool.add(&c, 8, offset) == kErrorOk, - "pool.add() - Returned error"); - EXPECT(offset == kCount * 8 + 8, - "pool.add() - Didn't return aligned offset"); - } - - INFO("Adding 2 byte constant to verify the gap is filled"); - { - uint16_t c = 0xFFFE; - size_t offset; - - EXPECT(pool.add(&c, 2, offset) == kErrorOk, - "pool.add() - Returned error"); - EXPECT(offset == kCount * 8 + 2, - "pool.add() - Didn't fill the gap"); - EXPECT(pool.getAlignment() == 8, - "pool.getAlignment() - Expected 8-byte alignment"); - } - - INFO("Checking reset functionality"); - { - pool.reset(&zone); - zone.reset(); - - EXPECT(pool.getSize() == 0, - "pool.getSize() - Expected pool size to be zero"); - EXPECT(pool.getAlignment() == 0, - "pool.getSize() - Expected pool alignment to be zero"); - } - - INFO("Checking pool alignment when combined constants are added"); - { - uint8_t bytes[32] = { 0 }; - size_t offset; - - pool.add(bytes, 1, offset); - - EXPECT(pool.getSize() == 1, - "pool.getSize() - Expected pool size to be 1 byte"); - EXPECT(pool.getAlignment() == 1, - "pool.getSize() - Expected pool alignment to be 1 byte"); - EXPECT(offset == 0, - "pool.getSize() - Expected offset returned to be zero"); - - pool.add(bytes, 2, offset); - - EXPECT(pool.getSize() == 4, - "pool.getSize() - Expected pool size to be 4 bytes"); - EXPECT(pool.getAlignment() == 2, - "pool.getSize() - Expected pool alignment to be 2 bytes"); - EXPECT(offset == 2, - "pool.getSize() - Expected offset returned to be 2"); - - pool.add(bytes, 4, offset); - - EXPECT(pool.getSize() == 8, - "pool.getSize() - Expected pool size to be 8 bytes"); - EXPECT(pool.getAlignment() == 4, - "pool.getSize() - Expected pool alignment to be 4 bytes"); - EXPECT(offset == 4, - "pool.getSize() - Expected offset returned to be 4"); - - pool.add(bytes, 4, offset); - - EXPECT(pool.getSize() == 8, - "pool.getSize() - Expected pool size to be 8 bytes"); - EXPECT(pool.getAlignment() == 4, - "pool.getSize() - Expected pool alignment to be 4 bytes"); - EXPECT(offset == 4, - "pool.getSize() - Expected offset returned to be 8"); - - pool.add(bytes, 32, offset); - EXPECT(pool.getSize() == 64, - "pool.getSize() - Expected pool size to be 64 bytes"); - EXPECT(pool.getAlignment() == 32, - "pool.getSize() - Expected pool alignment to be 32 bytes"); - EXPECT(offset == 32, - "pool.getSize() - Expected offset returned to be 32"); - } -} -#endif // ASMJIT_TEST - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/constpool.h b/src/asmjit/base/constpool.h deleted file mode 100644 index 945ea64..0000000 --- a/src/asmjit/base/constpool.h +++ /dev/null @@ -1,257 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_CONSTPOOL_H -#define _ASMJIT_BASE_CONSTPOOL_H - -// [Dependencies] -#include "../base/zone.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::ConstPool] -// ============================================================================ - -//! Constant pool. 
-class ConstPool { -public: - ASMJIT_NONCOPYABLE(ConstPool) - - enum { - kIndex1 = 0, - kIndex2 = 1, - kIndex4 = 2, - kIndex8 = 3, - kIndex16 = 4, - kIndex32 = 5, - kIndexCount = 6 - }; - - // -------------------------------------------------------------------------- - // [Gap] - // -------------------------------------------------------------------------- - - //! \internal - //! - //! Zone-allocated const-pool gap. - struct Gap { - Gap* _next; //!< Pointer to the next gap - size_t _offset; //!< Offset of the gap. - size_t _length; //!< Remaining bytes of the gap (basically a gap size). - }; - - // -------------------------------------------------------------------------- - // [Node] - // -------------------------------------------------------------------------- - - //! \internal - //! - //! Zone-allocated const-pool node. - struct Node { - ASMJIT_INLINE void* getData() const noexcept { - return static_cast(const_cast(this) + 1); - } - - Node* _link[2]; //!< Left/Right nodes. - uint32_t _level : 31; //!< Horizontal level for balance. - uint32_t _shared : 1; //!< If this constant is shared with another. - uint32_t _offset; //!< Data offset from the beginning of the pool. - }; - - // -------------------------------------------------------------------------- - // [Tree] - // -------------------------------------------------------------------------- - - //! \internal - //! - //! Zone-allocated const-pool tree. - struct Tree { - enum { - //! Maximum tree height == log2(1 << 64). - kHeightLimit = 64 - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE Tree(size_t dataSize = 0) noexcept - : _root(nullptr), - _length(0), - _dataSize(dataSize) {} - ASMJIT_INLINE ~Tree() {} - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void reset() noexcept { - _root = nullptr; - _length = 0; - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; } - ASMJIT_INLINE size_t getLength() const noexcept { return _length; } - - ASMJIT_INLINE void setDataSize(size_t dataSize) noexcept { - ASMJIT_ASSERT(isEmpty()); - _dataSize = dataSize; - } - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - ASMJIT_API Node* get(const void* data) noexcept; - ASMJIT_API void put(Node* node) noexcept; - - // -------------------------------------------------------------------------- - // [Iterate] - // -------------------------------------------------------------------------- - - template - ASMJIT_INLINE void iterate(Visitor& visitor) const noexcept { - Node* node = const_cast(_root); - if (!node) return; - - Node* stack[kHeightLimit]; - size_t top = 0; - - for (;;) { - Node* left = node->_link[0]; - if (left != nullptr) { - ASMJIT_ASSERT(top != kHeightLimit); - stack[top++] = node; - - node = left; - continue; - } - -Visit: - visitor.visit(node); - node = node->_link[1]; - if (node != nullptr) - continue; - - if (top == 0) - return; - - node = stack[--top]; - goto Visit; - } - } - - // 
-------------------------------------------------------------------------- - // [Helpers] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept { - Node* node = zone->allocT(sizeof(Node) + size); - if (ASMJIT_UNLIKELY(!node)) return nullptr; - - node->_link[0] = nullptr; - node->_link[1] = nullptr; - node->_level = 1; - node->_shared = shared; - node->_offset = static_cast(offset); - - ::memcpy(node->getData(), data, size); - return node; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - Node* _root; //!< Root of the tree - size_t _length; //!< Length of the tree (count of nodes). - size_t _dataSize; //!< Size of the data. - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_API ConstPool(Zone* zone) noexcept; - ASMJIT_API ~ConstPool() noexcept; - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - ASMJIT_API void reset(Zone* zone) noexcept; - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - //! Get whether the constant-pool is empty. - ASMJIT_INLINE bool isEmpty() const noexcept { return _size == 0; } - //! Get the size of the constant-pool in bytes. - ASMJIT_INLINE size_t getSize() const noexcept { return _size; } - //! Get minimum alignment. - ASMJIT_INLINE size_t getAlignment() const noexcept { return _alignment; } - - //! Add a constant to the constant pool. - //! - //! The constant must have known size, which is 1, 2, 4, 8, 16 or 32 bytes. - //! The constant is added to the pool only if it doesn't not exist, otherwise - //! cached value is returned. - //! - //! AsmJit is able to subdivide added constants, so for example if you add - //! 8-byte constant 0x1122334455667788 it will create the following slots: - //! - //! 8-byte: 0x1122334455667788 - //! 4-byte: 0x11223344, 0x55667788 - //! - //! The reason is that when combining MMX/SSE/AVX code some patterns are used - //! frequently. However, AsmJit is not able to reallocate a constant that has - //! been already added. For example if you try to add 4-byte constant and then - //! 8-byte constant having the same 4-byte pattern as the previous one, two - //! independent slots will be generated by the pool. - ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept; - - // -------------------------------------------------------------------------- - // [Fill] - // -------------------------------------------------------------------------- - - //! Fill the destination with the constants from the pool. - ASMJIT_API void fill(void* dst) const noexcept; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - Zone* _zone; //!< Zone allocator. - Tree _tree[kIndexCount]; //!< Tree per size. - Gap* _gaps[kIndexCount]; //!< Gaps per size. - Gap* _gapPool; //!< Gaps pool - - size_t _size; //!< Size of the pool (in bytes). 
- size_t _alignment; //!< Required pool alignment. -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_CONSTPOOL_H diff --git a/src/asmjit/base/cpuinfo.cpp b/src/asmjit/base/cpuinfo.cpp deleted file mode 100644 index c842173..0000000 --- a/src/asmjit/base/cpuinfo.cpp +++ /dev/null @@ -1,674 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/cpuinfo.h" -#include "../base/utils.h" - -#if ASMJIT_OS_POSIX -# include -# include -# include -#endif // ASMJIT_OS_POSIX - -#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 -# if ASMJIT_CC_MSC_GE(14, 0, 0) - # include // Required by `__cpuid()` and `_xgetbv()`. -# endif // _MSC_VER >= 1400 -#endif - -#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 -# if ASMJIT_OS_LINUX -# include // Required by `getauxval()`. -# endif -#endif - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::CpuInfo - Detect ARM] -// ============================================================================ - -// ARM information has to be retrieved by the OS (this is how ARM was designed). -#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 - -#if ASMJIT_ARCH_ARM32 -static ASMJIT_INLINE void armPopulateBaselineA32Features(CpuInfo* cpuInfo) noexcept { - cpuInfo->_archInfo.init(ArchInfo::kTypeA32); -} -#endif // ASMJIT_ARCH_ARM32 - -#if ASMJIT_ARCH_ARM64 -static ASMJIT_INLINE void armPopulateBaselineA64Features(CpuInfo* cpuInfo) noexcept { - cpuInfo->_archInfo.init(ArchInfo::kTypeA64); - - // Thumb (including all variations) is supported on A64 (but not accessible from A64). - cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB); - cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2); - - // A64 is based on ARMv8 and newer. - cpuInfo->addFeature(CpuInfo::kArmFeatureV6); - cpuInfo->addFeature(CpuInfo::kArmFeatureV7); - cpuInfo->addFeature(CpuInfo::kArmFeatureV8); - - // A64 comes with these features by default. - cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2); - cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv3); - cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv4); - cpuInfo->addFeature(CpuInfo::kArmFeatureEDSP); - cpuInfo->addFeature(CpuInfo::kArmFeatureASIMD); - cpuInfo->addFeature(CpuInfo::kArmFeatureIDIVA); - cpuInfo->addFeature(CpuInfo::kArmFeatureIDIVT); -} -#endif // ASMJIT_ARCH_ARM64 - -#if ASMJIT_OS_WINDOWS -//! \internal -//! -//! Detect ARM CPU features on Windows. -//! -//! The detection is based on `IsProcessorFeaturePresent()` API call. -static ASMJIT_INLINE void armDetectCpuInfoOnWindows(CpuInfo* cpuInfo) noexcept { -#if ASMJIT_ARCH_ARM32 - armPopulateBaselineA32Features(cpuInfo); - - // Windows for ARM requires at least ARMv7 with DSP extensions. - cpuInfo->addFeature(CpuInfo::kArmFeatureV6); - cpuInfo->addFeature(CpuInfo::kArmFeatureV7); - cpuInfo->addFeature(CpuInfo::kArmFeatureEDSP); - - // Windows for ARM requires VFPv3. - cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2); - cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv3); - - // Windows for ARM requires and uses THUMB2. - cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB); - cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2); -#else - armPopulateBaselineA64Features(cpuInfo); -#endif - - // Windows for ARM requires ASIMD. 
- cpuInfo->addFeature(CpuInfo::kArmFeatureASIMD); - - // Detect additional CPU features by calling `IsProcessorFeaturePresent()`. - struct WinPFPMapping { - uint32_t pfpId; - uint32_t featureId; - }; - - static const WinPFPMapping mapping[] = { - { PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE , CpuInfo::kArmFeatureVFPv4 }, - { PF_ARM_VFP_32_REGISTERS_AVAILABLE , CpuInfo::kArmFeatureVFP_D32 }, - { PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE, CpuInfo::kArmFeatureIDIVT }, - { PF_ARM_64BIT_LOADSTORE_ATOMIC , CpuInfo::kArmFeatureAtomics64 } - }; - - for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(mapping); i++) - if (::IsProcessorFeaturePresent(mapping[i].pfpId)) - cpuInfo->addFeature(mapping[i].featureId); -} -#endif // ASMJIT_OS_WINDOWS - -#if ASMJIT_OS_LINUX -struct LinuxHWCapMapping { - uint32_t hwcapMask; - uint32_t featureId; -}; - -static void armDetectHWCaps(CpuInfo* cpuInfo, unsigned long type, const LinuxHWCapMapping* mapping, size_t length) noexcept { - unsigned long mask = getauxval(type); - - for (size_t i = 0; i < length; i++) - if ((mask & mapping[i].hwcapMask) == mapping[i].hwcapMask) - cpuInfo->addFeature(mapping[i].featureId); -} - -//! \internal -//! -//! Detect ARM CPU features on Linux. -//! -//! The detection is based on `getauxval()`. -ASMJIT_FAVOR_SIZE static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept { -#if ASMJIT_ARCH_ARM32 - armPopulateBaselineA32Features(cpuInfo); - - // `AT_HWCAP` provides ARMv7 (and less) related flags. - static const LinuxHWCapMapping hwCapMapping[] = { - { /* HWCAP_VFP */ (1 << 6), CpuInfo::kArmFeatureVFPv2 }, - { /* HWCAP_EDSP */ (1 << 7), CpuInfo::kArmFeatureEDSP }, - { /* HWCAP_NEON */ (1 << 12), CpuInfo::kArmFeatureASIMD }, - { /* HWCAP_VFPv3 */ (1 << 13), CpuInfo::kArmFeatureVFPv3 }, - { /* HWCAP_VFPv4 */ (1 << 16), CpuInfo::kArmFeatureVFPv4 }, - { /* HWCAP_IDIVA */ (1 << 17), CpuInfo::kArmFeatureIDIVA }, - { /* HWCAP_IDIVT */ (1 << 18), CpuInfo::kArmFeatureIDIVT }, - { /* HWCAP_VFPD32 */ (1 << 19), CpuInfo::kArmFeatureVFP_D32 } - }; - armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping)); - - // VFPv3 implies VFPv2. - if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv3)) { - cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2); - } - - // VFPv2 implies ARMv6. - if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv2)) { - cpuInfo->addFeature(CpuInfo::kArmFeatureV6); - } - - // VFPv3 or ASIMD implies ARMv7. - if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv3) || - cpuInfo->hasFeature(CpuInfo::kArmFeatureASIMD)) { - cpuInfo->addFeature(CpuInfo::kArmFeatureV7); - } - - // `AT_HWCAP2` provides ARMv8+ related flags. - static const LinuxHWCapMapping hwCap2Mapping[] = { - { /* HWCAP2_AES */ (1 << 0), CpuInfo::kArmFeatureAES }, - { /* HWCAP2_PMULL */ (1 << 1), CpuInfo::kArmFeaturePMULL }, - { /* HWCAP2_SHA1 */ (1 << 2), CpuInfo::kArmFeatureSHA1 }, - { /* HWCAP2_SHA2 */ (1 << 3), CpuInfo::kArmFeatureSHA256 }, - { /* HWCAP2_CRC32 */ (1 << 4), CpuInfo::kArmFeatureCRC32 } - }; - armDetectHWCaps(cpuInfo, AT_HWCAP2, hwCap2Mapping, ASMJIT_ARRAY_SIZE(hwCap2Mapping)); - - if (cpuInfo->hasFeature(CpuInfo::kArmFeatureAES ) || - cpuInfo->hasFeature(CpuInfo::kArmFeatureCRC32 ) || - cpuInfo->hasFeature(CpuInfo::kArmFeaturePMULL ) || - cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA1 ) || - cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA256)) { - cpuInfo->addFeature(CpuInfo::kArmFeatureV8); - } -#else - armPopulateBaselineA64Features(cpuInfo); - - // `AT_HWCAP` provides ARMv8+ related flags. 
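On Linux the whole ARM feature query above boils down to one getauxval() call per HWCAP word and a mask test. Outside of asmjit the same check looks like this (a sketch for 32-bit ARM Linux; the HWCAP_NEON bit value is taken from the mapping table above):

  #include <sys/auxv.h>                      // getauxval(), AT_HWCAP

  static bool hasNeon() {
    // Bit 12 of AT_HWCAP is HWCAP_NEON on 32-bit ARM Linux (see hwCapMapping above).
    unsigned long hwcap = getauxval(AT_HWCAP);
    return (hwcap & (1UL << 12)) != 0;
  }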
- static const LinuxHWCapMapping hwCapMapping[] = { - { /* HWCAP_ASIMD */ (1 << 1), CpuInfo::kArmFeatureASIMD }, - { /* HWCAP_AES */ (1 << 3), CpuInfo::kArmFeatureAES }, - { /* HWCAP_CRC32 */ (1 << 7), CpuInfo::kArmFeatureCRC32 }, - { /* HWCAP_PMULL */ (1 << 4), CpuInfo::kArmFeaturePMULL }, - { /* HWCAP_SHA1 */ (1 << 5), CpuInfo::kArmFeatureSHA1 }, - { /* HWCAP_SHA2 */ (1 << 6), CpuInfo::kArmFeatureSHA256 }, - { /* HWCAP_ATOMICS */ (1 << 8), CpuInfo::kArmFeatureAtomics64 } - }; - armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping)); - - // `AT_HWCAP2` is not used at the moment. -#endif -} -#endif // ASMJIT_OS_LINUX - -ASMJIT_FAVOR_SIZE static void armDetectCpuInfo(CpuInfo* cpuInfo) noexcept { -#if ASMJIT_OS_WINDOWS - armDetectCpuInfoOnWindows(cpuInfo); -#elif ASMJIT_OS_LINUX - armDetectCpuInfoOnLinux(cpuInfo); -#else -# error "[asmjit] armDetectCpuInfo() - Unsupported OS." -#endif -} -#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 - -// ============================================================================ -// [asmjit::CpuInfo - Detect X86] -// ============================================================================ - -#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - -//! \internal -//! -//! X86 CPUID result. -struct CpuIdResult { - uint32_t eax, ebx, ecx, edx; -}; - -//! \internal -//! -//! Content of XCR register, result of XGETBV instruction. -struct XGetBVResult { - uint32_t eax, edx; -}; - -#if ASMJIT_CC_MSC && !ASMJIT_CC_MSC_GE(15, 0, 30729) && ASMJIT_ARCH_X64 -//! \internal -//! -//! HACK: VS2008 or less, 64-bit mode - `__cpuidex` doesn't exist! However, -//! 64-bit calling convention specifies the first parameter to be passed by -//! ECX, so we may be lucky if compiler doesn't move the register, otherwise -//! the result would be wrong. -static void ASMJIT_NOINLINE void x86CallCpuIdWorkaround(uint32_t inEcx, uint32_t inEax, CpuIdResult* result) noexcept { - __cpuid(reinterpret_cast(result), inEax); -} -#endif - -//! \internal -//! -//! Wrapper to call `cpuid` instruction. -static void ASMJIT_INLINE x86CallCpuId(CpuIdResult* result, uint32_t inEax, uint32_t inEcx = 0) noexcept { -#if ASMJIT_CC_MSC && ASMJIT_CC_MSC_GE(15, 0, 30729) - __cpuidex(reinterpret_cast(result), inEax, inEcx); -#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X64 - x86CallCpuIdWorkaround(inEcx, inEax, result); -#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X86 - uint32_t paramEax = inEax; - uint32_t paramEcx = inEcx; - uint32_t* out = reinterpret_cast(result); - - __asm { - mov eax, paramEax - mov ecx, paramEcx - mov edi, out - cpuid - mov dword ptr[edi + 0], eax - mov dword ptr[edi + 4], ebx - mov dword ptr[edi + 8], ecx - mov dword ptr[edi + 12], edx - } -#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && ASMJIT_ARCH_X86 - __asm__ __volatile__( - "mov %%ebx, %%edi\n" - "cpuid\n" - "xchg %%edi, %%ebx\n" - : "=a"(result->eax), - "=D"(result->ebx), - "=c"(result->ecx), - "=d"(result->edx) - : "a"(inEax), - "c"(inEcx)); -#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG || ASMJIT_CC_INTEL) && ASMJIT_ARCH_X64 - __asm__ __volatile__( - "mov %%rbx, %%rdi\n" - "cpuid\n" - "xchg %%rdi, %%rbx\n" - : "=a"(result->eax), - "=D"(result->ebx), - "=c"(result->ecx), - "=d"(result->edx) - : "a"(inEax), - "c"(inEcx)); -#else -# error "[asmjit] x86CallCpuid() - Unsupported compiler." -#endif -} - -//! \internal -//! -//! Wrapper to call `xgetbv` instruction. 
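The compiler-specific branches above exist mainly because 32-bit PIC code cannot clobber EBX directly and because `__cpuidex` is missing on old MSVC. When asmjit itself is not in the picture, newer GCC and Clang expose the same functionality through <cpuid.h>, including sub-leaf queries (a sketch; `__get_cpuid_count` is assumed to be available, which requires a reasonably recent toolchain):

  #include <cpuid.h>

  static bool queryLeaf7(unsigned& ebx, unsigned& ecx, unsigned& edx) {
    unsigned eax;
    // Equivalent of x86CallCpuId(&regs, 0x7, 0) above: leaf 7, sub-leaf 0 passed in ECX.
    return __get_cpuid_count(0x7, 0, &eax, &ebx, &ecx, &edx) != 0;
  }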
-static ASMJIT_INLINE void x86CallXGetBV(XGetBVResult* result, uint32_t inEcx) noexcept { -#if ASMJIT_CC_MSC_GE(16, 0, 40219) // 2010SP1+ - uint64_t value = _xgetbv(inEcx); - result->eax = static_cast(value & 0xFFFFFFFFU); - result->edx = static_cast(value >> 32); -#elif ASMJIT_CC_GCC || ASMJIT_CC_CLANG - uint32_t outEax; - uint32_t outEdx; - - // Replaced, because the world is not perfect: - // __asm__ __volatile__("xgetbv" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx)); - __asm__ __volatile__(".byte 0x0F, 0x01, 0xd0" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx)); - - result->eax = outEax; - result->edx = outEdx; -#else - result->eax = 0; - result->edx = 0; -#endif -} - -//! \internal -//! -//! Map a 12-byte vendor string returned by `cpuid` into a `CpuInfo::Vendor` ID. -static ASMJIT_INLINE uint32_t x86GetCpuVendorID(const char* vendorString) noexcept { - struct VendorData { - uint32_t id; - char text[12]; - }; - - static const VendorData vendorList[] = { - { CpuInfo::kVendorIntel , { 'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l' } }, - { CpuInfo::kVendorAMD , { 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' } }, - { CpuInfo::kVendorVIA , { 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 } }, - { CpuInfo::kVendorVIA , { 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' } } - }; - - uint32_t dw0 = reinterpret_cast(vendorString)[0]; - uint32_t dw1 = reinterpret_cast(vendorString)[1]; - uint32_t dw2 = reinterpret_cast(vendorString)[2]; - - for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(vendorList); i++) { - if (dw0 == reinterpret_cast(vendorList[i].text)[0] && - dw1 == reinterpret_cast(vendorList[i].text)[1] && - dw2 == reinterpret_cast(vendorList[i].text)[2]) - return vendorList[i].id; - } - - return CpuInfo::kVendorNone; -} - -static ASMJIT_INLINE void x86SimplifyBrandString(char* s) noexcept { - // Used to always clear the current character to ensure that the result - // doesn't contain garbage after the new zero terminator. - char* d = s; - - char prev = 0; - char curr = s[0]; - s[0] = '\0'; - - for (;;) { - if (curr == 0) - break; - - if (curr == ' ') { - if (prev == '@' || s[1] == ' ' || s[1] == '@') - goto L_Skip; - } - - d[0] = curr; - d++; - prev = curr; - -L_Skip: - curr = *++s; - s[0] = '\0'; - } - - d[0] = '\0'; -} - -ASMJIT_FAVOR_SIZE static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept { - uint32_t i, maxId; - - CpuIdResult regs; - XGetBVResult xcr0 = { 0, 0 }; - - cpuInfo->_archInfo.init(ArchInfo::kTypeHost); - cpuInfo->addFeature(CpuInfo::kX86FeatureI486); - - // -------------------------------------------------------------------------- - // [CPUID EAX=0x0] - // -------------------------------------------------------------------------- - - // Get vendor string/id. - x86CallCpuId(®s, 0x0); - - maxId = regs.eax; - ::memcpy(cpuInfo->_vendorString + 0, ®s.ebx, 4); - ::memcpy(cpuInfo->_vendorString + 4, ®s.edx, 4); - ::memcpy(cpuInfo->_vendorString + 8, ®s.ecx, 4); - cpuInfo->_vendorId = x86GetCpuVendorID(cpuInfo->_vendorString); - - // -------------------------------------------------------------------------- - // [CPUID EAX=0x1] - // -------------------------------------------------------------------------- - - if (maxId >= 0x1) { - // Get feature flags in ECX/EDX and family/model in EAX. - x86CallCpuId(®s, 0x1); - - // Fill family and model fields. - cpuInfo->_family = (regs.eax >> 8) & 0x0F; - cpuInfo->_model = (regs.eax >> 4) & 0x0F; - cpuInfo->_stepping = (regs.eax ) & 0x0F; - - // Use extended family and model fields. 
- if (cpuInfo->_family == 0x0F) { - cpuInfo->_family += ((regs.eax >> 20) & 0xFF); - cpuInfo->_model += ((regs.eax >> 16) & 0x0F) << 4; - } - - cpuInfo->_x86Data._processorType = ((regs.eax >> 12) & 0x03); - cpuInfo->_x86Data._brandIndex = ((regs.ebx ) & 0xFF); - cpuInfo->_x86Data._flushCacheLineSize = ((regs.ebx >> 8) & 0xFF) * 8; - cpuInfo->_x86Data._maxLogicalProcessors = ((regs.ebx >> 16) & 0xFF); - - if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE3); - if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeaturePCLMULQDQ); - if (regs.ecx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureMONITOR); - if (regs.ecx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSSE3); - if (regs.ecx & 0x00002000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG16B); - if (regs.ecx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_1); - if (regs.ecx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_2); - if (regs.ecx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMOVBE); - if (regs.ecx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeaturePOPCNT); - if (regs.ecx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAESNI); - if (regs.ecx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVE); - if (regs.ecx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureOSXSAVE); - if (regs.ecx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDRAND); - if (regs.edx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSC); - if (regs.edx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureMSR); - if (regs.edx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG8B); - if (regs.edx & 0x00008000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMOV); - if (regs.edx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSH); - if (regs.edx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX); - if (regs.edx & 0x01000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSR); - if (regs.edx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE) - .addFeature(CpuInfo::kX86FeatureMMX2); - if (regs.edx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE) - .addFeature(CpuInfo::kX86FeatureSSE2); - if (regs.edx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMT); - - // Get the content of XCR0 if supported by CPU and enabled by OS. - if ((regs.ecx & 0x0C000000U) == 0x0C000000U) { - x86CallXGetBV(&xcr0, 0); - } - - // Detect AVX+. - if (regs.ecx & 0x10000000U) { - // - XCR0[2:1] == 11b - // XMM & YMM states need to be enabled by OS. - if ((xcr0.eax & 0x00000006U) == 0x00000006U) { - cpuInfo->addFeature(CpuInfo::kX86FeatureAVX); - - if (regs.ecx & 0x00001000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA); - if (regs.ecx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureF16C); - } - } - } - - // -------------------------------------------------------------------------- - // [CPUID EAX=0x7] - // -------------------------------------------------------------------------- - - // Detect new features if the processor supports CPUID-07. 
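The AVX branch above is the standard three-step gate: the CPU must report both AVX and OSXSAVE in CPUID.1:ECX, and XCR0[2:1] must show that the OS actually saves XMM and YMM state. As a standalone check with GCC/Clang's <cpuid.h> (a sketch, not asmjit API; the xgetbv byte sequence is the same one used above):

  #include <cpuid.h>
  #include <stdint.h>

  static bool osUsableAVX(void) {
    unsigned eax, ebx, ecx, edx;
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
      return false;

    const unsigned kOSXSAVE = 1U << 27;                // CPUID.1:ECX bit 27
    const unsigned kAVX     = 1U << 28;                // CPUID.1:ECX bit 28
    if ((ecx & (kOSXSAVE | kAVX)) != (kOSXSAVE | kAVX))
      return false;

    uint32_t xcr0Lo, xcr0Hi;
    __asm__ __volatile__(".byte 0x0F, 0x01, 0xD0"      // xgetbv, encoded as bytes as above
                         : "=a"(xcr0Lo), "=d"(xcr0Hi) : "c"(0));

    // XCR0[2:1] == 11b -> XMM and YMM state enabled by the OS.
    // (AVX-512 additionally requires XCR0[7:5], i.e. (xcr0Lo & 0xE6) == 0xE6 as checked above.)
    return (xcr0Lo & 0x6) == 0x6;
  }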
- bool maybeMPX = false; - - if (maxId >= 0x7) { - x86CallCpuId(®s, 0x7); - - if (regs.ebx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureFSGSBASE); - if (regs.ebx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI); - if (regs.ebx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureHLE); - if (regs.ebx & 0x00000080U) cpuInfo->addFeature(CpuInfo::kX86FeatureSMEP); - if (regs.ebx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI2); - if (regs.ebx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureERMS); - if (regs.ebx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureRTM); - if (regs.ebx & 0x00004000U) maybeMPX = true; - if (regs.ebx & 0x00040000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDSEED); - if (regs.ebx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureADX); - if (regs.ebx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSMAP); - if (regs.ebx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeaturePCOMMIT); - if (regs.ebx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSHOPT); - if (regs.ebx & 0x01000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLWB); - if (regs.ebx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSHA); - if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCHWT1); - - // TSX is supported if at least one of `HLE` and `RTM` is supported. - if (regs.ebx & 0x00000810U) cpuInfo->addFeature(CpuInfo::kX86FeatureTSX); - - // Detect AVX2. - if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX)) { - if (regs.ebx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX2); - } - - // Detect AVX-512+. - if (regs.ebx & 0x00010000U) { - // - XCR0[2:1] == 11b - // XMM/YMM states need to be enabled by OS. - // - XCR0[7:5] == 111b - // Upper 256-bit of ZMM0-XMM15 and ZMM16-ZMM31 need to be enabled by the OS. - if ((xcr0.eax & 0x000000E6U) == 0x000000E6U) { - cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_F); - - if (regs.ebx & 0x00020000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_DQ); - if (regs.ebx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_IFMA); - if (regs.ebx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_PFI); - if (regs.ebx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_ERI); - if (regs.ebx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_CDI); - if (regs.ebx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_BW); - if (regs.ebx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VL); - if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VBMI); - if (regs.ecx & 0x00004000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VPOPCNTDQ); - if (regs.edx & 0x00000004U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_4VNNIW); - if (regs.edx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_4FMAPS); - } - } - } - - // -------------------------------------------------------------------------- - // [CPUID EAX=0xD] - // -------------------------------------------------------------------------- - - if (maxId >= 0xD) { - x86CallCpuId(®s, 0xD, 0); - - // Both CPUID result and XCR0 has to be enabled to have support for MPX. 
- if (((regs.eax & xcr0.eax) & 0x00000018U) == 0x00000018U && maybeMPX) - cpuInfo->addFeature(CpuInfo::kX86FeatureMPX); - - x86CallCpuId(®s, 0xD, 1); - if (regs.eax & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVEOPT); - if (regs.eax & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVEC); - if (regs.eax & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVES); - } - - // -------------------------------------------------------------------------- - // [CPUID EAX=0x80000000...maxId] - // -------------------------------------------------------------------------- - - // The highest EAX that we understand. - uint32_t kHighestProcessedEAX = 0x80000008U; - - // Several CPUID calls are required to get the whole branc string. It's easy - // to copy one DWORD at a time instead of performing a byte copy. - uint32_t* brand = reinterpret_cast(cpuInfo->_brandString); - - i = maxId = 0x80000000U; - do { - x86CallCpuId(®s, i); - switch (i) { - case 0x80000000U: - maxId = std::min(regs.eax, kHighestProcessedEAX); - break; - - case 0x80000001U: - if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureLAHFSAHF); - if (regs.ecx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureLZCNT); - if (regs.ecx & 0x00000040U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4A); - if (regs.ecx & 0x00000080U) cpuInfo->addFeature(CpuInfo::kX86FeatureMSSE); - if (regs.ecx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCHW); - if (regs.ecx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureTBM); - if (regs.edx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureNX); - if (regs.edx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSROPT); - if (regs.edx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX2); - if (regs.edx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSCP); - if (regs.edx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW2) - .addFeature(CpuInfo::kX86FeatureMMX2); - if (regs.edx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW); - - if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX)) { - if (regs.ecx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureXOP); - if (regs.ecx & 0x00010000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA4); - } - - // These seem to be only supported by AMD. - if (cpuInfo->getVendorId() == CpuInfo::kVendorAMD) { - if (regs.ecx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureALTMOVCR8); - } - break; - - case 0x80000002U: - case 0x80000003U: - case 0x80000004U: - *brand++ = regs.eax; - *brand++ = regs.ebx; - *brand++ = regs.ecx; - *brand++ = regs.edx; - - // Go directly to the last one. - if (i == 0x80000004U) i = 0x80000008U - 1; - break; - - case 0x80000008U: - if (regs.ebx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLZERO); - break; - } - } while (++i <= maxId); - - // Simplify CPU brand string by removing unnecessary spaces. 
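The brand-string loop above simply concatenates the raw registers of leaves 0x80000002-0x80000004 (48 bytes, already NUL-padded by the CPU) and then strips redundant spaces. A standalone equivalent with <cpuid.h> (a sketch, not part of asmjit):

  #include <cpuid.h>
  #include <cstring>
  #include <cstdio>

  int main() {
    char brand[49] = { 0 };                     // 3 leaves * 16 bytes + terminator
    unsigned r[4];

    for (unsigned i = 0; i < 3; i++) {
      if (!__get_cpuid(0x80000002U + i, &r[0], &r[1], &r[2], &r[3]))
        return 1;                               // extended leaves not supported
      std::memcpy(brand + i * 16, r, 16);       // EAX, EBX, ECX, EDX, one DWORD each
    }
    std::printf("%s\n", brand);
    return 0;
  }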
- x86SimplifyBrandString(cpuInfo->_brandString); -} -#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - -// ============================================================================ -// [asmjit::CpuInfo - Detect - HWThreadsCount] -// ============================================================================ - -static ASMJIT_INLINE uint32_t cpuDetectHWThreadsCount() noexcept { -#if ASMJIT_OS_WINDOWS - SYSTEM_INFO info; - ::GetSystemInfo(&info); - return info.dwNumberOfProcessors; -#elif ASMJIT_OS_POSIX && defined(_SC_NPROCESSORS_ONLN) - long res = ::sysconf(_SC_NPROCESSORS_ONLN); - if (res <= 0) return 1; - return static_cast(res); -#else - return 1; -#endif -} - -// ============================================================================ -// [asmjit::CpuInfo - Detect] -// ============================================================================ - -ASMJIT_FAVOR_SIZE void CpuInfo::detect() noexcept { - reset(); - -#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 - armDetectCpuInfo(this); -#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 - -#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - x86DetectCpuInfo(this); -#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - - _hwThreadsCount = cpuDetectHWThreadsCount(); -} - -// ============================================================================ -// [asmjit::CpuInfo - GetHost] -// ============================================================================ - -struct HostCpuInfo : public CpuInfo { - ASMJIT_INLINE HostCpuInfo() noexcept : CpuInfo() { detect(); } -}; - -const CpuInfo& CpuInfo::getHost() noexcept { - static HostCpuInfo host; - return host; -} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/cpuinfo.h b/src/asmjit/base/cpuinfo.h deleted file mode 100644 index e5dd191..0000000 --- a/src/asmjit/base/cpuinfo.h +++ /dev/null @@ -1,373 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_CPUINFO_H -#define _ASMJIT_BASE_CPUINFO_H - -// [Dependencies] -#include "../base/arch.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::CpuFeatures] -// ============================================================================ - -class CpuFeatures { -public: - typedef uintptr_t BitWord; - - enum { - kMaxFeatures = 128, - kBitWordSize = static_cast(sizeof(BitWord)) * 8, - kNumBitWords = kMaxFeatures / kBitWordSize - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE CpuFeatures() noexcept { reset(); } - ASMJIT_INLINE CpuFeatures(const CpuFeatures& other) noexcept = default; - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void init(const CpuFeatures& other) noexcept { ::memcpy(this, &other, sizeof(*this)); } - ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); } - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - //! Get all features as `BitWord` array. 
- ASMJIT_INLINE BitWord* getBits() noexcept { return _bits; } - //! Get all features as `BitWord` array (const). - ASMJIT_INLINE const BitWord* getBits() const noexcept { return _bits; } - - //! Get if feature `feature` is present. - ASMJIT_INLINE bool has(uint32_t feature) const noexcept { - ASMJIT_ASSERT(feature < kMaxFeatures); - - uint32_t idx = feature / kBitWordSize; - uint32_t bit = feature % kBitWordSize; - - return static_cast((_bits[idx] >> bit) & 0x1); - } - - //! Get if all features as defined by `other` are present. - ASMJIT_INLINE bool hasAll(const CpuFeatures& other) const noexcept { - for (uint32_t i = 0; i < kNumBitWords; i++) - if ((_bits[i] & other._bits[i]) != other._bits[i]) - return false; - return true; - } - - //! Add a CPU `feature`. - ASMJIT_INLINE CpuFeatures& add(uint32_t feature) noexcept { - ASMJIT_ASSERT(feature < kMaxFeatures); - - uint32_t idx = feature / kBitWordSize; - uint32_t bit = feature % kBitWordSize; - - _bits[idx] |= static_cast(1) << bit; - return *this; - } - - //! Remove a CPU `feature`. - ASMJIT_INLINE CpuFeatures& remove(uint32_t feature) noexcept { - ASMJIT_ASSERT(feature < kMaxFeatures); - - uint32_t idx = feature / kBitWordSize; - uint32_t bit = feature % kBitWordSize; - - _bits[idx] &= ~(static_cast(1) << bit); - return *this; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - BitWord _bits[kNumBitWords]; -}; - -// ============================================================================ -// [asmjit::CpuInfo] -// ============================================================================ - -//! CPU information. -class CpuInfo { -public: - //! CPU vendor ID. - ASMJIT_ENUM(Vendor) { - kVendorNone = 0, //!< Generic or unknown. - kVendorIntel = 1, //!< Intel vendor. - kVendorAMD = 2, //!< AMD vendor. - kVendorVIA = 3 //!< VIA vendor. - }; - - //! ARM/ARM64 CPU features. - ASMJIT_ENUM(ArmFeatures) { - kArmFeatureV6 = 1, //!< ARMv6 instruction set. - kArmFeatureV7, //!< ARMv7 instruction set. - kArmFeatureV8, //!< ARMv8 instruction set. - kArmFeatureTHUMB, //!< CPU provides THUMB v1 instruction set (THUMB mode). - kArmFeatureTHUMB2, //!< CPU provides THUMB v2 instruction set (THUMB mode). - kArmFeatureVFPv2, //!< CPU provides VFPv2 instruction set. - kArmFeatureVFPv3, //!< CPU provides VFPv3 instruction set. - kArmFeatureVFPv4, //!< CPU provides VFPv4 instruction set. - kArmFeatureVFP_D32, //!< CPU provides 32 VFP-D (64-bit) registers. - kArmFeatureEDSP, //!< CPU provides EDSP extensions. - kArmFeatureASIMD, //!< CPU provides 'Advanced SIMD'. - kArmFeatureIDIVA, //!< CPU provides hardware SDIV and UDIV (ARM mode). - kArmFeatureIDIVT, //!< CPU provides hardware SDIV and UDIV (THUMB mode). - kArmFeatureAES, //!< CPU provides AES instructions (ARM64 only). - kArmFeatureCRC32, //!< CPU provides CRC32 instructions. - kArmFeaturePMULL, //!< CPU provides PMULL instructions (ARM64 only). - kArmFeatureSHA1, //!< CPU provides SHA1 instructions. - kArmFeatureSHA256, //!< CPU provides SHA256 instructions. - kArmFeatureAtomics64, //!< CPU provides 64-bit load/store atomics (ARM64 only). - - kArmFeaturesCount //!< Count of ARM/ARM64 CPU features. - }; - - //! X86/X64 CPU features. - ASMJIT_ENUM(X86Features) { - kX86FeatureI486 = 1, //!< CPU is at least I486. - kX86FeatureNX, //!< CPU has Not-Execute-Bit. - kX86FeatureMT, //!< CPU has multi-threading. - kX86FeatureALTMOVCR8, //!< CPU supports `LOCK MOV CR8` (AMD CPUs). 
- kX86FeatureCMOV, //!< CPU has CMOV. - kX86FeatureCMPXCHG8B, //!< CPU has CMPXCHG8B. - kX86FeatureCMPXCHG16B, //!< CPU has CMPXCHG16B (x64). - kX86FeatureMSR, //!< CPU has RDMSR/WRMSR. - kX86FeatureRDTSC, //!< CPU has RDTSC. - kX86FeatureRDTSCP, //!< CPU has RDTSCP. - kX86FeatureCLFLUSH, //!< CPU has CLFUSH. - kX86FeatureCLFLUSHOPT, //!< CPU has CLFUSHOPT. - kX86FeatureCLWB, //!< CPU has CLWB. - kX86FeatureCLZERO, //!< CPU has CLZERO. - kX86FeaturePCOMMIT, //!< CPU has PCOMMIT. - kX86FeaturePREFETCHW, //!< CPU has PREFETCHW. - kX86FeaturePREFETCHWT1, //!< CPU has PREFETCHWT1. - kX86FeatureLAHFSAHF, //!< CPU has LAHF/SAHF. - kX86FeatureFXSR, //!< CPU has FXSAVE/FXRSTOR. - kX86FeatureFXSROPT, //!< CPU has FXSAVE/FXRSTOR (optimized). - kX86FeatureMMX, //!< CPU has MMX. - kX86FeatureMMX2, //!< CPU has extended MMX. - kX86Feature3DNOW, //!< CPU has 3DNOW. - kX86Feature3DNOW2, //!< CPU has 3DNOW2 (enhanced). - kX86FeatureGEODE, //!< CPU has GEODE extensions (few additions to 3DNOW). - kX86FeatureSSE, //!< CPU has SSE. - kX86FeatureSSE2, //!< CPU has SSE2. - kX86FeatureSSE3, //!< CPU has SSE3. - kX86FeatureSSSE3, //!< CPU has SSSE3. - kX86FeatureSSE4A, //!< CPU has SSE4.A. - kX86FeatureSSE4_1, //!< CPU has SSE4.1. - kX86FeatureSSE4_2, //!< CPU has SSE4.2. - kX86FeatureMSSE, //!< CPU has Misaligned SSE (MSSE). - kX86FeatureMONITOR, //!< CPU has MONITOR and MWAIT. - kX86FeatureMOVBE, //!< CPU has MOVBE. - kX86FeaturePOPCNT, //!< CPU has POPCNT. - kX86FeatureLZCNT, //!< CPU has LZCNT. - kX86FeatureAESNI, //!< CPU has AESNI. - kX86FeaturePCLMULQDQ, //!< CPU has PCLMULQDQ. - kX86FeatureRDRAND, //!< CPU has RDRAND. - kX86FeatureRDSEED, //!< CPU has RDSEED. - kX86FeatureSMAP, //!< CPU has SMAP (supervisor-mode access prevention). - kX86FeatureSMEP, //!< CPU has SMEP (supervisor-mode execution prevention). - kX86FeatureSHA, //!< CPU has SHA-1 and SHA-256. - kX86FeatureXSAVE, //!< CPU has XSAVE support (XSAVE/XRSTOR, XSETBV/XGETBV, and XCR). - kX86FeatureXSAVEC, //!< CPU has XSAVEC support (XSAVEC). - kX86FeatureXSAVES, //!< CPU has XSAVES support (XSAVES/XRSTORS). - kX86FeatureXSAVEOPT, //!< CPU has XSAVEOPT support (XSAVEOPT/XSAVEOPT64). - kX86FeatureOSXSAVE, //!< CPU has XSAVE enabled by OS. - kX86FeatureAVX, //!< CPU has AVX. - kX86FeatureAVX2, //!< CPU has AVX2. - kX86FeatureF16C, //!< CPU has F16C. - kX86FeatureFMA, //!< CPU has FMA. - kX86FeatureFMA4, //!< CPU has FMA4. - kX86FeatureXOP, //!< CPU has XOP. - kX86FeatureBMI, //!< CPU has BMI (bit manipulation instructions #1). - kX86FeatureBMI2, //!< CPU has BMI2 (bit manipulation instructions #2). - kX86FeatureADX, //!< CPU has ADX (multi-precision add-carry instruction extensions). - kX86FeatureTBM, //!< CPU has TBM (trailing bit manipulation). - kX86FeatureMPX, //!< CPU has MPX (memory protection extensions). - kX86FeatureHLE, //!< CPU has HLE. - kX86FeatureRTM, //!< CPU has RTM. - kX86FeatureTSX, //!< CPU has TSX. - kX86FeatureERMS, //!< CPU has ERMS (enhanced REP MOVSB/STOSB). - kX86FeatureFSGSBASE, //!< CPU has FSGSBASE. - kX86FeatureAVX512_F, //!< CPU has AVX512-F (foundation). - kX86FeatureAVX512_CDI, //!< CPU has AVX512-CDI (conflict detection). - kX86FeatureAVX512_PFI, //!< CPU has AVX512-PFI (prefetch instructions). - kX86FeatureAVX512_ERI, //!< CPU has AVX512-ERI (exponential and reciprocal). - kX86FeatureAVX512_DQ, //!< CPU has AVX512-DQ (DWORD/QWORD). - kX86FeatureAVX512_BW, //!< CPU has AVX512-BW (BYTE/WORD). - kX86FeatureAVX512_VL, //!< CPU has AVX512-VL (vector length extensions). 
- kX86FeatureAVX512_IFMA, //!< CPU has AVX512-IFMA (integer fused-multiply-add using 52-bit precision). - kX86FeatureAVX512_VBMI, //!< CPU has AVX512-VBMI (vector byte manipulation). - kX86FeatureAVX512_VPOPCNTDQ, //!< CPU has AVX512-VPOPCNTDQ (VPOPCNT[D|Q] instructions). - kX86FeatureAVX512_4VNNIW, //!< CPU has AVX512-VNNIW (vector NN instructions word variable precision). - kX86FeatureAVX512_4FMAPS, //!< CPU has AVX512-FMAPS (FMA packed single). - - kX86FeaturesCount //!< Count of X86/X64 CPU features. - }; - - // -------------------------------------------------------------------------- - // [ArmInfo] - // -------------------------------------------------------------------------- - - struct ArmData { - }; - - // -------------------------------------------------------------------------- - // [X86Info] - // -------------------------------------------------------------------------- - - struct X86Data { - uint32_t _processorType; //!< Processor type. - uint32_t _brandIndex; //!< Brand index. - uint32_t _flushCacheLineSize; //!< Flush cache line size (in bytes). - uint32_t _maxLogicalProcessors; //!< Maximum number of addressable IDs for logical processors. - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE CpuInfo() noexcept { reset(); } - ASMJIT_INLINE CpuInfo(const CpuInfo& other) noexcept = default; - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - //! Initialize CpuInfo to the given architecture, see \ArchInfo. - ASMJIT_INLINE void initArch(uint32_t archType, uint32_t archMode = 0) noexcept { - _archInfo.init(archType, archMode); - } - - ASMJIT_INLINE void init(const CpuInfo& other) noexcept { ::memcpy(this, &other, sizeof(*this)); } - ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); } - - // -------------------------------------------------------------------------- - // [Detect] - // -------------------------------------------------------------------------- - - ASMJIT_API void detect() noexcept; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get generic architecture information. - ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _archInfo; } - //! Get CPU architecture type, see \ArchInfo::Type. - ASMJIT_INLINE uint32_t getArchType() const noexcept { return _archInfo.getType(); } - //! Get CPU architecture sub-type, see \ArchInfo::SubType. - ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _archInfo.getSubType(); } - - //! Get CPU vendor ID. - ASMJIT_INLINE uint32_t getVendorId() const noexcept { return _vendorId; } - //! Get CPU family ID. - ASMJIT_INLINE uint32_t getFamily() const noexcept { return _family; } - //! Get CPU model ID. - ASMJIT_INLINE uint32_t getModel() const noexcept { return _model; } - //! Get CPU stepping. - ASMJIT_INLINE uint32_t getStepping() const noexcept { return _stepping; } - - //! Get number of hardware threads available. - ASMJIT_INLINE uint32_t getHwThreadsCount() const noexcept { - return _hwThreadsCount; - } - - //! Get all CPU features. - ASMJIT_INLINE const CpuFeatures& getFeatures() const noexcept { return _features; } - //! Get whether CPU has a `feature`. 
- ASMJIT_INLINE bool hasFeature(uint32_t feature) const noexcept { return _features.has(feature); } - //! Add a CPU `feature`. - ASMJIT_INLINE CpuInfo& addFeature(uint32_t feature) noexcept { _features.add(feature); return *this; } - - //! Get CPU vendor string. - ASMJIT_INLINE const char* getVendorString() const noexcept { return _vendorString; } - //! Get CPU brand string. - ASMJIT_INLINE const char* getBrandString() const noexcept { return _brandString; } - - // -------------------------------------------------------------------------- - // [Accessors - ARM] - // -------------------------------------------------------------------------- - - // -------------------------------------------------------------------------- - // [Accessors - X86] - // -------------------------------------------------------------------------- - - //! Get processor type. - ASMJIT_INLINE uint32_t getX86ProcessorType() const noexcept { - return _x86Data._processorType; - } - - //! Get brand index. - ASMJIT_INLINE uint32_t getX86BrandIndex() const noexcept { - return _x86Data._brandIndex; - } - - //! Get flush cache line size. - ASMJIT_INLINE uint32_t getX86FlushCacheLineSize() const noexcept { - return _x86Data._flushCacheLineSize; - } - - //! Get maximum logical processors count. - ASMJIT_INLINE uint32_t getX86MaxLogicalProcessors() const noexcept { - return _x86Data._maxLogicalProcessors; - } - - // -------------------------------------------------------------------------- - // [Statics] - // -------------------------------------------------------------------------- - - //! Get the host CPU information. - ASMJIT_API static const CpuInfo& getHost() noexcept; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - ArchInfo _archInfo; //!< CPU architecture information. - uint32_t _vendorId; //!< CPU vendor id, see \ref Vendor. - uint32_t _family; //!< CPU family ID. - uint32_t _model; //!< CPU model ID. - uint32_t _stepping; //!< CPU stepping. - uint32_t _hwThreadsCount; //!< Number of hardware threads. - CpuFeatures _features; //!< CPU features. - char _vendorString[16]; //!< CPU vendor string. - char _brandString[64]; //!< CPU brand string. - - // Architecture specific data. - union { - ArmData _armData; - X86Data _x86Data; - }; -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_CPUINFO_H diff --git a/src/asmjit/base/func.cpp b/src/asmjit/base/func.cpp deleted file mode 100644 index 5210765..0000000 --- a/src/asmjit/base/func.cpp +++ /dev/null @@ -1,186 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
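Typical use of the class removed here is a single getHost() call followed by feature queries; a whole feature set can also be compared at once with CpuFeatures::hasAll(). A sketch against this pre-refactor API (the refactored headers rename most of these accessors):

  const CpuInfo& cpu = CpuInfo::getHost();      // detected once, cached in a function-local static

  CpuFeatures required;
  required.add(CpuInfo::kX86FeatureAVX2)
          .add(CpuInfo::kX86FeatureFMA);

  if (cpu.getFeatures().hasAll(required)) {
    // Safe to pick an AVX2+FMA code path; these bits are only set when the
    // XCR0 checks in cpuinfo.cpp above confirmed OS support for YMM state.
  }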
- -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/arch.h" -#include "../base/func.h" - -#if defined(ASMJIT_BUILD_X86) -#include "../x86/x86internal_p.h" -#include "../x86/x86operand.h" -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) -#include "../arm/arminternal_p.h" -#include "../arm/armoperand.h" -#endif // ASMJIT_BUILD_ARM - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::CallConv - Init / Reset] -// ============================================================================ - -ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId) noexcept { - reset(); - -#if defined(ASMJIT_BUILD_X86) - if (CallConv::isX86Family(ccId)) - return X86Internal::initCallConv(*this, ccId); -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) - if (CallConv::isArmFamily(ccId)) - return ArmInternal::initCallConv(*this, ccId); -#endif // ASMJIT_BUILD_ARM - - return DebugUtils::errored(kErrorInvalidArgument); -} - -// ============================================================================ -// [asmjit::FuncDetail - Init / Reset] -// ============================================================================ - -ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& sign) { - uint32_t ccId = sign.getCallConv(); - CallConv& cc = _callConv; - - uint32_t argCount = sign.getArgCount(); - if (ASMJIT_UNLIKELY(argCount > kFuncArgCount)) - return DebugUtils::errored(kErrorInvalidArgument); - - ASMJIT_PROPAGATE(cc.init(ccId)); - - uint32_t gpSize = (cc.getArchType() == ArchInfo::kTypeX86) ? 4 : 8; - uint32_t deabstractDelta = TypeId::deabstractDeltaOfSize(gpSize); - - const uint8_t* args = sign.getArgs(); - for (uint32_t i = 0; i < argCount; i++) { - Value& arg = _args[i]; - arg.initTypeId(TypeId::deabstract(args[i], deabstractDelta)); - } - _argCount = static_cast(argCount); - - uint32_t ret = sign.getRet(); - if (ret != TypeId::kVoid) { - _rets[0].initTypeId(TypeId::deabstract(ret, deabstractDelta)); - _retCount = 1; - } - -#if defined(ASMJIT_BUILD_X86) - if (CallConv::isX86Family(ccId)) - return X86Internal::initFuncDetail(*this, sign, gpSize); -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) - if (CallConv::isArmFamily(ccId)) - return ArmInternal::initFuncDetail(*this, sign, gpSize); -#endif // ASMJIT_BUILD_ARM - - // We should never bubble here as if `cc.init()` succeeded then there has to - // be an implementation for the current architecture. However, stay safe. 
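FuncDetail::init() above is the single funnel for signatures: abstract TypeIds are concretized against the target's GP register size and the register/stack assignment is delegated to the architecture backend. A hedged sketch of the calling side; the FuncSignatureX builder and its setRetT/addArgT helpers are assumed names from this vintage of the API and may differ:

  FuncSignatureX sign(CallConv::kIdHost);   // assumed builder type for this API vintage
  sign.setRetT<int>();                      // assumed helpers, shown for illustration only
  sign.addArgT<int>();
  sign.addArgT<int>();

  FuncDetail func;
  Error err = func.init(sign);              // on success: deabstracted arg/ret type ids plus
                                            // the backend-chosen register/stack placement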
- return DebugUtils::errored(kErrorInvalidArgument); -} - -// ============================================================================ -// [asmjit::FuncFrameLayout - Init / Reset] -// ============================================================================ - -ASMJIT_FAVOR_SIZE Error FuncFrameLayout::init(const FuncDetail& func, const FuncFrameInfo& ffi) noexcept { - uint32_t ccId = func.getCallConv().getId(); - -#if defined(ASMJIT_BUILD_X86) - if (CallConv::isX86Family(ccId)) - return X86Internal::initFrameLayout(*this, func, ffi); -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) - if (CallConv::isArmFamily(ccId)) - return ArmInternal::initFrameLayout(*this, func, ffi); -#endif // ASMJIT_BUILD_ARM - - return DebugUtils::errored(kErrorInvalidArgument); -} - -// ============================================================================ -// [asmjit::FuncArgsMapper] -// ============================================================================ - -ASMJIT_FAVOR_SIZE Error FuncArgsMapper::updateFrameInfo(FuncFrameInfo& ffi) const noexcept { - const FuncDetail* func = getFuncDetail(); - if (!func) return DebugUtils::errored(kErrorInvalidState); - - uint32_t ccId = func->getCallConv().getId(); - -#if defined(ASMJIT_BUILD_X86) - if (CallConv::isX86Family(ccId)) - return X86Internal::argsToFrameInfo(*this, ffi); -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) - if (CallConv::isArmFamily(ccId)) - return ArmInternal::argsToFrameInfo(*this, ffi); -#endif // ASMJIT_BUILD_X86 - - return DebugUtils::errored(kErrorInvalidArch); -} - -// ============================================================================ -// [asmjit::FuncUtils] -// ============================================================================ - -ASMJIT_FAVOR_SIZE Error FuncUtils::emitProlog(CodeEmitter* emitter, const FuncFrameLayout& layout) { -#if defined(ASMJIT_BUILD_X86) - if (emitter->getArchInfo().isX86Family()) - return X86Internal::emitProlog(static_cast(emitter), layout); -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) - if (emitter->getArchInfo().isArmFamily()) - return ArmInternal::emitProlog(static_cast(emitter), layout); -#endif // ASMJIT_BUILD_ARM - - return DebugUtils::errored(kErrorInvalidArch); -} - -ASMJIT_FAVOR_SIZE Error FuncUtils::emitEpilog(CodeEmitter* emitter, const FuncFrameLayout& layout) { -#if defined(ASMJIT_BUILD_X86) - if (emitter->getArchInfo().isX86Family()) - return X86Internal::emitEpilog(static_cast(emitter), layout); -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) - if (emitter->getArchInfo().isArmFamily()) - return ArmInternal::emitEpilog(static_cast(emitter), layout); -#endif // ASMJIT_BUILD_ARM - - return DebugUtils::errored(kErrorInvalidArch); -} - -ASMJIT_FAVOR_SIZE Error FuncUtils::allocArgs(CodeEmitter* emitter, const FuncFrameLayout& layout, const FuncArgsMapper& args) { -#if defined(ASMJIT_BUILD_X86) - if (emitter->getArchInfo().isX86Family()) - return X86Internal::allocArgs(static_cast(emitter), layout, args); -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) - if (emitter->getArchInfo().isArmFamily()) - return ArmInternal::allocArgs(static_cast(emitter), layout, args); -#endif // ASMJIT_BUILD_ARM - - return DebugUtils::errored(kErrorInvalidArch); -} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/func.h b/src/asmjit/base/func.h deleted file mode 100644 index c9ab052..0000000 --- a/src/asmjit/base/func.h +++ /dev/null @@ -1,1296 +0,0 @@ -// [AsmJit] -// Complete 
x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_FUNC_H -#define _ASMJIT_BASE_FUNC_H - -#include "../asmjit_build.h" - -// [Dependencies] -#include "../base/arch.h" -#include "../base/operand.h" -#include "../base/utils.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [Forward Declarations] -// ============================================================================ - -class CodeEmitter; - -// ============================================================================ -// [asmjit::CallConv] -// ============================================================================ - -//! Function calling convention. -//! -//! Function calling convention is a scheme that defines how function parameters -//! are passed and how function returns its result. AsmJit defines a variety of -//! architecture and OS specific calling conventions and also provides a compile -//! time detection to make JIT code-generation easier. -struct CallConv { - //! Calling convention id. - ASMJIT_ENUM(Id) { - //! None or invalid (can't be used). - kIdNone = 0, - - // ------------------------------------------------------------------------ - // [Universal] - // ------------------------------------------------------------------------ - - // TODO: To make this possible we need to know target ARCH and ABI. - - /* - - // Universal calling conventions are applicable to any target and are - // converted to target dependent conventions at runtime. The purpose of - // these conventions is to make using functions less target dependent. - - kIdCDecl = 1, - kIdStdCall = 2, - kIdFastCall = 3, - - //! AsmJit specific calling convention designed for calling functions - //! inside a multimedia code like that don't use many registers internally, - //! but are long enough to be called and not inlined. These functions are - //! usually used to calculate trigonometric functions, logarithms, etc... - kIdFastEval2 = 10, - kIdFastEval3 = 11, - kIdFastEval4 = 12, - */ - - // ------------------------------------------------------------------------ - // [X86] - // ------------------------------------------------------------------------ - - //! X86 `__cdecl` calling convention (used by C runtime and libraries). - kIdX86CDecl = 16, - //! X86 `__stdcall` calling convention (used mostly by WinAPI). - kIdX86StdCall = 17, - //! X86 `__thiscall` calling convention (MSVC/Intel). - kIdX86MsThisCall = 18, - //! X86 `__fastcall` convention (MSVC/Intel). - kIdX86MsFastCall = 19, - //! X86 `__fastcall` convention (GCC and Clang). - kIdX86GccFastCall = 20, - //! X86 `regparm(1)` convention (GCC and Clang). - kIdX86GccRegParm1 = 21, - //! X86 `regparm(2)` convention (GCC and Clang). - kIdX86GccRegParm2 = 22, - //! X86 `regparm(3)` convention (GCC and Clang). - kIdX86GccRegParm3 = 23, - - kIdX86FastEval2 = 29, - kIdX86FastEval3 = 30, - kIdX86FastEval4 = 31, - - //! X64 calling convention defined by WIN64-ABI. - //! - //! Links: - //! * . - kIdX86Win64 = 32, - //! X64 calling convention used by Unix platforms (SYSV/AMD64-ABI). - kIdX86SysV64 = 33, - - kIdX64FastEval2 = 45, - kIdX64FastEval3 = 46, - kIdX64FastEval4 = 47, - - // ------------------------------------------------------------------------ - // [ARM] - // ------------------------------------------------------------------------ - - //! 
Legacy calling convention, floating point arguments are passed via GP registers. - kIdArm32SoftFP = 48, - //! Modern calling convention, uses VFP registers to pass floating point arguments. - kIdArm32HardFP = 49, - - // ------------------------------------------------------------------------ - // [Internal] - // ------------------------------------------------------------------------ - - _kIdX86Start = 16, //!< \internal - _kIdX86End = 31, //!< \internal - - _kIdX64Start = 32, //!< \internal - _kIdX64End = 47, //!< \internal - - _kIdArmStart = 48, //!< \internal - _kIdArmEnd = 49, //!< \internal - - // ------------------------------------------------------------------------ - // [Host] - // ------------------------------------------------------------------------ - -#if defined(ASMJIT_DOCGEN) - //! Default calling convention based on the current C++ compiler's settings. - //! - //! NOTE: This should be always the same as `kIdHostCDecl`, but some - //! compilers allow to override the default calling convention. Overriding - //! is not detected at the moment. - kIdHost = DETECTED_AT_COMPILE_TIME, - - //! Default CDECL calling convention based on the current C++ compiler's settings. - kIdHostCDecl = DETECTED_AT_COMPILE_TIME, - - //! Default STDCALL calling convention based on the current C++ compiler's settings. - //! - //! NOTE: If not defined by the host then it's the same as `kIdHostCDecl`. - kIdHostStdCall = DETECTED_AT_COMPILE_TIME, - - //! Compatibility for `__fastcall` calling convention. - //! - //! NOTE: If not defined by the host then it's the same as `kIdHostCDecl`. - kIdHostFastCall = DETECTED_AT_COMPILE_TIME -#elif ASMJIT_ARCH_X86 - kIdHost = kIdX86CDecl, - kIdHostCDecl = kIdX86CDecl, - kIdHostStdCall = kIdX86StdCall, - kIdHostFastCall = ASMJIT_CC_MSC ? kIdX86MsFastCall : - ASMJIT_CC_GCC ? kIdX86GccFastCall : - ASMJIT_CC_CLANG ? kIdX86GccFastCall : kIdNone, - kIdHostFastEval2 = kIdX86FastEval2, - kIdHostFastEval3 = kIdX86FastEval3, - kIdHostFastEval4 = kIdX86FastEval4 -#elif ASMJIT_ARCH_X64 - kIdHost = ASMJIT_OS_WINDOWS ? kIdX86Win64 : kIdX86SysV64, - kIdHostCDecl = kIdHost, // Doesn't exist, redirected to host. - kIdHostStdCall = kIdHost, // Doesn't exist, redirected to host. - kIdHostFastCall = kIdHost, // Doesn't exist, redirected to host. - kIdHostFastEval2 = kIdX64FastEval2, - kIdHostFastEval3 = kIdX64FastEval3, - kIdHostFastEval4 = kIdX64FastEval4 -#elif ASMJIT_ARCH_ARM32 -# if defined(__SOFTFP__) - kIdHost = kIdArm32SoftFP, -# else - kIdHost = kIdArm32HardFP, -# endif - // These don't exist on ARM. - kIdHostCDecl = kIdHost, // Doesn't exist, redirected to host. - kIdHostStdCall = kIdHost, // Doesn't exist, redirected to host. - kIdHostFastCall = kIdHost // Doesn't exist, redirected to host. -#else -# error "[asmjit] Couldn't determine the target's calling convention." -#endif - }; - - //! Calling convention algorithm. - //! - //! This is AsmJit specific. It basically describes how should AsmJit convert - //! the function arguments defined by `FuncSignature` into register ids or - //! stack offsets. The default algorithm is a standard algorithm that assigns - //! registers first, and then assigns stack. The Win64 algorithm does register - //! shadowing as defined by `WIN64` calling convention - it applies to 64-bit - //! calling conventions only. - ASMJIT_ENUM(Algorithm) { - kAlgorithmDefault = 0, //!< Default algorithm (cross-platform). - kAlgorithmWin64 = 1 //!< WIN64 specific algorithm. - }; - - //! Calling convention flags. 
-  ASMJIT_ENUM(Flags) {
-    kFlagCalleePopsStack = 0x01, //!< Callee is responsible for cleaning up the stack.
-    kFlagPassFloatsByVec = 0x02, //!< Pass F32 and F64 arguments by VEC128 register.
-    kFlagVectorCall      = 0x04, //!< This is a '__vectorcall' calling convention.
-    kFlagIndirectVecArgs = 0x08  //!< Pass vector arguments indirectly (as a pointer).
-  };
-
-  //! Internal limits of AsmJit/CallConv.
-  ASMJIT_ENUM(Limits) {
-    kMaxVRegKinds = Globals::kMaxVRegKinds,
-    kNumRegArgsPerKind = 8
-  };
-
-  //! Passed registers' order.
-  union RegOrder {
-    uint8_t id[kNumRegArgsPerKind]; //!< Passed registers, ordered.
-    uint32_t packed[(kNumRegArgsPerKind + 3) / 4];
-  };
-
-  // --------------------------------------------------------------------------
-  // [Utilities]
-  // --------------------------------------------------------------------------
-
-  static ASMJIT_INLINE bool isX86Family(uint32_t ccId) noexcept { return ccId >= _kIdX86Start && ccId <= _kIdX64End; }
-  static ASMJIT_INLINE bool isArmFamily(uint32_t ccId) noexcept { return ccId >= _kIdArmStart && ccId <= _kIdArmEnd; }
-
-  // --------------------------------------------------------------------------
-  // [Init / Reset]
-  // --------------------------------------------------------------------------
-
-  ASMJIT_API Error init(uint32_t ccId) noexcept;
-
-  ASMJIT_INLINE void reset() noexcept {
-    ::memset(this, 0, sizeof(*this));
-    ::memset(_passedOrder, 0xFF, sizeof(_passedOrder));
-  }
-
-  // --------------------------------------------------------------------------
-  // [Accessors]
-  // --------------------------------------------------------------------------
-
-  //! Get calling convention id, see \ref Id.
-  ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
-  //! Set calling convention id, see \ref Id.
-  ASMJIT_INLINE void setId(uint32_t id) noexcept { _id = static_cast<uint8_t>(id); }
-
-  //! Get architecture type.
-  ASMJIT_INLINE uint32_t getArchType() const noexcept { return _archType; }
-  //! Set architecture type.
-  ASMJIT_INLINE void setArchType(uint32_t archType) noexcept { _archType = static_cast<uint8_t>(archType); }
-
-  //! Get calling convention algorithm, see \ref Algorithm.
-  ASMJIT_INLINE uint32_t getAlgorithm() const noexcept { return _algorithm; }
-  //! Set calling convention algorithm, see \ref Algorithm.
-  ASMJIT_INLINE void setAlgorithm(uint32_t algorithm) noexcept { _algorithm = static_cast<uint8_t>(algorithm); }
-
-  //! Get if the calling convention has the given `flag` set.
-  ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
-  //! Get calling convention flags, see \ref Flags.
-  ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
-  //! Set calling convention flags, see \ref Flags.
-  ASMJIT_INLINE void setFlags(uint32_t flag) noexcept { _flags = flag; }
-  //! Add calling convention flags, see \ref Flags.
-  ASMJIT_INLINE void addFlags(uint32_t flag) noexcept { _flags |= flag; }
-
-  //! Get the natural stack alignment.
-  ASMJIT_INLINE uint32_t getNaturalStackAlignment() const noexcept { return _naturalStackAlignment; }
-
-  //! Set the natural stack alignment.
-  //!
-  //! This function can be used to override the default stack alignment in case
-  //! you know that its alignment is different. For example, it allows implementing
-  //! custom calling conventions that guarantee a higher stack alignment.
-  ASMJIT_INLINE void setNaturalStackAlignment(uint32_t value) noexcept {
-    ASMJIT_ASSERT(value < 256);
-    _naturalStackAlignment = static_cast<uint8_t>(value);
-  }
-
-  //!
Get if this calling convention specifies 'SpillZone'. - ASMJIT_INLINE bool hasSpillZone() const noexcept { return _spillZoneSize != 0; } - //! Get size of 'SpillZone'. - ASMJIT_INLINE uint32_t getSpillZoneSize() const noexcept { return _spillZoneSize; } - //! Set size of 'SpillZone'. - ASMJIT_INLINE void setSpillZoneSize(uint32_t size) noexcept { _spillZoneSize = static_cast(size); } - - //! Get if this calling convention specifies 'RedZone'. - ASMJIT_INLINE bool hasRedZone() const noexcept { return _redZoneSize != 0; } - //! Get size of 'RedZone'. - ASMJIT_INLINE uint32_t getRedZoneSize() const noexcept { return _redZoneSize; } - //! Set size of 'RedZone'. - ASMJIT_INLINE void setRedZoneSize(uint32_t size) noexcept { _redZoneSize = static_cast(size); } - - ASMJIT_INLINE const uint8_t* getPassedOrder(uint32_t kind) const noexcept { - ASMJIT_ASSERT(kind < kMaxVRegKinds); - return _passedOrder[kind].id; - } - - ASMJIT_INLINE uint32_t getPassedRegs(uint32_t kind) const noexcept { - ASMJIT_ASSERT(kind < kMaxVRegKinds); - return _passedRegs[kind]; - } - - ASMJIT_INLINE void _setPassedPacked(uint32_t kind, uint32_t p0, uint32_t p1) noexcept { - ASMJIT_ASSERT(kind < kMaxVRegKinds); - - _passedOrder[kind].packed[0] = p0; - _passedOrder[kind].packed[1] = p1; - } - - ASMJIT_INLINE void setPassedToNone(uint32_t kind) noexcept { - ASMJIT_ASSERT(kind < kMaxVRegKinds); - - _setPassedPacked(kind, ASMJIT_PACK32_4x8(0xFF, 0xFF, 0xFF, 0xFF), - ASMJIT_PACK32_4x8(0xFF, 0xFF, 0xFF, 0xFF)); - _passedRegs[kind] = 0; - } - - ASMJIT_INLINE void setPassedOrder(uint32_t kind, uint32_t a0, uint32_t a1 = 0xFF, uint32_t a2 = 0xFF, uint32_t a3 = 0xFF, uint32_t a4 = 0xFF, uint32_t a5 = 0xFF, uint32_t a6 = 0xFF, uint32_t a7 = 0xFF) noexcept { - ASMJIT_ASSERT(kind < kMaxVRegKinds); - - _setPassedPacked(kind, ASMJIT_PACK32_4x8(a0, a1, a2, a3), - ASMJIT_PACK32_4x8(a4, a5, a6, a7)); - - // NOTE: This should always be called with all arguments known at compile - // time, so even if it looks scary it should be translated to a single - // instruction. - _passedRegs[kind] = (a0 != 0xFF ? 1U << a0 : 0U) | - (a1 != 0xFF ? 1U << a1 : 0U) | - (a2 != 0xFF ? 1U << a2 : 0U) | - (a3 != 0xFF ? 1U << a3 : 0U) | - (a4 != 0xFF ? 1U << a4 : 0U) | - (a5 != 0xFF ? 1U << a5 : 0U) | - (a6 != 0xFF ? 1U << a6 : 0U) | - (a7 != 0xFF ? 1U << a7 : 0U) ; - } - - ASMJIT_INLINE uint32_t getPreservedRegs(uint32_t kind) const noexcept { - ASMJIT_ASSERT(kind < kMaxVRegKinds); - return _preservedRegs[kind]; - } - - - ASMJIT_INLINE void setPreservedRegs(uint32_t kind, uint32_t regs) noexcept { - ASMJIT_ASSERT(kind < kMaxVRegKinds); - _preservedRegs[kind] = regs; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint8_t _id; //!< Calling convention id, see \ref Id. - uint8_t _archType; //!< Architecture type (see \ref ArchInfo::Type). - uint8_t _algorithm; //!< Calling convention algorithm. - uint8_t _flags; //!< Calling convention flags. - - uint8_t _naturalStackAlignment; //!< Natural stack alignment as defined by OS/ABI. - uint8_t _spillZoneSize; //!< Spill zone size (WIN64 == 32 bytes). - uint16_t _redZoneSize; //!< Red zone size (AMD64 == 128 bytes). - - RegOrder _passedOrder[kMaxVRegKinds]; //!< Passed registers' order, per kind. - uint32_t _passedRegs[kMaxVRegKinds]; //!< Mask of all passed registers, per kind. - uint32_t _preservedRegs[kMaxVRegKinds];//!< Mask of all preserved registers, per kind. 
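
  // Editorial sketch (not part of the original header): CallConv::init() fills
  // the tables above per target; a SysV AMD64-like setup, for example, would
  // assign the GP argument order and preserved set roughly as:
  //
  //   cc.setPassedOrder(kindGp, 7, 6, 2, 1, 8, 9);   // rdi, rsi, rdx, rcx, r8, r9.
  //   cc.setPreservedRegs(kindGp, Utils::mask(3, 4, 5, 12, 13, 14, 15));
  //
  // `kindGp` and Utils::mask() are assumed helpers defined elsewhere in this tree.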
-}; - -// ============================================================================ -// [asmjit::FuncArgIndex] -// ============================================================================ - -//! Function argument index (lo/hi). -ASMJIT_ENUM(FuncArgIndex) { - //! Maximum number of function arguments supported by AsmJit. - kFuncArgCount = 16, - //! Extended maximum number of arguments (used internally). - kFuncArgCountLoHi = kFuncArgCount * 2, - - //! Index to the LO part of function argument (default). - //! - //! This value is typically omitted and added only if there is HI argument - //! accessed. - kFuncArgLo = 0, - - //! Index to the HI part of function argument. - //! - //! HI part of function argument depends on target architecture. On x86 it's - //! typically used to transfer 64-bit integers (they form a pair of 32-bit - //! integers). - kFuncArgHi = kFuncArgCount -}; - -// ============================================================================ -// [asmjit::FuncSignature] -// ============================================================================ - -//! Function signature. -//! -//! Contains information about function return type, count of arguments and -//! their TypeIds. Function signature is a low level structure which doesn't -//! contain platform specific or calling convention specific information. -struct FuncSignature { - enum { - //! Doesn't have variable number of arguments (`...`). - kNoVarArgs = 0xFF - }; - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - //! Initialize the function signature. - ASMJIT_INLINE void init(uint32_t ccId, uint32_t ret, const uint8_t* args, uint32_t argCount) noexcept { - ASMJIT_ASSERT(ccId <= 0xFF); - ASMJIT_ASSERT(argCount <= 0xFF); - - _callConv = static_cast(ccId); - _argCount = static_cast(argCount); - _vaIndex = kNoVarArgs; - _ret = ret; - _args = args; - } - - ASMJIT_INLINE void reset() noexcept { - memset(this, 0, sizeof(*this)); - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the function's calling convention. - ASMJIT_INLINE uint32_t getCallConv() const noexcept { return _callConv; } - - //! Get if the function has variable number of arguments (...). - ASMJIT_INLINE bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; } - //! Get the variable arguments (...) index, `kNoVarArgs` if none. - ASMJIT_INLINE uint32_t getVAIndex() const noexcept { return _vaIndex; } - - //! Get the number of function arguments. - ASMJIT_INLINE uint32_t getArgCount() const noexcept { return _argCount; } - - ASMJIT_INLINE bool hasRet() const noexcept { return _ret != TypeId::kVoid; } - //! Get the return value type. - ASMJIT_INLINE uint32_t getRet() const noexcept { return _ret; } - - //! Get the type of the argument at index `i`. - ASMJIT_INLINE uint32_t getArg(uint32_t i) const noexcept { - ASMJIT_ASSERT(i < _argCount); - return _args[i]; - } - //! Get the array of function arguments' types. - ASMJIT_INLINE const uint8_t* getArgs() const noexcept { return _args; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint8_t _callConv; //!< Calling convention id. - uint8_t _argCount; //!< Count of arguments. 
- uint8_t _vaIndex; //!< Index to a first vararg or `kNoVarArgs`. - uint8_t _ret; //!< TypeId of a return value. - const uint8_t* _args; //!< TypeIds of function arguments. -}; - -// ============================================================================ -// [asmjit::FuncSignatureT] -// ============================================================================ - -//! \internal -#define T(TYPE) TypeIdOf::kTypeId - -//! Static function signature (no arguments). -template -class FuncSignature0 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature0(uint32_t ccId = CallConv::kIdHost) noexcept { - init(ccId, T(RET), nullptr, 0); - } -}; - -//! Static function signature (1 argument). -template -class FuncSignature1 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature1(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { T(A0) }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Static function signature (2 arguments). -template -class FuncSignature2 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature2(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { T(A0), T(A1) }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Static function signature (3 arguments). -template -class FuncSignature3 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature3(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { T(A0), T(A1), T(A2) }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Static function signature (4 arguments). -template -class FuncSignature4 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature4(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3) }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Static function signature (5 arguments). -template -class FuncSignature5 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature5(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4) }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Static function signature (6 arguments). -template -class FuncSignature6 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature6(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4), T(A5) }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Static function signature (7 arguments). -template -class FuncSignature7 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature7(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4), T(A5), T(A6) }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Static function signature (8 arguments). -template -class FuncSignature8 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature8(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4), T(A5), T(A6), T(A7) }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Static function signature (9 arguments). 
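
// Editorial sketch (not part of the original header): the fixed-arity helpers
// above and below are thin wrappers over FuncSignature::init(), instantiated
// with the return type first (template parameter lists are elided in this
// listing), e.g.:
//
//   FuncSignature0<void> v;              // Describes `void fn()`.
//   FuncSignature2<int, int, int> sig;   // Describes `int fn(int, int)`, host calling convention.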
-template -class FuncSignature9 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature9(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4), T(A5), T(A6), T(A7), T(A8) }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Static function signature (10 arguments). -template -class FuncSignature10 : public FuncSignature { -public: - ASMJIT_INLINE FuncSignature10(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4), T(A5), T(A6), T(A7), T(A8), T(A9) }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -#if ASMJIT_CC_HAS_VARIADIC_TEMPLATES -//! Static function signature (variadic). -template -class FuncSignatureT : public FuncSignature { -public: - ASMJIT_INLINE FuncSignatureT(uint32_t ccId = CallConv::kIdHost) noexcept { - static const uint8_t args[] = { (T(ARGS))... }; - init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; -#endif // ASMJIT_CC_HAS_VARIADIC_TEMPLATES - -#undef T - -// ============================================================================ -// [asmjit::FuncSignatureX] -// ============================================================================ - -//! Dynamic function signature. -class FuncSignatureX : public FuncSignature { -public: - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE FuncSignatureX(uint32_t ccId = CallConv::kIdHost) noexcept { - init(ccId, TypeId::kVoid, _builderArgList, 0); - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void setCallConv(uint32_t ccId) noexcept { - ASMJIT_ASSERT(ccId <= 0xFF); - _callConv = static_cast(ccId); - } - - //! Set the return type to `retType`. - ASMJIT_INLINE void setRet(uint32_t retType) noexcept { _ret = retType; } - //! Set the return type based on `T`. - template - ASMJIT_INLINE void setRetT() noexcept { setRet(TypeIdOf::kTypeId); } - - //! Set the argument at index `i` to the `type` - ASMJIT_INLINE void setArg(uint32_t i, uint32_t type) noexcept { - ASMJIT_ASSERT(i < _argCount); - _builderArgList[i] = type; - } - //! Set the argument at index `i` to the type based on `T`. - template - ASMJIT_INLINE void setArgT(uint32_t i) noexcept { setArg(i, TypeIdOf::kTypeId); } - - //! Append an argument of `type` to the function prototype. - ASMJIT_INLINE void addArg(uint32_t type) noexcept { - ASMJIT_ASSERT(_argCount < kFuncArgCount); - _builderArgList[_argCount++] = static_cast(type); - } - //! Append an argument of type based on `T` to the function prototype. - template - ASMJIT_INLINE void addArgT() noexcept { addArg(TypeIdOf::kTypeId); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint8_t _builderArgList[kFuncArgCount]; -}; - -// ============================================================================ -// [asmjit::FuncDetail] -// ============================================================================ - -//! Function detail - CallConv and expanded FuncSignature. -//! -//! Function details is architecture and OS dependent representation of function. -//! 
It contains calling convention and expanded function signature so all -//! arguments have assigned either register type & id or stack address. -class FuncDetail { -public: - ASMJIT_ENUM(Limits) { - kMaxVRegKinds = Globals::kMaxVRegKinds - }; - - //! Argument or return value as defined by `FuncSignature`, but with register - //! or stack address (and other metadata) assigned. - struct Value { - ASMJIT_ENUM(Parts) { - kTypeIdShift = 24, - kTypeIdMask = 0xFF000000U, - - kRegTypeShift = 8, - kRegTypeMask = 0x0000FF00U, - - kRegIdShift = 0, - kRegIdMask = 0x000000FFU, - - kStackOffsetShift = 0, - kStackOffsetMask = 0x0000FFFFU, - - kIsByReg = 0x00010000U, - kIsByStack = 0x00020000U, - kIsIndirect = 0x00040000U - }; - - //! Get if this value is initialized (i.e. contains a valid data). - ASMJIT_INLINE bool isInitialized() const noexcept { return _value != 0; } - //! Initialize this in/out by a given `typeId`. - ASMJIT_INLINE void initTypeId(uint32_t typeId) noexcept { _value = typeId << kTypeIdShift; } - //! Initialize this in/out by a given `typeId`, `regType`, and `regId`. - ASMJIT_INLINE void initReg(uint32_t typeId, uint32_t regType, uint32_t regId) noexcept { - _value = (typeId << kTypeIdShift) | (regType << kRegTypeShift) | (regId << kRegIdShift) | kIsByReg; - } - //! Initialize this in/out by a given `typeId` and `offset`. - ASMJIT_INLINE void initStack(uint32_t typeId, uint32_t stackOffset) noexcept { - _value = (typeId << kTypeIdShift) | (stackOffset << kStackOffsetShift) | kIsByStack; - } - //! Reset the value to its uninitialized and unassigned state. - ASMJIT_INLINE void reset() noexcept { _value = 0; } - - ASMJIT_INLINE void assignToReg(uint32_t regType, uint32_t regId) noexcept { - ASMJIT_ASSERT(!isAssigned()); - _value |= (regType << kRegTypeShift) | (regId << kRegIdShift) | kIsByReg; - } - - ASMJIT_INLINE void assignToStack(int32_t offset) noexcept { - ASMJIT_ASSERT(!isAssigned()); - _value |= (offset << kStackOffsetShift) | kIsByStack; - } - - //! Get if this argument is passed by register. - ASMJIT_INLINE bool byReg() const noexcept { return (_value & kIsByReg) != 0; } - //! Get if this argument is passed by stack. - ASMJIT_INLINE bool byStack() const noexcept { return (_value & kIsByStack) != 0; } - //! Get if this argument is passed by register. - ASMJIT_INLINE bool isAssigned() const noexcept { return (_value & (kIsByReg | kIsByStack)) != 0; } - //! Get if this argument is passed through a pointer (used by WIN64 to pass XMM|YMM|ZMM). - ASMJIT_INLINE bool isIndirect() const noexcept { return (_value & kIsIndirect) != 0; } - - //! Get virtual type of this argument or return value. - ASMJIT_INLINE uint32_t getTypeId() const noexcept { return _value >> kTypeIdShift; } - //! Get a register type of the register used to pass the argument or return the value. - ASMJIT_INLINE uint32_t getRegType() const noexcept { return (_value & kRegTypeMask) >> kRegTypeShift; } - //! Get a physical id of the register used to pass the argument or return the value. - ASMJIT_INLINE uint32_t getRegId() const noexcept { return (_value & kRegIdMask) >> kRegIdShift; } - //! Get a stack offset of this argument (always positive). 
- ASMJIT_INLINE int32_t getStackOffset() const noexcept { return (_value & kStackOffsetMask) >> kStackOffsetShift; } - - uint32_t _value; - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE FuncDetail() noexcept { reset(); } - ASMJIT_INLINE FuncDetail(const FuncDetail& other) noexcept { - ::memcpy(this, &other, sizeof(*this)); - } - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - //! Initialize this `FuncDetail` to the given signature. - ASMJIT_API Error init(const FuncSignature& sign); - ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); } - - // -------------------------------------------------------------------------- - // [Accessors - Calling Convention] - // -------------------------------------------------------------------------- - - //! Get the function's calling convention, see `CallConv`. - ASMJIT_INLINE const CallConv& getCallConv() const noexcept { return _callConv; } - - //! Get CallConv flags, see \ref CallConv::Flags. - ASMJIT_INLINE uint32_t getFlags() const noexcept { return _callConv.getFlags(); } - //! Check if a CallConv `flag` is set, see \ref CallConv::Flags. - ASMJIT_INLINE bool hasFlag(uint32_t ccFlag) const noexcept { return _callConv.hasFlag(ccFlag); } - - // -------------------------------------------------------------------------- - // [Accessors - Arguments and Return] - // -------------------------------------------------------------------------- - - //! Get count of function return values. - ASMJIT_INLINE uint32_t getRetCount() const noexcept { return _retCount; } - //! Get the number of function arguments. - ASMJIT_INLINE uint32_t getArgCount() const noexcept { return _argCount; } - - //! Get whether the function has a return value. - ASMJIT_INLINE bool hasRet() const noexcept { return _retCount != 0; } - //! Get function return value. - ASMJIT_INLINE Value& getRet(size_t index = 0) noexcept { - ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_rets)); - return _rets[index]; - } - //! Get function return value (const). - ASMJIT_INLINE const Value& getRet(size_t index = 0) const noexcept { - ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_rets)); - return _rets[index]; - } - - //! Get function arguments array. - ASMJIT_INLINE Value* getArgs() noexcept { return _args; } - //! Get function arguments array (const). - ASMJIT_INLINE const Value* getArgs() const noexcept { return _args; } - - ASMJIT_INLINE bool hasArg(size_t index) const noexcept { - ASMJIT_ASSERT(index < kFuncArgCountLoHi); - return _args[index].isInitialized(); - } - - //! Get function argument at index `index`. - ASMJIT_INLINE Value& getArg(size_t index) noexcept { - ASMJIT_ASSERT(index < kFuncArgCountLoHi); - return _args[index]; - } - - //! Get function argument at index `index`. - ASMJIT_INLINE const Value& getArg(size_t index) const noexcept { - ASMJIT_ASSERT(index < kFuncArgCountLoHi); - return _args[index]; - } - - ASMJIT_INLINE void resetArg(size_t index) noexcept { - ASMJIT_ASSERT(index < kFuncArgCountLoHi); - _args[index].reset(); - } - - //! Get if the function passes one or more argument by stack. - ASMJIT_INLINE bool hasStackArgs() const noexcept { return _argStackSize != 0; } - //! Get stack size needed for function arguments passed on the stack. 
- ASMJIT_INLINE uint32_t getArgStackSize() const noexcept { return _argStackSize; } - - ASMJIT_INLINE uint32_t getNaturalStackAlignment() const noexcept { return _callConv.getNaturalStackAlignment(); } - ASMJIT_INLINE uint32_t getSpillZoneSize() const noexcept { return _callConv.getSpillZoneSize(); } - ASMJIT_INLINE uint32_t getRedZoneSize() const noexcept { return _callConv.getRedZoneSize(); } - - ASMJIT_INLINE uint32_t getPassedRegs(uint32_t kind) const noexcept { return _callConv.getPassedRegs(kind); } - ASMJIT_INLINE uint32_t getPreservedRegs(uint32_t kind) const noexcept { return _callConv.getPreservedRegs(kind); } - - ASMJIT_INLINE uint32_t getUsedRegs(uint32_t kind) const noexcept { - ASMJIT_ASSERT(kind < kMaxVRegKinds); - return _usedRegs[kind]; - } - - ASMJIT_INLINE void addUsedRegs(uint32_t kind, uint32_t regs) noexcept { - ASMJIT_ASSERT(kind < kMaxVRegKinds); - _usedRegs[kind] |= regs; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - CallConv _callConv; //!< Calling convention. - uint8_t _argCount; //!< Number of function arguments. - uint8_t _retCount; //!< Number of function return values. - uint32_t _usedRegs[kMaxVRegKinds]; //!< Registers that contains arguments (signature dependent). - uint32_t _argStackSize; //!< Size of arguments passed by stack. - Value _rets[2]; //!< Function return values. - Value _args[kFuncArgCountLoHi]; //!< Function arguments. -}; - -// ============================================================================ -// [asmjit::FuncFrameInfo] -// ============================================================================ - -//! Function-frame information. -//! -//! This structure can be used to create a function frame in a cross-platform -//! way. It contains information about the function's stack to be used and -//! registers to be saved and restored. Based on this information in can -//! calculate the optimal layout of a function as \ref FuncFrameLayout. -struct FuncFrameInfo { - ASMJIT_ENUM(Limits) { - kMaxVRegKinds = Globals::kMaxVRegKinds - }; - - //! Attributes. - //! - //! Attributes are designed in a way that all are initially false, and user - //! or function-frame finalizer sets them when necessary. Architecture-specific - //! attributes are prefixed with the architecture name. - ASMJIT_ENUM(Attributes) { - kAttrPreserveFP = 0x00000001U, //!< Preserve frame pointer (EBP|RBP). - kAttrCompactPE = 0x00000002U, //!< Use smaller, but possibly slower prolog/epilog. - kAttrHasCalls = 0x00000004U, //!< Function calls other functions (is not leaf). - - kX86AttrAlignedVecSR = 0x00010000U, //!< Use aligned save/restore of VEC regs. - kX86AttrMmxCleanup = 0x00020000U, //!< Emit EMMS instruction in epilog (X86). - kX86AttrAvxCleanup = 0x00040000U, //!< Emit VZEROUPPER instruction in epilog (X86). - kX86AttrAvxEnabled = 0x00080000U //!< Use AVX instead of SSE for all operations (X86). 
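
    // Editorial note (not part of the original header): these attributes are
    // plain bit flags and compose with OR; for example, a frame that preserves
    // EBP|RBP and emits VZEROUPPER in the epilog would be configured as:
    //
    //   FuncFrameInfo ffi;
    //   ffi.addAttributes(FuncFrameInfo::kAttrPreserveFP | FuncFrameInfo::kX86AttrAvxCleanup);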
- }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE FuncFrameInfo() noexcept { reset(); } - - ASMJIT_INLINE FuncFrameInfo(const FuncFrameInfo& other) noexcept { - ::memcpy(this, &other, sizeof(*this)); - } - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void reset() noexcept { - ::memset(this, 0, sizeof(*this)); - _stackArgsRegId = Globals::kInvalidRegId; - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get frame-info flags, see \ref Attributes. - ASMJIT_INLINE uint32_t getAttributes() const noexcept { return _attributes; } - //! Check if a frame-info `flag` is set, see \ref Attributes. - ASMJIT_INLINE bool hasAttribute(uint32_t attr) const noexcept { return (_attributes & attr) != 0; } - //! Add `flags` to the frame-info, see \ref Attributes. - ASMJIT_INLINE void addAttributes(uint32_t attrs) noexcept { _attributes |= attrs; } - //! Clear `flags` from the frame-info, see \ref Attributes. - ASMJIT_INLINE void clearAttributes(uint32_t attrs) noexcept { _attributes &= ~attrs; } - - //! Get if the function preserves frame pointer (EBP|ESP on X86). - ASMJIT_INLINE bool hasPreservedFP() const noexcept { return (_attributes & kAttrPreserveFP) != 0; } - //! Enable preserved frame pointer. - ASMJIT_INLINE void enablePreservedFP() noexcept { _attributes |= kAttrPreserveFP; } - //! Disable preserved frame pointer. - ASMJIT_INLINE void disablePreservedFP() noexcept { _attributes &= ~kAttrPreserveFP; } - - //! Get if the function prolog and epilog should be compacted (as small as possible). - ASMJIT_INLINE bool hasCompactPE() const noexcept { return (_attributes & kAttrCompactPE) != 0; } - //! Enable compact prolog/epilog. - ASMJIT_INLINE void enableCompactPE() noexcept { _attributes |= kAttrCompactPE; } - //! Disable compact prolog/epilog. - ASMJIT_INLINE void disableCompactPE() noexcept { _attributes &= ~kAttrCompactPE; } - - //! Get if the function calls other functions. - ASMJIT_INLINE bool hasCalls() const noexcept { return (_attributes & kAttrHasCalls) != 0; } - //! Set `kFlagHasCalls` to true. - ASMJIT_INLINE void enableCalls() noexcept { _attributes |= kAttrHasCalls; } - //! Set `kFlagHasCalls` to false. - ASMJIT_INLINE void disableCalls() noexcept { _attributes &= ~kAttrHasCalls; } - - //! Get if the function contains MMX cleanup - 'emms' instruction in epilog. - ASMJIT_INLINE bool hasMmxCleanup() const noexcept { return (_attributes & kX86AttrMmxCleanup) != 0; } - //! Enable MMX cleanup. - ASMJIT_INLINE void enableMmxCleanup() noexcept { _attributes |= kX86AttrMmxCleanup; } - //! Disable MMX cleanup. - ASMJIT_INLINE void disableMmxCleanup() noexcept { _attributes &= ~kX86AttrMmxCleanup; } - - //! Get if the function contains AVX cleanup - 'vzeroupper' instruction in epilog. - ASMJIT_INLINE bool hasAvxCleanup() const noexcept { return (_attributes & kX86AttrAvxCleanup) != 0; } - //! Enable AVX cleanup. - ASMJIT_INLINE void enableAvxCleanup() noexcept { _attributes |= kX86AttrAvxCleanup; } - //! Disable AVX cleanup. - ASMJIT_INLINE void disableAvxCleanup() noexcept { _attributes &= ~kX86AttrAvxCleanup; } - - //! 
Get if AVX is enabled - the function uses AVX (VEX-encoded) instructions instead of SSE (X86).
-  ASMJIT_INLINE bool isAvxEnabled() const noexcept { return (_attributes & kX86AttrAvxEnabled) != 0; }
-  //! Enable AVX (use AVX instead of SSE for all SIMD operations).
-  ASMJIT_INLINE void enableAvx() noexcept { _attributes |= kX86AttrAvxEnabled; }
-  //! Disable AVX (use SSE for all SIMD operations).
-  ASMJIT_INLINE void disableAvx() noexcept { _attributes &= ~kX86AttrAvxEnabled; }
-
-  //! Get which registers (by `kind`) are saved/restored in prolog/epilog, respectively.
-  ASMJIT_INLINE uint32_t getDirtyRegs(uint32_t kind) const noexcept {
-    ASMJIT_ASSERT(kind < kMaxVRegKinds);
-    return _dirtyRegs[kind];
-  }
-
-  //! Set which registers (by `kind`) are saved/restored in prolog/epilog, respectively.
-  ASMJIT_INLINE void setDirtyRegs(uint32_t kind, uint32_t regs) noexcept {
-    ASMJIT_ASSERT(kind < kMaxVRegKinds);
-    _dirtyRegs[kind] = regs;
-  }
-
-  //! Add registers (by `kind`) to saved/restored registers.
-  ASMJIT_INLINE void addDirtyRegs(uint32_t kind, uint32_t regs) noexcept {
-    ASMJIT_ASSERT(kind < kMaxVRegKinds);
-    _dirtyRegs[kind] |= regs;
-  }
-
-  //! Mark registers of all kinds as dirty (all will be saved/restored).
-  ASMJIT_INLINE void setAllDirty() noexcept {
-    _dirtyRegs[0] = 0xFFFFFFFFU;
-    _dirtyRegs[1] = 0xFFFFFFFFU;
-    _dirtyRegs[2] = 0xFFFFFFFFU;
-    _dirtyRegs[3] = 0xFFFFFFFFU;
-  }
-
-  //! Mark all registers of the given `kind` as dirty.
-  ASMJIT_INLINE void setAllDirty(uint32_t kind) noexcept {
-    ASMJIT_ASSERT(kind < kMaxVRegKinds);
-    _dirtyRegs[kind] = 0xFFFFFFFFU;
-  }
-
-  //! Get stack-frame size used by the function.
-  ASMJIT_INLINE uint32_t getStackFrameSize() const noexcept { return _stackFrameSize; }
-  //! Get call-frame size used by the function.
-  ASMJIT_INLINE uint32_t getCallFrameSize() const noexcept { return _callFrameSize; }
-
-  //! Get minimum stack-frame alignment required by the function.
-  ASMJIT_INLINE uint32_t getStackFrameAlignment() const noexcept { return _stackFrameAlignment; }
-  //! Get minimum call-frame alignment required by the function.
- ASMJIT_INLINE uint32_t getCallFrameAlignment() const noexcept { return _callFrameAlignment; } - - ASMJIT_INLINE void setStackFrameSize(uint32_t size) noexcept { _stackFrameSize = size; } - ASMJIT_INLINE void setCallFrameSize(uint32_t size) noexcept { _callFrameSize = size; } - - ASMJIT_INLINE void setStackFrameAlignment(uint32_t value) noexcept { - ASMJIT_ASSERT(value < 256); - _stackFrameAlignment = static_cast(value); - } - - ASMJIT_INLINE void setCallFrameAlignment(uint32_t value) noexcept { - ASMJIT_ASSERT(value < 256); - _callFrameAlignment = static_cast(value); - } - - ASMJIT_INLINE void mergeStackFrameSize(uint32_t size) noexcept { _stackFrameSize = std::max(_stackFrameSize, size); } - ASMJIT_INLINE void mergeCallFrameSize(uint32_t size) noexcept { _callFrameSize = std::max(_callFrameSize, size); } - - ASMJIT_INLINE void mergeStackFrameAlignment(uint32_t value) noexcept { - ASMJIT_ASSERT(value < 256); - _stackFrameAlignment = static_cast(std::max(_stackFrameAlignment, value)); - } - - ASMJIT_INLINE void mergeCallFrameAlignment(uint32_t value) noexcept { - ASMJIT_ASSERT(value < 256); - _callFrameAlignment = static_cast(std::max(_callFrameAlignment, value)); - } - - ASMJIT_INLINE bool hasStackArgsRegId() const noexcept { - return _stackArgsRegId != Globals::kInvalidRegId; - } - ASMJIT_INLINE uint32_t getStackArgsRegId() const noexcept { return _stackArgsRegId; } - ASMJIT_INLINE void setStackArgsRegId(uint32_t regId) { _stackArgsRegId = regId; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint32_t _attributes; //!< Function attributes. - uint32_t _dirtyRegs[kMaxVRegKinds]; //!< Registers used by the function. - - uint8_t _stackFrameAlignment; //!< Minimum alignment of stack-frame. - uint8_t _callFrameAlignment; //!< Minimum alignment of call-frame. - uint8_t _stackArgsRegId; //!< Register that holds base-address to arguments passed by stack. - - uint32_t _stackFrameSize; //!< Size of a stack-frame used by the function. - uint32_t _callFrameSize; //!< Size of a call-frame (not part of _stackFrameSize). -}; - -// ============================================================================ -// [asmjit::FuncFrameLayout] -// ============================================================================ - -//! Function-frame layout. -//! -//! Function layout is used directly by prolog and epilog insertion helpers. It -//! contains only information necessary to insert proper prolog and epilog, and -//! should be always calculated from \ref FuncDetail and \ref FuncFrameInfo, where -//! \ref FuncDetail defines function's calling convention and signature, and \ref -//! FuncFrameInfo specifies how much stack is used, and which registers are dirty. 
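
// Editorial sketch (not part of the original header) of that flow, assuming a
// CodeEmitter `emitter` is already attached to a CodeHolder; X86Reg::kKindVec
// and Utils::mask() are helpers assumed from other headers in this tree:
//
//   FuncDetail func;
//   func.init(FuncSignature0<void>());                     // Calling convention + expanded signature.
//
//   FuncFrameInfo ffi;
//   ffi.setDirtyRegs(X86Reg::kKindVec, Utils::mask(0, 1)); // XMM0 and XMM1 are dirty.
//   ffi.setStackFrameSize(32);                             // 32 bytes of local stack.
//
//   FuncFrameLayout layout;
//   layout.init(func, ffi);
//
//   FuncUtils::emitProlog(emitter, layout);
//   // ... emit the function body ...
//   FuncUtils::emitEpilog(emitter, layout);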
-struct FuncFrameLayout { - ASMJIT_ENUM(Limits) { - kMaxVRegKinds = Globals::kMaxVRegKinds - }; - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - ASMJIT_API Error init(const FuncDetail& func, const FuncFrameInfo& ffi) noexcept; - ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool hasPreservedFP() const noexcept { return static_cast(_preservedFP); } - ASMJIT_INLINE bool hasDsaSlotUsed() const noexcept { return static_cast(_dsaSlotUsed); } - ASMJIT_INLINE bool hasAlignedVecSR() const noexcept { return static_cast(_alignedVecSR); } - ASMJIT_INLINE bool hasDynamicAlignment() const noexcept { return static_cast(_dynamicAlignment); } - - ASMJIT_INLINE bool hasMmxCleanup() const noexcept { return static_cast(_mmxCleanup); } - ASMJIT_INLINE bool hasAvxCleanup() const noexcept { return static_cast(_avxCleanup); } - ASMJIT_INLINE bool isAvxEnabled() const noexcept { return static_cast(_avxEnabled); } - - ASMJIT_INLINE uint32_t getSavedRegs(uint32_t kind) const noexcept { - ASMJIT_ASSERT(kind < kMaxVRegKinds); - return _savedRegs[kind]; - } - - //! Get stack size. - ASMJIT_INLINE uint32_t getStackSize() const noexcept { return _stackSize; } - //! Get stack alignment. - ASMJIT_INLINE uint32_t getStackAlignment() const noexcept { return _stackAlignment; } - //! Get the offset needed to access the function's stack (it skips call-stack). - ASMJIT_INLINE uint32_t getStackBaseOffset() const noexcept { return _stackBaseOffset; } - - //! Get stack size required to save GP registers. - ASMJIT_INLINE uint32_t getGpStackSize() const noexcept { return _gpStackSize; } - //! Get stack size required to save VEC registers. - ASMJIT_INLINE uint32_t getVecStackSize() const noexcept { return _vecStackSize; } - - ASMJIT_INLINE uint32_t getGpStackOffset() const noexcept { return _gpStackOffset; } - ASMJIT_INLINE uint32_t getVecStackOffset() const noexcept { return _vecStackOffset; } - - ASMJIT_INLINE uint32_t getStackArgsRegId() const noexcept { return _stackArgsRegId; } - ASMJIT_INLINE uint32_t getStackArgsOffset() const noexcept { return _stackArgsOffset; } - - ASMJIT_INLINE bool hasStackAdjustment() const noexcept { return _stackAdjustment != 0; } - ASMJIT_INLINE uint32_t getStackAdjustment() const noexcept { return _stackAdjustment; } - - ASMJIT_INLINE bool hasCalleeStackCleanup() const noexcept { return _calleeStackCleanup != 0; } - ASMJIT_INLINE uint32_t getCalleeStackCleanup() const noexcept { return _calleeStackCleanup; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint8_t _stackAlignment; //!< Final stack alignment of the functions. - uint8_t _stackBaseRegId; //!< GP register that holds address of base stack address. - uint8_t _stackArgsRegId; //!< GP register that holds address of the first argument passed by stack. - - uint32_t _savedRegs[kMaxVRegKinds]; //!< Registers that will be saved/restored in prolog/epilog. - - uint32_t _preservedFP : 1; //!< Function preserves frame-pointer. - uint32_t _dsaSlotUsed : 1; //!< True if `_dsaSlot` contains a valid memory slot/offset. 
-  uint32_t _alignedVecSR : 1; //!< Use instructions that perform aligned ops to save/restore XMM regs.
-  uint32_t _dynamicAlignment : 1; //!< Function must dynamically align the stack.
-
-  uint32_t _mmxCleanup : 1; //!< Emit 'emms' in epilog (X86).
-  uint32_t _avxCleanup : 1; //!< Emit 'vzeroupper' in epilog (X86).
-  uint32_t _avxEnabled : 1; //!< Use AVX instead of SSE for SIMD saves/restores (X86).
-
-  uint32_t _stackSize; //!< Stack size (sum of function's stack and call stack).
-  uint32_t _stackBaseOffset; //!< Stack offset (non-zero if kFlagHasCalls is set).
-  uint32_t _stackAdjustment; //!< Stack adjustment in prolog/epilog.
-  uint32_t _stackArgsOffset; //!< Offset to the first argument passed by stack, relative to _stackArgsRegId.
-
-  uint32_t _dsaSlot; //!< Memory slot where the prolog inserter stores previous (unaligned) ESP.
-  uint16_t _calleeStackCleanup; //!< How many bytes the callee should add to the stack (X86 STDCALL).
-  uint16_t _gpStackSize; //!< Stack size required to save GP regs.
-  uint16_t _vecStackSize; //!< Stack size required to save VEC regs.
-  uint32_t _gpStackOffset; //!< Offset where saved GP regs are stored.
-  uint32_t _vecStackOffset; //!< Offset where saved VEC regs are stored.
-};
-
-// ============================================================================
-// [asmjit::FuncArgsMapper]
-// ============================================================================
-
-//! Assign a physical register to each function argument.
-//!
-//! This is used to specify where each function argument should be shuffled
-//! or allocated (in case it's passed by stack).
-class FuncArgsMapper {
-public:
-  struct Value {
-    // NOTE: The layout is compatible with FuncDetail::Value except stack.
-    ASMJIT_ENUM(Parts) {
-      kTypeIdShift = 24,
-      kTypeIdMask = 0xFF000000U,
-
-      kRegTypeShift = 8,
-      kRegTypeMask = 0x0000FF00U,
-
-      kRegIdShift = 0,
-      kRegIdMask = 0x000000FFU,
-
-      kIsAssigned = 0x00010000U
-    };
-
-    //! Get if this value is assigned (i.e. contains valid data).
-    ASMJIT_INLINE bool isAssigned() const noexcept { return _value != 0; }
-    //! Initialize this in/out by a given `typeId`, `regType`, and `regId`.
-    ASMJIT_INLINE void assign(uint32_t typeId, uint32_t regType, uint32_t regId) noexcept {
-      _value = (typeId << kTypeIdShift) | (regType << kRegTypeShift) | (regId << kRegIdShift) | kIsAssigned;
-    }
-    //! Reset the value to its unassigned state.
-    ASMJIT_INLINE void reset() noexcept { _value = 0; }
-
-    //! Get virtual type of this argument or return value.
-    ASMJIT_INLINE uint32_t getTypeId() const noexcept { return _value >> kTypeIdShift; }
-    //! Get a register type of the register used to pass the argument or return the value.
-    ASMJIT_INLINE uint32_t getRegType() const noexcept { return (_value & kRegTypeMask) >> kRegTypeShift; }
-    //! Get a physical id of the register used to pass the argument or return the value.
- ASMJIT_INLINE uint32_t getRegId() const noexcept { return (_value & kRegIdMask) >> kRegIdShift; } - - uint32_t _value; - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - explicit ASMJIT_INLINE FuncArgsMapper(const FuncDetail* fd) noexcept { reset(fd); } - ASMJIT_INLINE FuncArgsMapper(const FuncArgsMapper& other) noexcept { - ::memcpy(this, &other, sizeof(*this)); - } - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void reset(const FuncDetail* fd = nullptr) noexcept { - _funcDetail = fd; - ::memset(_args, 0, sizeof(_args)); - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE const FuncDetail* getFuncDetail() const noexcept { return _funcDetail; } - ASMJIT_INLINE void setFuncDetail(const FuncDetail* fd) noexcept { _funcDetail = fd; } - - ASMJIT_INLINE Value& getArg(size_t index) noexcept { - ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args)); - return _args[index]; - } - ASMJIT_INLINE const Value& getArg(size_t index) const noexcept { - ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args)); - return _args[index]; - } - - ASMJIT_INLINE bool isAssigned(size_t index) const noexcept { - ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args)); - return _args[index].isAssigned(); - } - - ASMJIT_INLINE void assign(size_t index, const Reg& reg, uint32_t typeId = TypeId::kVoid) noexcept { - // Not designed for virtual registers. - ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args)); - ASMJIT_ASSERT(reg.isPhysReg()); - - _args[index].assign(typeId, reg.getType(), reg.getId()); - } - - // NOTE: All `assignAll()` methods are shortcuts to assign all arguments at - // once, however, since registers are passed all at once these initializers - // don't provide any way to pass TypeId and/or to keep any argument between - // the arguments passed uninitialized. 
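
  // Editorial example (not part of the original header), assuming the x86
  // register aliases from the x86 operand header: remap the first two GP
  // arguments into ECX and EDX before the function body executes:
  //
  //   FuncArgsMapper args(&funcDetail);
  //   args.assignAll(x86::ecx, x86::edx);
  //   args.updateFrameInfo(ffi);   // Then FuncUtils::allocArgs() performs the moves.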
- ASMJIT_INLINE void assignAll(const Reg& a0) noexcept { - assign(0, a0); - } - ASMJIT_INLINE void assignAll(const Reg& a0, const Reg& a1) noexcept { - assign(0, a0); assign(1, a1); - } - ASMJIT_INLINE void assignAll(const Reg& a0, const Reg& a1, const Reg& a2) noexcept { - assign(0, a0); assign(1, a1); assign(2, a2); - } - ASMJIT_INLINE void assignAll(const Reg& a0, const Reg& a1, const Reg& a2, const Reg& a3) noexcept { - assign(0, a0); assign(1, a1); assign(2, a2); assign(3, a3); - } - ASMJIT_INLINE void assignAll(const Reg& a0, const Reg& a1, const Reg& a2, const Reg& a3, const Reg& a4) noexcept { - assign(0, a0); assign(1, a1); assign(2, a2); assign(3, a3); - assign(4, a4); - } - ASMJIT_INLINE void assignAll(const Reg& a0, const Reg& a1, const Reg& a2, const Reg& a3, const Reg& a4, const Reg& a5) noexcept { - assign(0, a0); assign(1, a1); assign(2, a2); assign(3, a3); - assign(4, a4); assign(5, a5); - } - ASMJIT_INLINE void assignAll(const Reg& a0, const Reg& a1, const Reg& a2, const Reg& a3, const Reg& a4, const Reg& a5, const Reg& a6) noexcept { - assign(0, a0); assign(1, a1); assign(2, a2); assign(3, a3); - assign(4, a4); assign(5, a5); assign(6, a6); - } - ASMJIT_INLINE void assignAll(const Reg& a0, const Reg& a1, const Reg& a2, const Reg& a3, const Reg& a4, const Reg& a5, const Reg& a6, const Reg& a7) noexcept { - assign(0, a0); assign(1, a1); assign(2, a2); assign(3, a3); - assign(4, a4); assign(5, a5); assign(6, a6); assign(7, a7); - } - - // -------------------------------------------------------------------------- - // [Utilities] - // -------------------------------------------------------------------------- - - //! Update `FuncFrameInfo` accordingly to FuncArgsMapper. - //! - //! This method must be called if you use `FuncArgsMapper` and you plan to - //! use `FuncUtils::allocArgs()` to remap all arguments after the prolog is - //! inserted. - ASMJIT_API Error updateFrameInfo(FuncFrameInfo& ffi) const noexcept; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - const FuncDetail* _funcDetail; //!< Function detail. - Value _args[kFuncArgCountLoHi]; //!< Mapping of each function argument. -}; - -// ============================================================================ -// [asmjit::FuncUtils] -// ============================================================================ - -struct FuncUtils { - ASMJIT_API static Error emitProlog(CodeEmitter* emitter, const FuncFrameLayout& layout); - ASMJIT_API static Error emitEpilog(CodeEmitter* emitter, const FuncFrameLayout& layout); - ASMJIT_API static Error allocArgs(CodeEmitter* emitter, const FuncFrameLayout& layout, const FuncArgsMapper& args); -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_FUNC_H diff --git a/src/asmjit/base/globals.cpp b/src/asmjit/base/globals.cpp deleted file mode 100644 index b4612e5..0000000 --- a/src/asmjit/base/globals.cpp +++ /dev/null @@ -1,118 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
- -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/globals.h" -#include "../base/utils.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::DebugUtils] -// ============================================================================ - -#if !defined(ASMJIT_DISABLE_TEXT) -static const char errorMessages[] = - "Ok\0" - "No heap memory\0" - "No virtual memory\0" - "Invalid argument\0" - "Invalid state\0" - "Invalid architecture\0" - "Not initialized\0" - "Already initialized\0" - "Feature not enabled\0" - "Slot occupied\0" - "No code generated\0" - "Code too large\0" - "Invalid label\0" - "Label index overflow\0" - "Label already bound\0" - "Label already defined\0" - "Label name too long\0" - "Invalid label name\0" - "Invalid parent label\0" - "Non-local label can't have parent\0" - "Relocation index overflow\0" - "Invalid relocation entry\0" - "Invalid instruction\0" - "Invalid register type\0" - "Invalid register kind\0" - "Invalid register's physical id\0" - "Invalid register's virtual id\0" - "Invalid prefix combination\0" - "Invalid lock prefix\0" - "Invalid xacquire prefix\0" - "Invalid xrelease prefix\0" - "Invalid rep prefix\0" - "Invalid rex prefix\0" - "Invalid mask, expected {k}\0" - "Invalid use of {k}\0" - "Invalid use of {k}{z}\0" - "Invalid broadcast {1tox}\0" - "Invalid {er} or {sae} option\0" - "Invalid address\0" - "Invalid address index\0" - "Invalid address scale\0" - "Invalid use of 64-bit address\0" - "Invalid displacement\0" - "Invalid segment\0" - "Invalid immediate value\0" - "Invalid operand size\0" - "Ambiguous operand size\0" - "Operand size mismatch\0" - "Invalid type-info\0" - "Invalid use of a low 8-bit GPB register\0" - "Invalid use of a 64-bit GPQ register in 32-bit mode\0" - "Invalid use of an 80-bit float\0" - "Not consecutive registers\0" - "No more physical registers\0" - "Overlapped registers\0" - "Overlapping register and arguments base-address register\0" - "Unknown error\0"; -#endif // ASMJIT_DISABLE_TEXT - -ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept { -#if !defined(ASMJIT_DISABLE_TEXT) - return Utils::findPackedString(errorMessages, std::min(err, kErrorCount)); -#else - static const char noMessage[] = ""; - return noMessage; -#endif -} - -ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept { -#if ASMJIT_OS_WINDOWS - ::OutputDebugStringA(str); -#else - ::fputs(str, stderr); -#endif -} - -ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept { - char str[1024]; - - snprintf(str, 1024, - "[asmjit] Assertion failed at %s (line %d):\n" - "[asmjit] %s\n", file, line, msg); - - // Support buggy `snprintf` implementations. - str[1023] = '\0'; - - debugOutput(str); - ::abort(); -} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/globals.h b/src/asmjit/base/globals.h deleted file mode 100644 index 74c7251..0000000 --- a/src/asmjit/base/globals.h +++ /dev/null @@ -1,341 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_GLOBALS_H -#define _ASMJIT_BASE_GLOBALS_H - -// [Dependencies] -#include "../asmjit_build.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! 
\{ - -// ============================================================================ -// [asmjit::Globals] -// ============================================================================ - -enum { kInvalidValue = 0xFFFFFFFFU }; - -//! AsmJit globals. -namespace Globals { - -//! Invalid index -//! -//! Invalid index is the last possible index that is never used in practice. In -//! AsmJit it is used exclusively with strings to indicate the the length of the -//! string is not known and has to be determined. -static const size_t kInvalidIndex = ~static_cast(0); - -//! Invalid base address. -static const uint64_t kNoBaseAddress = ~static_cast(0); - -//! Global definitions. -ASMJIT_ENUM(Defs) { - //! Invalid register id. - kInvalidRegId = 0xFF, - - //! Host memory allocator overhead. - kAllocOverhead = static_cast(sizeof(intptr_t) * 4), - //! Aggressive growing strategy threshold. - kAllocThreshold = 8192 * 1024 -}; - -ASMJIT_ENUM(Limits) { - //! Count of register kinds that are important to Function API and CodeCompiler. - //! The target architecture can define more register kinds for special registers, - //! but these will never map to virtual registers and will never be used to pass - //! and return function arguments and function return values, respectively. - kMaxVRegKinds = 4, - - //! Maximum number of physical registers of all kinds of all supported - //! architectures. This is only important for \ref CodeCompiler and its - //! \ref RAPass (register allocator pass). - //! - //! NOTE: The distribution of these registers is architecture specific. - kMaxPhysRegs = 64, - - //! Maximum alignment. - kMaxAlignment = 64, - - //! Maximum label or symbol length in bytes (take into consideration that a - //! single UTF-8 character can take more than single byte to encode it). - kMaxLabelLength = 2048 -}; - -} // Globals namespace - -// ============================================================================ -// [asmjit::Error] -// ============================================================================ - -//! AsmJit error type (uint32_t). -typedef uint32_t Error; - -//! AsmJit error codes. -ASMJIT_ENUM(ErrorCode) { - //! No error (success). - //! - //! This is default state and state you want. - kErrorOk = 0, - - //! Heap memory allocation failed. - kErrorNoHeapMemory, - - //! Virtual memory allocation failed. - kErrorNoVirtualMemory, - - //! Invalid argument. - kErrorInvalidArgument, - - //! Invalid state. - //! - //! If this error is returned it means that either you are doing something - //! wrong or AsmJit caught itself by doing something wrong. This error should - //! not be underestimated. - kErrorInvalidState, - - //! Invalid or incompatible architecture. - kErrorInvalidArch, - - //! The object is not initialized. - kErrorNotInitialized, - //! The object is already initialized. - kErrorAlreadyInitialized, - - //! Built-in feature was disabled at compile time and it's not available. - kErrorFeatureNotEnabled, - - //! CodeHolder can't have attached more than one \ref Assembler at a time. - kErrorSlotOccupied, - - //! No code generated. - //! - //! Returned by runtime if the \ref CodeHolder contains no code. - kErrorNoCodeGenerated, - //! Code generated is larger than allowed. - kErrorCodeTooLarge, - - //! Attempt to use uninitialized label. - kErrorInvalidLabel, - //! Label index overflow - a single `Assembler` instance can hold more than - //! 2 billion labels (2147483391 to be exact). If there is an attempt to - //! create more labels this error is returned. 
-  kErrorLabelIndexOverflow,
-  //! Label is already bound.
-  kErrorLabelAlreadyBound,
-  //! Label is already defined (named labels).
-  kErrorLabelAlreadyDefined,
-  //! Label name is too long.
-  kErrorLabelNameTooLong,
-  //! Label must always be local if it's anonymous (without a name).
-  kErrorInvalidLabelName,
-  //! Parent id passed to `CodeHolder::newNamedLabelId()` was invalid.
-  kErrorInvalidParentLabel,
-  //! Parent id specified for a non-local (global) label.
-  kErrorNonLocalLabelCantHaveParent,
-
-  //! Relocation index overflow.
-  kErrorRelocIndexOverflow,
-  //! Invalid relocation entry.
-  kErrorInvalidRelocEntry,
-
-  //! Invalid instruction.
-  kErrorInvalidInstruction,
-  //! Invalid register type.
-  kErrorInvalidRegType,
-  //! Invalid register kind.
-  kErrorInvalidRegKind,
-  //! Invalid register's physical id.
-  kErrorInvalidPhysId,
-  //! Invalid register's virtual id.
-  kErrorInvalidVirtId,
-  //! Invalid prefix combination.
-  kErrorInvalidPrefixCombination,
-  //! Invalid LOCK prefix.
-  kErrorInvalidLockPrefix,
-  //! Invalid XACQUIRE prefix.
-  kErrorInvalidXAcquirePrefix,
-  //! Invalid XRELEASE prefix.
-  kErrorInvalidXReleasePrefix,
-  //! Invalid REP prefix.
-  kErrorInvalidRepPrefix,
-  //! Invalid REX prefix.
-  kErrorInvalidRexPrefix,
-  //! Invalid mask register (not 'k').
-  kErrorInvalidKMaskReg,
-  //! Invalid {k} use (not supported by the instruction).
-  kErrorInvalidKMaskUse,
-  //! Invalid {k}{z} use (not supported by the instruction).
-  kErrorInvalidKZeroUse,
-  //! Invalid broadcast - currently only related to invalid use of AVX-512 {1tox}.
-  kErrorInvalidBroadcast,
-  //! Invalid 'embedded-rounding' {er} or 'suppress-all-exceptions' {sae} (AVX-512).
-  kErrorInvalidEROrSAE,
-  //! Invalid address used (not encodable).
-  kErrorInvalidAddress,
-  //! Invalid index register used in memory address (not encodable).
-  kErrorInvalidAddressIndex,
-  //! Invalid address scale (not encodable).
-  kErrorInvalidAddressScale,
-  //! Invalid use of 64-bit address.
-  kErrorInvalidAddress64Bit,
-  //! Invalid displacement (not encodable).
-  kErrorInvalidDisplacement,
-  //! Invalid segment (X86).
-  kErrorInvalidSegment,
-
-  //! Invalid immediate (out of bounds on X86 and invalid pattern on ARM).
-  kErrorInvalidImmediate,
-
-  //! Invalid operand size.
-  kErrorInvalidOperandSize,
-  //! Ambiguous operand size (memory has zero size while it's required to determine the operation type).
-  kErrorAmbiguousOperandSize,
-  //! Mismatching operand size (size of multiple operands doesn't match the operation size).
-  kErrorOperandSizeMismatch,
-
-  //! Invalid TypeId.
-  kErrorInvalidTypeId,
-  //! Invalid use of an 8-bit GPB-HIGH register.
-  kErrorInvalidUseOfGpbHi,
-  //! Invalid use of a 64-bit GPQ register in 32-bit mode.
-  kErrorInvalidUseOfGpq,
-  //! Invalid use of an 80-bit float (TypeId::kF80).
-  kErrorInvalidUseOfF80,
-  //! Some registers in the instruction must be consecutive (some ARM and AVX512 neural-net instructions).
-  kErrorNotConsecutiveRegs,
-
-  //! AsmJit requires a physical register, but none is available.
-  kErrorNoMorePhysRegs,
-  //! A variable has been assigned more than once to a function argument (CodeCompiler).
-  kErrorOverlappedRegs,
-  //! Invalid register to hold stack arguments offset.
-  kErrorOverlappingStackRegWithRegArg,
-
-  //! Count of AsmJit error codes.
- kErrorCount -}; - -// ============================================================================ -// [asmjit::Internal] -// ============================================================================ - -namespace Internal { - -#if defined(ASMJIT_CUSTOM_ALLOC) && \ - defined(ASMJIT_CUSTOM_REALLOC) && \ - defined(ASMJIT_CUSTOM_FREE) -static ASMJIT_INLINE void* allocMemory(size_t size) noexcept { return ASMJIT_CUSTOM_ALLOC(size); } -static ASMJIT_INLINE void* reallocMemory(void* p, size_t size) noexcept { return ASMJIT_CUSTOM_REALLOC(p, size); } -static ASMJIT_INLINE void releaseMemory(void* p) noexcept { ASMJIT_CUSTOM_FREE(p); } -#elif !defined(ASMJIT_CUSTOM_ALLOC) && \ - !defined(ASMJIT_CUSTOM_REALLOC) && \ - !defined(ASMJIT_CUSTOM_FREE) -static ASMJIT_INLINE void* allocMemory(size_t size) noexcept { return ::malloc(size); } -static ASMJIT_INLINE void* reallocMemory(void* p, size_t size) noexcept { return ::realloc(p, size); } -static ASMJIT_INLINE void releaseMemory(void* p) noexcept { ::free(p); } -#else -# error "[asmjit] You must provide either none or all of ASMJIT_CUSTOM_[ALLOC|REALLOC|FREE]" -#endif - -//! Cast designed to cast between function and void* pointers. -template -static ASMJIT_INLINE Dst ptr_cast(Src p) noexcept { return (Dst)p; } - -} // Internal namespace - -template -static ASMJIT_INLINE Func ptr_as_func(void* func) noexcept { return Internal::ptr_cast(func); } - -template -static ASMJIT_INLINE void* func_as_ptr(Func func) noexcept { return Internal::ptr_cast(func); } - -// ============================================================================ -// [asmjit::DebugUtils] -// ============================================================================ - -namespace DebugUtils { - -//! Returns the error `err` passed. -//! -//! Provided for debugging purposes. Putting a breakpoint inside `errored` can -//! help with tracing the origin of any error reported / returned by AsmJit. -static ASMJIT_INLINE Error errored(Error err) noexcept { return err; } - -//! Get a printable version of `asmjit::Error` code. -ASMJIT_API const char* errorAsString(Error err) noexcept; - -//! Called to output debugging message(s). -ASMJIT_API void debugOutput(const char* str) noexcept; - -//! Called on assertion failure. -//! -//! \param file Source file name where it happened. -//! \param line Line in the source file. -//! \param msg Message to display. -//! -//! If you have problems with assertions put a breakpoint at assertionFailed() -//! function (asmjit/base/globals.cpp) and check the call stack to locate the -//! failing code. -ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept; - -#if defined(ASMJIT_DEBUG) -# define ASMJIT_ASSERT(exp) \ - do { \ - if (ASMJIT_LIKELY(exp)) \ - break; \ - ::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #exp); \ - } while (0) -# define ASMJIT_NOT_REACHED() \ - do { \ - ::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, \ - "ASMJIT_NOT_REACHED has been reached"); \ - ASMJIT_ASSUME(0); \ - } while (0) -#else -# define ASMJIT_ASSERT(exp) ASMJIT_NOP -# define ASMJIT_NOT_REACHED() ASMJIT_ASSUME(0) -#endif // DEBUG - -//! \internal -//! -//! Used by AsmJit to propagate a possible `Error` produced by `...` to the caller. -#define ASMJIT_PROPAGATE(...) 
\ - do { \ - ::asmjit::Error _err = __VA_ARGS__; \ - if (ASMJIT_UNLIKELY(_err)) \ - return _err; \ - } while (0) - -} // DebugUtils namespace - -// ============================================================================ -// [asmjit::Init / NoInit] -// ============================================================================ - -#if !defined(ASMJIT_DOCGEN) -struct _Init {}; -static const _Init Init = {}; - -struct _NoInit {}; -static const _NoInit NoInit = {}; -#endif // !ASMJIT_DOCGEN - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_GLOBALS_H diff --git a/src/asmjit/base/inst.cpp b/src/asmjit/base/inst.cpp deleted file mode 100644 index cc5ff39..0000000 --- a/src/asmjit/base/inst.cpp +++ /dev/null @@ -1,77 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Guard] -#include "../asmjit_build.h" -#if defined(ASMJIT_BUILD_X86) - -// [Dependencies] -#include "../base/arch.h" -#include "../base/inst.h" - -#if defined(ASMJIT_BUILD_X86) -# include "../x86/x86instimpl_p.h" -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) -# include "../arm/arminstimpl_p.h" -#endif // ASMJIT_BUILD_ARM - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::Inst - Validate] -// ============================================================================ - -#if !defined(ASMJIT_DISABLE_VALIDATION) -Error Inst::validate(uint32_t archType, const Detail& detail, const Operand_* operands, uint32_t count) noexcept { - #if defined(ASMJIT_BUILD_X86) - if (ArchInfo::isX86Family(archType)) - return X86InstImpl::validate(archType, detail, operands, count); - #endif - - #if defined(ASMJIT_BUILD_ARM) - if (ArchInfo::isArmFamily(archType)) - return ArmInstImpl::validate(archType, detail, operands, count); - #endif - - return DebugUtils::errored(kErrorInvalidArch); -} -#endif - -// ============================================================================ -// [asmjit::Inst - CheckFeatures] -// ============================================================================ - -#if !defined(ASMJIT_DISABLE_EXTENSIONS) -Error Inst::checkFeatures(uint32_t archType, const Detail& detail, const Operand_* operands, uint32_t count, CpuFeatures& out) noexcept { - #if defined(ASMJIT_BUILD_X86) - if (ArchInfo::isX86Family(archType)) - return X86InstImpl::checkFeatures(archType, detail, operands, count, out); - #endif - - #if defined(ASMJIT_BUILD_ARM) - if (ArchInfo::isArmFamily(archType)) - return ArmInstImpl::checkFeatures(archType, detail, operands, count, out); - #endif - - return DebugUtils::errored(kErrorInvalidArch); -} -#endif // !defined(ASMJIT_DISABLE_EXTENSIONS) - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // ASMJIT_BUILD_X86 diff --git a/src/asmjit/base/inst.h b/src/asmjit/base/inst.h deleted file mode 100644 index 7bb210a..0000000 --- a/src/asmjit/base/inst.h +++ /dev/null @@ -1,108 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_INST_H -#define _ASMJIT_BASE_INST_H - -// [Dependencies] -#include "../base/cpuinfo.h" -#include "../base/operand.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! 
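A hedged sketch of how `ASMJIT_PROPAGATE` and `ptr_as_func<>` fit together in the legacy API. The x86 register choice (SysV argument registers) and the `executablePtr` pointer are illustrative assumptions, not part of this patch.

```
// Sketch only: ASMJIT_PROPAGATE bubbles an Error up the call chain,
// ptr_as_func() turns relocated JIT memory into a callable function pointer.
typedef int (*AddFunc)(int, int);

static asmjit::Error emitAdd(asmjit::X86Assembler& a) {
  using namespace asmjit;
  ASMJIT_PROPAGATE(a.mov(x86::eax, x86::edi));  // returns early on any failure
  ASMJIT_PROPAGATE(a.add(x86::eax, x86::esi));
  return a.ret();
}

// Later, once the code has been placed into executable memory:
//   AddFunc fn = asmjit::ptr_as_func<AddFunc>(executablePtr);  // executablePtr is hypothetical
//   int three = fn(1, 2);
```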
\addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::Inst] -// ============================================================================ - -//! Definitions and utilities related to instructions used by all architectures. -struct Inst { - ASMJIT_ENUM(Id) { - kIdNone = 0 //!< Invalid or uninitialized instruction id. - }; - - //! Describes an instruction's jump type, if any. - ASMJIT_ENUM(JumpType) { - kJumpTypeNone = 0, //!< Instruction doesn't jump (regular instruction). - kJumpTypeDirect = 1, //!< Instruction is a unconditional (direct) jump. - kJumpTypeConditional = 2, //!< Instruction is a conditional jump. - kJumpTypeCall = 3, //!< Instruction is a function call. - kJumpTypeReturn = 4 //!< Instruction is a function return. - }; - - // -------------------------------------------------------------------------- - // [Detail] - // -------------------------------------------------------------------------- - - //! Instruction id, options, and extraReg packed in a single structure. This - //! structure exists to simplify analysis and validation API that requires a - //! lot of information about the instruction to be processed. - class Detail { - public: - ASMJIT_INLINE Detail() noexcept - : instId(0), - options(0), - extraReg() {} - - explicit ASMJIT_INLINE Detail(uint32_t instId, uint32_t options = 0) noexcept - : instId(instId), - options(options), - extraReg() {} - - ASMJIT_INLINE Detail(uint32_t instId, uint32_t options, const RegOnly& reg) noexcept - : instId(instId), - options(options), - extraReg(reg) {} - - ASMJIT_INLINE Detail(uint32_t instId, uint32_t options, const Reg& reg) noexcept - : instId(instId), - options(options) { extraReg.init(reg); } - - // ------------------------------------------------------------------------ - // [Accessors] - // ------------------------------------------------------------------------ - - ASMJIT_INLINE bool hasExtraReg() const noexcept { return extraReg.isValid(); } - - // ------------------------------------------------------------------------ - // [Members] - // ------------------------------------------------------------------------ - - uint32_t instId; - uint32_t options; - RegOnly extraReg; - }; - - // -------------------------------------------------------------------------- - // [API] - // -------------------------------------------------------------------------- - -#if !defined(ASMJIT_DISABLE_VALIDATION) - //! Validate the given instruction. - ASMJIT_API static Error validate(uint32_t archType, const Detail& detail, const Operand_* operands, uint32_t count) noexcept; -#endif // !ASMJIT_DISABLE_VALIDATION - -#if !defined(ASMJIT_DISABLE_EXTENSIONS) - //! Check CPU features required to execute the given instruction. - ASMJIT_API static Error checkFeatures(uint32_t archType, const Detail& detail, const Operand_* operands, uint32_t count, CpuFeatures& out) noexcept; -#endif // !defined(ASMJIT_DISABLE_EXTENSIONS) -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_INST_H diff --git a/src/asmjit/base/logging.cpp b/src/asmjit/base/logging.cpp deleted file mode 100644 index efb4475..0000000 --- a/src/asmjit/base/logging.cpp +++ /dev/null @@ -1,497 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
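A small sketch of the stand-alone validation entry point declared above, assuming the x86 backend is compiled in and validation is not disabled. The instruction id, arch constant, and operands are example values from the legacy x86 backend.

```
// Sketch (legacy API): validate "add eax, 42" without emitting it.
static asmjit::Error validateAddExample() {
  using namespace asmjit;
  Operand ops[2] = { x86::eax, imm(42) };
  Inst::Detail detail(X86Inst::kIdAdd);            // example instruction id
  return Inst::validate(ArchInfo::kTypeX64, detail, ops, 2);
}
```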
- -// [Export] -#define ASMJIT_EXPORTS - -// [Guard] -#include "../asmjit_build.h" -#if !defined(ASMJIT_DISABLE_LOGGING) - -// [Dependencies] -#include "../base/codeholder.h" -#include "../base/codeemitter.h" -#include "../base/logging.h" -#include "../base/utils.h" - -#if !defined(ASMJIT_DISABLE_BUILDER) -# include "../base/codebuilder.h" -#endif // !ASMJIT_DISABLE_BUILDER - -#if !defined(ASMJIT_DISABLE_COMPILER) -# include "../base/codecompiler.h" -#else -namespace asmjit { class VirtReg; } -#endif // !ASMJIT_DISABLE_COMPILER - -#if defined(ASMJIT_BUILD_X86) -# include "../x86/x86logging_p.h" -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) -# include "../arm/armlogging_p.h" -#endif // ASMJIT_BUILD_ARM - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::Logger - Construction / Destruction] -// ============================================================================ - -Logger::Logger() noexcept { - _options = 0; - ::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation)); -} -Logger::~Logger() noexcept {} - -// ============================================================================ -// [asmjit::Logger - Logging] -// ============================================================================ - -Error Logger::logf(const char* fmt, ...) noexcept { - Error err; - - va_list ap; - va_start(ap, fmt); - err = logv(fmt, ap); - va_end(ap); - - return err; -} - -Error Logger::logv(const char* fmt, va_list ap) noexcept { - char buf[1024]; - size_t len = vsnprintf(buf, sizeof(buf), fmt, ap); - - if (len >= sizeof(buf)) - len = sizeof(buf) - 1; - return log(buf, len); -} - -Error Logger::logBinary(const void* data, size_t size) noexcept { - static const char prefix[] = ".data "; - static const char hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; - - const uint8_t* s = static_cast(data); - size_t i = size; - - char buffer[128]; - ::memcpy(buffer, prefix, ASMJIT_ARRAY_SIZE(prefix) - 1); - - while (i) { - uint32_t n = static_cast(std::min(i, 16)); - char* p = buffer + ASMJIT_ARRAY_SIZE(prefix) - 1; - - i -= n; - do { - uint32_t c = s[0]; - - p[0] = hex[c >> 4]; - p[1] = hex[c & 15]; - - p += 2; - s += 1; - } while (--n); - - *p++ = '\n'; - ASMJIT_PROPAGATE(log(buffer, (size_t)(p - buffer))); - } - - return kErrorOk; -} - -// ============================================================================ -// [asmjit::Logger - Indentation] -// ============================================================================ - -void Logger::setIndentation(const char* indentation) noexcept { - ::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation)); - if (!indentation) - return; - - size_t length = Utils::strLen(indentation, ASMJIT_ARRAY_SIZE(_indentation) - 1); - ::memcpy(_indentation, indentation, length); -} - -// ============================================================================ -// [asmjit::FileLogger - Construction / Destruction] -// ============================================================================ - -FileLogger::FileLogger(FILE* stream) noexcept : _stream(nullptr) { setStream(stream); } -FileLogger::~FileLogger() noexcept {} - -// ============================================================================ -// [asmjit::FileLogger - Logging] -// ============================================================================ - -Error FileLogger::_log(const char* buf, size_t len) noexcept { - if (!_stream) - 
return kErrorOk; - - if (len == Globals::kInvalidIndex) - len = strlen(buf); - - fwrite(buf, 1, len, _stream); - return kErrorOk; -} - -// ============================================================================ -// [asmjit::StringLogger - Construction / Destruction] -// ============================================================================ - -StringLogger::StringLogger() noexcept {} -StringLogger::~StringLogger() noexcept {} - -// ============================================================================ -// [asmjit::StringLogger - Logging] -// ============================================================================ - -Error StringLogger::_log(const char* buf, size_t len) noexcept { - return _stringBuilder.appendString(buf, len); -} - -// ============================================================================ -// [asmjit::Logging] -// ============================================================================ - -Error Logging::formatLabel( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - uint32_t labelId) noexcept { - - const LabelEntry* le = emitter->getCode()->getLabelEntry(labelId); - if (ASMJIT_UNLIKELY(!le)) - return sb.appendFormat("InvalidLabel[Id=%u]", static_cast(labelId)); - - if (le->hasName()) { - if (le->hasParent()) { - uint32_t parentId = le->getParentId(); - const LabelEntry* pe = emitter->getCode()->getLabelEntry(parentId); - - if (ASMJIT_UNLIKELY(!pe)) - ASMJIT_PROPAGATE(sb.appendFormat("InvalidLabel[Id=%u]", static_cast(labelId))); - else if (ASMJIT_UNLIKELY(!pe->hasName())) - ASMJIT_PROPAGATE(sb.appendFormat("L%u", Operand::unpackId(parentId))); - else - ASMJIT_PROPAGATE(sb.appendString(pe->getName())); - - ASMJIT_PROPAGATE(sb.appendChar('.')); - } - return sb.appendString(le->getName()); - } - else { - return sb.appendFormat("L%u", Operand::unpackId(labelId)); - } -} - -Error Logging::formatRegister( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - uint32_t archType, - uint32_t regType, - uint32_t regId) noexcept { - -#if defined(ASMJIT_BUILD_X86) - return X86Logging::formatRegister(sb, logOptions, emitter, archType, regType, regId); -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) - return ArmLogging::formatRegister(sb, logOptions, emitter, archType, regType, regId); -#endif // ASMJIT_BUILD_ARM - - return kErrorInvalidArch; -} - -Error Logging::formatOperand( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - uint32_t archType, - const Operand_& op) noexcept { - -#if defined(ASMJIT_BUILD_X86) - return X86Logging::formatOperand(sb, logOptions, emitter, archType, op); -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) - return ArmLogging::formatOperand(sb, logOptions, emitter, archType, op); -#endif // ASMJIT_BUILD_ARM - - return kErrorInvalidArch; -} - -Error Logging::formatInstruction( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - uint32_t archType, - const Inst::Detail& detail, const Operand_* opArray, uint32_t opCount) noexcept { - -#if defined(ASMJIT_BUILD_X86) - return X86Logging::formatInstruction(sb, logOptions, emitter, archType, detail, opArray, opCount); -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_ARM) - return ArmLogging::formatInstruction(sb, logOptions, emitter, archType, detail, opArray, opCount); -#endif // ASMJIT_BUILD_ARM - - return kErrorInvalidArch; -} - -#if !defined(ASMJIT_DISABLE_BUILDER) -static Error formatTypeId(StringBuilder& sb, uint32_t typeId) noexcept { - if (typeId == 
TypeId::kVoid) - return sb.appendString("void"); - - if (!TypeId::isValid(typeId)) - return sb.appendString("unknown"); - - const char* typeName = "unknown"; - uint32_t typeSize = TypeId::sizeOf(typeId); - - uint32_t elementId = TypeId::elementOf(typeId); - switch (elementId) { - case TypeId::kIntPtr : typeName = "intptr" ; break; - case TypeId::kUIntPtr: typeName = "uintptr"; break; - case TypeId::kI8 : typeName = "i8" ; break; - case TypeId::kU8 : typeName = "u8" ; break; - case TypeId::kI16 : typeName = "i16" ; break; - case TypeId::kU16 : typeName = "u16" ; break; - case TypeId::kI32 : typeName = "i32" ; break; - case TypeId::kU32 : typeName = "u32" ; break; - case TypeId::kI64 : typeName = "i64" ; break; - case TypeId::kU64 : typeName = "u64" ; break; - case TypeId::kF32 : typeName = "f32" ; break; - case TypeId::kF64 : typeName = "f64" ; break; - case TypeId::kF80 : typeName = "f80" ; break; - case TypeId::kMask8 : typeName = "mask8" ; break; - case TypeId::kMask16 : typeName = "mask16" ; break; - case TypeId::kMask32 : typeName = "mask32" ; break; - case TypeId::kMask64 : typeName = "mask64" ; break; - case TypeId::kMmx32 : typeName = "mmx32" ; break; - case TypeId::kMmx64 : typeName = "mmx64" ; break; - } - - uint32_t elementSize = TypeId::sizeOf(elementId); - if (typeSize > elementSize) { - unsigned int numElements = typeSize / elementSize; - return sb.appendFormat("%sx%u", typeName, numElements); - } - else { - return sb.appendString(typeName); - } -} - -static Error formatFuncDetailValue( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - FuncDetail::Value value) noexcept { - - uint32_t typeId = value.getTypeId(); - ASMJIT_PROPAGATE(formatTypeId(sb, typeId)); - - if (value.byReg()) { - ASMJIT_PROPAGATE(sb.appendChar(':')); - ASMJIT_PROPAGATE(Logging::formatRegister(sb, logOptions, emitter, emitter->getArchType(), value.getRegType(), value.getRegId())); - } - - if (value.byStack()) { - ASMJIT_PROPAGATE(sb.appendFormat(":[%d]", static_cast(value.getStackOffset()))); - } - - return kErrorOk; -} - -static Error formatFuncRets( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - const FuncDetail& fd, - VirtReg* const* vRegs) noexcept { - - if (!fd.hasRet()) - return sb.appendString("void"); - - for (uint32_t i = 0; i < fd.getRetCount(); i++) { - if (i) ASMJIT_PROPAGATE(sb.appendString(", ")); - ASMJIT_PROPAGATE(formatFuncDetailValue(sb, logOptions, emitter, fd.getRet(i))); - -#if !defined(ASMJIT_DISABLE_COMPILER) - if (vRegs) - ASMJIT_PROPAGATE(sb.appendFormat(" {%s}", vRegs[i]->getName())); -#endif // !ASMJIT_DISABLE_COMPILER - } - - return kErrorOk; -} - -static Error formatFuncArgs( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - const FuncDetail& fd, - VirtReg* const* vRegs) noexcept { - - for (uint32_t i = 0; i < fd.getArgCount(); i++) { - if (i) ASMJIT_PROPAGATE(sb.appendString(", ")); - ASMJIT_PROPAGATE(formatFuncDetailValue(sb, logOptions, emitter, fd.getArg(i))); - -#if !defined(ASMJIT_DISABLE_COMPILER) - if (vRegs) - ASMJIT_PROPAGATE(sb.appendFormat(" {%s}", vRegs[i]->getName())); -#endif // !ASMJIT_DISABLE_COMPILER - } - - return kErrorOk; -} - -Error Logging::formatNode( - StringBuilder& sb, - uint32_t logOptions, - const CodeBuilder* cb, - const CBNode* node_) noexcept { - - if (node_->hasPosition()) - ASMJIT_PROPAGATE(sb.appendFormat("<%04u> ", node_->getPosition())); - - switch (node_->getType()) { - case CBNode::kNodeInst: { - const CBInst* node = node_->as(); - ASMJIT_PROPAGATE( - 
Logging::formatInstruction(sb, logOptions, cb, - cb->getArchType(), - node->getInstDetail(), node->getOpArray(), node->getOpCount())); - break; - } - - case CBNode::kNodeLabel: { - const CBLabel* node = node_->as(); - ASMJIT_PROPAGATE(sb.appendFormat("L%u:", Operand::unpackId(node->getId()))); - break; - } - - case CBNode::kNodeData: { - const CBData* node = node_->as(); - ASMJIT_PROPAGATE(sb.appendFormat(".embed (%u bytes)", node->getSize())); - break; - } - - case CBNode::kNodeAlign: { - const CBAlign* node = node_->as(); - ASMJIT_PROPAGATE( - sb.appendFormat(".align %u (%s)", - node->getAlignment(), - node->getMode() == kAlignCode ? "code" : "data")); - break; - } - - case CBNode::kNodeComment: { - const CBComment* node = node_->as(); - ASMJIT_PROPAGATE(sb.appendFormat("; %s", node->getInlineComment())); - break; - } - - case CBNode::kNodeSentinel: { - ASMJIT_PROPAGATE(sb.appendString("[sentinel]")); - break; - } - -#if !defined(ASMJIT_DISABLE_COMPILER) - case CBNode::kNodeFunc: { - const CCFunc* node = node_->as(); - ASMJIT_PROPAGATE(formatLabel(sb, logOptions, cb, node->getId())); - - ASMJIT_PROPAGATE(sb.appendString(": [")); - ASMJIT_PROPAGATE(formatFuncRets(sb, logOptions, cb, node->getDetail(), nullptr)); - ASMJIT_PROPAGATE(sb.appendString("]")); - - ASMJIT_PROPAGATE(sb.appendString("(")); - ASMJIT_PROPAGATE(formatFuncArgs(sb, logOptions, cb, node->getDetail(), node->getArgs())); - ASMJIT_PROPAGATE(sb.appendString(")")); - break; - } - - case CBNode::kNodeFuncExit: { - ASMJIT_PROPAGATE(sb.appendString("[ret]")); - break; - } - - case CBNode::kNodeFuncCall: { - const CCFuncCall* node = node_->as(); - ASMJIT_PROPAGATE( - Logging::formatInstruction(sb, logOptions, cb, - cb->getArchType(), - node->getInstDetail(), node->getOpArray(), node->getOpCount())); - break; - } -#endif // !ASMJIT_DISABLE_COMPILER - - default: { - ASMJIT_PROPAGATE(sb.appendFormat("[unknown (type=%u)]", node_->getType())); - break; - } - } - - return kErrorOk; -} -#endif // !ASMJIT_DISABLE_BUILDER - -Error Logging::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept { - size_t currentLen = sb.getLength(); - size_t commentLen = comment ? Utils::strLen(comment, kMaxCommentLength) : 0; - - ASMJIT_ASSERT(binLen >= dispLen); - - if ((binLen != 0 && binLen != Globals::kInvalidIndex) || commentLen) { - size_t align = kMaxInstLength; - char sep = ';'; - - for (size_t i = (binLen == Globals::kInvalidIndex); i < 2; i++) { - size_t begin = sb.getLength(); - - // Append align. - if (currentLen < align) - ASMJIT_PROPAGATE(sb.appendChars(' ', align - currentLen)); - - // Append separator. - if (sep) { - ASMJIT_PROPAGATE(sb.appendChar(sep)); - ASMJIT_PROPAGATE(sb.appendChar(' ')); - } - - // Append binary data or comment. 
- if (i == 0) { - ASMJIT_PROPAGATE(sb.appendHex(binData, binLen - dispLen - imLen)); - ASMJIT_PROPAGATE(sb.appendChars('.', dispLen * 2)); - ASMJIT_PROPAGATE(sb.appendHex(binData + binLen - imLen, imLen)); - if (commentLen == 0) break; - } - else { - ASMJIT_PROPAGATE(sb.appendString(comment, commentLen)); - } - - currentLen += sb.getLength() - begin; - align += kMaxBinaryLength; - sep = '|'; - } - } - - return sb.appendChar('\n'); -} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // !ASMJIT_DISABLE_LOGGING diff --git a/src/asmjit/base/logging.h b/src/asmjit/base/logging.h deleted file mode 100644 index 609f188..0000000 --- a/src/asmjit/base/logging.h +++ /dev/null @@ -1,288 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_LOGGING_H -#define _ASMJIT_BASE_LOGGING_H - -// [Dependencies] -#include "../base/inst.h" -#include "../base/string.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -#if !defined(ASMJIT_DISABLE_LOGGING) - -// ============================================================================ -// [Forward Declarations] -// ============================================================================ - -class CodeEmitter; -class Reg; -struct Operand_; - -#if !defined(ASMJIT_DISABLE_BUILDER) -class CodeBuilder; -class CBNode; -#endif // !ASMJIT_DISABLE_BUILDER - -// ============================================================================ -// [asmjit::Logger] -// ============================================================================ - -//! Abstract logging interface and helpers. -//! -//! This class can be inherited and reimplemented to fit into your logging -//! subsystem. When reimplementing use `Logger::_log()` method to log into -//! a custom stream. -//! -//! There are two \ref Logger implementations offered by AsmJit: -//! - \ref FileLogger - allows to log into a `FILE*` stream. -//! - \ref StringLogger - logs into a \ref StringBuilder. -class ASMJIT_VIRTAPI Logger { -public: - ASMJIT_NONCOPYABLE(Logger) - - // -------------------------------------------------------------------------- - // [Options] - // -------------------------------------------------------------------------- - - //! Logger options. - ASMJIT_ENUM(Options) { - kOptionBinaryForm = 0x00000001, //! Output instructions also in binary form. - kOptionImmExtended = 0x00000002, //! Output a meaning of some immediates. - kOptionHexImmediate = 0x00000004, //! Output constants in hexadecimal form. - kOptionHexDisplacement = 0x00000008 //! Output displacements in hexadecimal form. - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a `Logger` instance. - ASMJIT_API Logger() noexcept; - //! Destroy the `Logger` instance. - ASMJIT_API virtual ~Logger() noexcept; - - // -------------------------------------------------------------------------- - // [Logging] - // -------------------------------------------------------------------------- - - //! Log `str` - must be reimplemented. - virtual Error _log(const char* str, size_t len) noexcept = 0; - - //! Log a string `str`, which is either null terminated or having `len` length. 
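A minimal usage sketch of the public logging calls implemented above; `FileLogger` is used only as a concrete `Logger`, and the options and strings are examples.

```
// Sketch (legacy API): printf-style and binary logging through a Logger.
asmjit::FileLogger logger(stderr);
logger.addOptions(asmjit::Logger::kOptionBinaryForm |
                  asmjit::Logger::kOptionHexImmediate);

logger.logf("; generating %s\n", "prolog");   // formatted message
logger.logBinary("\x90\x90", 2);              // emits ".data 9090"
```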
- ASMJIT_INLINE Error log(const char* str, size_t len = Globals::kInvalidIndex) noexcept { return _log(str, len); } - //! Log a content of a `StringBuilder` `str`. - ASMJIT_INLINE Error log(const StringBuilder& str) noexcept { return _log(str.getData(), str.getLength()); } - - //! Format the message by using `sprintf()` and then send to `log()`. - ASMJIT_API Error logf(const char* fmt, ...) noexcept; - //! Format the message by using `vsprintf()` and then send to `log()`. - ASMJIT_API Error logv(const char* fmt, va_list ap) noexcept; - //! Log binary data. - ASMJIT_API Error logBinary(const void* data, size_t size) noexcept; - - // -------------------------------------------------------------------------- - // [Options] - // -------------------------------------------------------------------------- - - //! Get all logger options as a single integer. - ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; } - //! Get the given logger option. - ASMJIT_INLINE bool hasOption(uint32_t option) const noexcept { return (_options & option) != 0; } - ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; } - ASMJIT_INLINE void clearOptions(uint32_t options) noexcept { _options &= ~options; } - - // -------------------------------------------------------------------------- - // [Indentation] - // -------------------------------------------------------------------------- - - //! Get indentation. - ASMJIT_INLINE const char* getIndentation() const noexcept { return _indentation; } - //! Set indentation. - ASMJIT_API void setIndentation(const char* indentation) noexcept; - //! Reset indentation. - ASMJIT_INLINE void resetIndentation() noexcept { setIndentation(nullptr); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Options, see \ref LoggerOption. - uint32_t _options; - - //! Indentation. - char _indentation[12]; -}; - -// ============================================================================ -// [asmjit::FileLogger] -// ============================================================================ - -//! Logger that can log to a `FILE*` stream. -class ASMJIT_VIRTAPI FileLogger : public Logger { -public: - ASMJIT_NONCOPYABLE(FileLogger) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `FileLogger` that logs to a `FILE` stream. - ASMJIT_API FileLogger(FILE* stream = nullptr) noexcept; - //! Destroy the `FileLogger`. - ASMJIT_API virtual ~FileLogger() noexcept; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the logging out put stream or null. - ASMJIT_INLINE FILE* getStream() const noexcept { return _stream; } - - //! Set the logging output stream to `stream` or null. - //! - //! NOTE: If the `stream` is null it will disable logging, but it won't - //! stop calling `log()` unless the logger is detached from the - //! \ref Assembler. 
- ASMJIT_INLINE void setStream(FILE* stream) noexcept { _stream = stream; } - - // -------------------------------------------------------------------------- - // [Logging] - // -------------------------------------------------------------------------- - - ASMJIT_API Error _log(const char* buf, size_t len = Globals::kInvalidIndex) noexcept override; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! C file stream. - FILE* _stream; -}; - -// ============================================================================ -// [asmjit::StringLogger] -// ============================================================================ - -//! Logger that stores everything in an internal string buffer. -class ASMJIT_VIRTAPI StringLogger : public Logger { -public: - ASMJIT_NONCOPYABLE(StringLogger) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create new `StringLogger`. - ASMJIT_API StringLogger() noexcept; - //! Destroy the `StringLogger`. - ASMJIT_API virtual ~StringLogger() noexcept; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get `char*` pointer which represents the resulting string. - //! - //! The pointer is owned by `StringLogger`, it can't be modified or freed. - ASMJIT_INLINE const char* getString() const noexcept { return _stringBuilder.getData(); } - //! Clear the resulting string. - ASMJIT_INLINE void clearString() noexcept { _stringBuilder.clear(); } - - //! Get the length of the string returned by `getString()`. - ASMJIT_INLINE size_t getLength() const noexcept { return _stringBuilder.getLength(); } - - // -------------------------------------------------------------------------- - // [Logging] - // -------------------------------------------------------------------------- - - ASMJIT_API Error _log(const char* buf, size_t len = Globals::kInvalidIndex) noexcept override; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Output string. 
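Since `Logger` is meant to be subclassed with only `_log()` overridden, here is a hedged sketch of a custom sink; `StdStringLogger` is a hypothetical name, not an asmjit class.

```
// Sketch: forward all log output into an std::string.
#include <cstring>
#include <string>

class StdStringLogger : public asmjit::Logger {
public:
  asmjit::Error _log(const char* buf, size_t len) noexcept override {
    if (len == asmjit::Globals::kInvalidIndex)   // length unknown, compute it
      len = ::strlen(buf);
    _out.append(buf, len);
    return asmjit::kErrorOk;
  }

  std::string _out;    // accumulated log text
};
```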
- StringBuilder _stringBuilder; -}; - -// ============================================================================ -// [asmjit::Logging] -// ============================================================================ - -struct Logging { - ASMJIT_API static Error formatRegister( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - uint32_t archType, - uint32_t regType, - uint32_t regId) noexcept; - - ASMJIT_API static Error formatLabel( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - uint32_t labelId) noexcept; - - ASMJIT_API static Error formatOperand( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - uint32_t archType, - const Operand_& op) noexcept; - - ASMJIT_API static Error formatInstruction( - StringBuilder& sb, - uint32_t logOptions, - const CodeEmitter* emitter, - uint32_t archType, - const Inst::Detail& detail, const Operand_* opArray, uint32_t opCount) noexcept; - -#if !defined(ASMJIT_DISABLE_BUILDER) - ASMJIT_API static Error formatNode( - StringBuilder& sb, - uint32_t logOptions, - const CodeBuilder* cb, - const CBNode* node_) noexcept; -#endif // !ASMJIT_DISABLE_BUILDER - -// Only used by AsmJit internals, not available to users. -#if defined(ASMJIT_EXPORTS) - enum { - // Has to be big to be able to hold all metadata compiler can assign to a - // single instruction. - kMaxCommentLength = 512, - kMaxInstLength = 40, - kMaxBinaryLength = 26 - }; - - static Error formatLine( - StringBuilder& sb, - const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept; -#endif // ASMJIT_EXPORTS -}; -#else -class Logger; -#endif // !ASMJIT_DISABLE_LOGGING - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_LOGGER_H diff --git a/src/asmjit/base/misc_p.h b/src/asmjit/base/misc_p.h deleted file mode 100644 index 5024f1c..0000000 --- a/src/asmjit/base/misc_p.h +++ /dev/null @@ -1,74 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_MISC_P_H -#define _ASMJIT_BASE_MISC_P_H - -// [Dependencies] -#include "../asmjit_build.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -//! \internal -//! -//! Macro used to populate a table with 16 elements starting at `I`. 
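A hedged sketch of how the internal `ASMJIT_TABLE_16` macro defined just below is used to stamp out a 16-entry table at compile time; `DOUBLE` is a hypothetical helper macro and the table content is an example.

```
// Sketch (internal macro): expands DOUBLE(0), DOUBLE(1), ..., DOUBLE(15).
#include <cstdint>

#define DOUBLE(I) std::uint8_t((I) * 2)
static const std::uint8_t doubled[16] = { ASMJIT_TABLE_16(DOUBLE, 0) };
#undef DOUBLE
// doubled[] == { 0, 2, 4, ..., 30 }
```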
-#define ASMJIT_TABLE_16(DEF, I) DEF(I + 0), DEF(I + 1), DEF(I + 2), DEF(I + 3), \ - DEF(I + 4), DEF(I + 5), DEF(I + 6), DEF(I + 7), \ - DEF(I + 8), DEF(I + 9), DEF(I + 10), DEF(I + 11), \ - DEF(I + 12), DEF(I + 13), DEF(I + 14), DEF(I + 15) - -#define ASMJIT_TABLE_T_8(TABLE, VALUE, I) \ - TABLE< I + 0 >::VALUE, TABLE< I + 1 >::VALUE, \ - TABLE< I + 2 >::VALUE, TABLE< I + 3 >::VALUE, \ - TABLE< I + 4 >::VALUE, TABLE< I + 5 >::VALUE, \ - TABLE< I + 6 >::VALUE, TABLE< I + 7 >::VALUE - -#define ASMJIT_TABLE_T_16(TABLE, VALUE, I) \ - ASMJIT_TABLE_T_8(TABLE, VALUE, I), \ - ASMJIT_TABLE_T_8(TABLE, VALUE, I + 8) - -#define ASMJIT_TABLE_T_32(TABLE, VALUE, I) \ - ASMJIT_TABLE_T_16(TABLE, VALUE, I), \ - ASMJIT_TABLE_T_16(TABLE, VALUE, I + 16) - -#define ASMJIT_TABLE_T_64(TABLE, VALUE, I) \ - ASMJIT_TABLE_T_32(TABLE, VALUE, I), \ - ASMJIT_TABLE_T_32(TABLE, VALUE, I + 32) - -#define ASMJIT_TABLE_T_128(TABLE, VALUE, I) \ - ASMJIT_TABLE_T_64(TABLE, VALUE, I), \ - ASMJIT_TABLE_T_64(TABLE, VALUE, I + 64) - -#define ASMJIT_TABLE_T_256(TABLE, VALUE, I) \ - ASMJIT_TABLE_T_128(TABLE, VALUE, I), \ - ASMJIT_TABLE_T_128(TABLE, VALUE, I + 128) - -#define ASMJIT_TABLE_T_512(TABLE, VALUE, I) \ - ASMJIT_TABLE_T_256(TABLE, VALUE, I), \ - ASMJIT_TABLE_T_256(TABLE, VALUE, I + 256) - -#define ASMJIT_TABLE_T_1024(TABLE, VALUE, I) \ - ASMJIT_TABLE_T_512(TABLE, VALUE, I), \ - ASMJIT_TABLE_T_512(TABLE, VALUE, I + 512) - -//! \} - -} // asmjit namespace - -//! \} - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_MISC_P_H diff --git a/src/asmjit/base/operand.cpp b/src/asmjit/base/operand.cpp deleted file mode 100644 index 09eeea8..0000000 --- a/src/asmjit/base/operand.cpp +++ /dev/null @@ -1,209 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/operand.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::TypeId] -// ============================================================================ - -template -struct TypeIdSizeOf_T { - enum { - kValue = (ID == TypeId::kI8 ) ? 1 : - (ID == TypeId::kU8 ) ? 1 : - (ID == TypeId::kI16 ) ? 2 : - (ID == TypeId::kU16 ) ? 2 : - (ID == TypeId::kI32 ) ? 4 : - (ID == TypeId::kU32 ) ? 4 : - (ID == TypeId::kI64 ) ? 8 : - (ID == TypeId::kU64 ) ? 8 : - (ID == TypeId::kF32 ) ? 4 : - (ID == TypeId::kF64 ) ? 8 : - (ID == TypeId::kF80 ) ? 10 : - (ID == TypeId::kMask8 ) ? 1 : - (ID == TypeId::kMask16) ? 2 : - (ID == TypeId::kMask32) ? 4 : - (ID == TypeId::kMask64) ? 8 : - (ID == TypeId::kMmx32 ) ? 4 : - (ID == TypeId::kMmx64 ) ? 8 : - (ID >= TypeId::_kVec32Start && ID <= TypeId::_kVec32End ) ? 4 : - (ID >= TypeId::_kVec64Start && ID <= TypeId::_kVec64End ) ? 8 : - (ID >= TypeId::_kVec128Start && ID <= TypeId::_kVec128End) ? 16 : - (ID >= TypeId::_kVec256Start && ID <= TypeId::_kVec256End) ? 32 : - (ID >= TypeId::_kVec512Start && ID <= TypeId::_kVec512End) ? 64 : 0 - }; -}; - -template -struct TypeIdElementOf_T { - enum { - kValue = (ID == TypeId::kMask8 ) ? TypeId::kU8 : - (ID == TypeId::kMask16) ? TypeId::kU16 : - (ID == TypeId::kMask32) ? TypeId::kU32 : - (ID == TypeId::kMask64) ? TypeId::kU64 : - (ID == TypeId::kMmx32 ) ? TypeId::kI32 : - (ID == TypeId::kMmx64 ) ? TypeId::kI64 : - (ID >= TypeId::kI8 && ID <= TypeId::kF80 ) ? 
ID : - (ID >= TypeId::_kVec32Start && ID <= TypeId::_kVec32End ) ? ID - TypeId::_kVec32Start + TypeId::kI8 : - (ID >= TypeId::_kVec64Start && ID <= TypeId::_kVec64End ) ? ID - TypeId::_kVec64Start + TypeId::kI8 : - (ID >= TypeId::_kVec128Start && ID <= TypeId::_kVec128End) ? ID - TypeId::_kVec128Start + TypeId::kI8 : - (ID >= TypeId::_kVec256Start && ID <= TypeId::_kVec256End) ? ID - TypeId::_kVec256Start + TypeId::kI8 : - (ID >= TypeId::_kVec512Start && ID <= TypeId::_kVec512End) ? ID - TypeId::_kVec512Start + TypeId::kI8 : 0 - }; -}; - -#define R(TMPL, I) TMPL::kValue, TMPL::kValue, \ - TMPL::kValue, TMPL::kValue, \ - TMPL::kValue, TMPL::kValue, \ - TMPL::kValue, TMPL::kValue, \ - TMPL::kValue, TMPL::kValue, \ - TMPL::kValue, TMPL::kValue, \ - TMPL::kValue, TMPL::kValue, \ - TMPL::kValue, TMPL::kValue -ASMJIT_API const TypeId::Info TypeId::_info = { - // SizeOf[128] - { - R(TypeIdSizeOf_T, 0), R(TypeIdSizeOf_T, 16), - R(TypeIdSizeOf_T, 32), R(TypeIdSizeOf_T, 48), - R(TypeIdSizeOf_T, 64), R(TypeIdSizeOf_T, 80), - R(TypeIdSizeOf_T, 96), R(TypeIdSizeOf_T, 112) - }, - - // ElementOf[128] - { - R(TypeIdElementOf_T, 0), R(TypeIdElementOf_T, 16), - R(TypeIdElementOf_T, 32), R(TypeIdElementOf_T, 48), - R(TypeIdElementOf_T, 64), R(TypeIdElementOf_T, 80), - R(TypeIdElementOf_T, 96), R(TypeIdElementOf_T, 112) - } -}; -#undef R - -// ============================================================================ -// [asmjit::Operand - Test] -// ============================================================================ - -#if defined(ASMJIT_TEST) -UNIT(base_operand) { - INFO("Checking operand sizes"); - EXPECT(sizeof(Operand) == 16); - EXPECT(sizeof(Reg) == 16); - EXPECT(sizeof(Mem) == 16); - EXPECT(sizeof(Imm) == 16); - EXPECT(sizeof(Label) == 16); - - INFO("Checking basic functionality of Operand"); - Operand a, b; - Operand dummy; - - EXPECT(a.isNone() == true); - EXPECT(a.isReg() == false); - EXPECT(a.isMem() == false); - EXPECT(a.isImm() == false); - EXPECT(a.isLabel() == false); - EXPECT(a == b); - - EXPECT(a._any.reserved8_4 == 0, "Default constructed Operand should zero its 'reserved8_4' field"); - EXPECT(a._any.reserved12_4 == 0, "Default constructed Operand should zero its 'reserved12_4' field"); - - INFO("Checking basic functionality of Label"); - Label label; - EXPECT(label.isValid() == false); - EXPECT(label.getId() == 0); - - INFO("Checking basic functionality of Reg"); - EXPECT(Reg().isValid() == false, - "Default constructed Reg() should not be valid"); - EXPECT(Reg()._any.reserved8_4 == 0, - "A default constructed Reg() should zero its 'reserved8_4' field"); - EXPECT(Reg()._any.reserved12_4 == 0, - "A default constructed Reg() should zero its 'reserved12_4' field"); - - EXPECT(Reg().isReg() == false, - "Default constructed register should not isReg()"); - EXPECT(dummy.as().isValid() == false, - "Default constructed Operand casted to Reg should not be valid"); - - // Create some register (not specific to any architecture). - uint32_t rSig = Operand::kOpReg | (1 << Operand::kSignatureRegTypeShift) | - (2 << Operand::kSignatureRegKindShift) | - (8 << Operand::kSignatureSizeShift ) ; - Reg r1(Reg::fromSignature(rSig, 5)); - - EXPECT(r1.isValid() == true); - EXPECT(r1.isReg() == true); - EXPECT(r1.isReg(1) == true); - EXPECT(r1.isPhysReg() == true); - EXPECT(r1.isVirtReg() == false); - EXPECT(r1.getSignature() == rSig); - EXPECT(r1.getType() == 1); - EXPECT(r1.getKind() == 2); - EXPECT(r1.getSize() == 8); - EXPECT(r1.getId() == 5); - EXPECT(r1.isReg(1, 5) == true); // RegType and Id. 
- - EXPECT(r1._any.reserved8_4 == 0, "Reg should have 'reserved8_4' zero"); - EXPECT(r1._any.reserved12_4 == 0, "Reg should have 'reserved12_4' zero"); - - // The same type of register having different id. - Reg r2(r1, 6); - EXPECT(r2.isValid() == true); - EXPECT(r2.isReg() == true); - EXPECT(r2.isReg(1) == true); - EXPECT(r2.isPhysReg() == true); - EXPECT(r2.isVirtReg() == false); - EXPECT(r2.getSignature() == rSig); - EXPECT(r2.getType() == r1.getType()); - EXPECT(r2.getKind() == r1.getKind()); - EXPECT(r2.getSize() == r1.getSize()); - EXPECT(r2.getId() == 6); - EXPECT(r2.isReg(1, 6) == true); - - r1.reset(); - EXPECT(!r1.isValid(), - "Reset register should not be valid"); - EXPECT(!r1.isReg(), - "Reset register should not isReg()"); - - INFO("Checking basic functionality of Mem"); - Mem m; - EXPECT(m.isMem() , "Default constructed Mem() should isMem()"); - EXPECT(m == Mem() , "Two default constructed Mem() operands should be equal"); - EXPECT(m.hasBase() == false , "Default constructed Mem() should not have base specified"); - EXPECT(m.hasIndex() == false , "Default constructed Mem() should not have index specified"); - EXPECT(m.has64BitOffset() == true , "Default constructed Mem() should report 64-bit offset"); - EXPECT(m.getOffset() == 0 , "Default constructed Mem() should have be zero offset / address"); - - m.setOffset(-1); - EXPECT(m.getOffsetLo32() == -1 , "Memory operand must hold a 32-bit offset"); - EXPECT(m.getOffset() == -1 , "32-bit offset must be sign extended to 64 bits"); - - int64_t x = int64_t(ASMJIT_UINT64_C(0xFF00FF0000000001)); - m.setOffset(x); - EXPECT(m.getOffset() == x , "Memory operand must hold a 64-bit offset"); - EXPECT(m.getOffsetLo32() == 1 , "Memory operand must return correct low offset DWORD"); - EXPECT(m.getOffsetHi32() == 0xFF00FF00, "Memory operand must return correct high offset DWORD"); - - INFO("Checking basic functionality of Imm"); - EXPECT(Imm(-1).getInt64() == int64_t(-1), - "Immediate values should by default sign-extend to 64-bits"); -} -#endif // ASMJIT_TEST - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/operand.h b/src/asmjit/base/operand.h deleted file mode 100644 index eaa2c98..0000000 --- a/src/asmjit/base/operand.h +++ /dev/null @@ -1,1570 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_OPERAND_H -#define _ASMJIT_BASE_OPERAND_H - -// [Dependencies] -#include "../base/utils.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::Operand_] -// ============================================================================ - -//! Constructor-less \ref Operand. -//! -//! Contains no initialization code and can be used safely to define an array -//! of operands that won't be initialized. This is a \ref Operand compatible -//! data structure designed to be statically initialized or `static const`. -struct Operand_ { - // -------------------------------------------------------------------------- - // [Operand Type] - // -------------------------------------------------------------------------- - - //! Operand types that can be encoded in \ref Operand. - ASMJIT_ENUM(OpType) { - kOpNone = 0, //!< Not an operand or not initialized. - kOpReg = 1, //!< Operand is a register. - kOpMem = 2, //!< Operand is a memory. 
- kOpImm = 3, //!< Operand is an immediate value. - kOpLabel = 4 //!< Operand is a label. - }; - - // -------------------------------------------------------------------------- - // [Operand Signature (Bits)] - // -------------------------------------------------------------------------- - - ASMJIT_ENUM(SignatureBits) { - // Operand type (3 least significant bits). - // |........|........|........|.....XXX| - kSignatureOpShift = 0, - kSignatureOpBits = 0x07U, - kSignatureOpMask = kSignatureOpBits << kSignatureOpShift, - - // Operand size (8 most significant bits). - // |XXXXXXXX|........|........|........| - kSignatureSizeShift = 24, - kSignatureSizeBits = 0xFFU, - kSignatureSizeMask = kSignatureSizeBits << kSignatureSizeShift, - - // Register type (5 bits). - // |........|........|........|XXXXX...| - kSignatureRegTypeShift = 3, - kSignatureRegTypeBits = 0x1FU, - kSignatureRegTypeMask = kSignatureRegTypeBits << kSignatureRegTypeShift, - - // Register kind (4 bits). - // |........|........|....XXXX|........| - kSignatureRegKindShift = 8, - kSignatureRegKindBits = 0x0FU, - kSignatureRegKindMask = kSignatureRegKindBits << kSignatureRegKindShift, - - // Memory base type (5 bits). - // |........|........|........|XXXXX...| - kSignatureMemBaseTypeShift = 3, - kSignatureMemBaseTypeBits = 0x1FU, - kSignatureMemBaseTypeMask = kSignatureMemBaseTypeBits << kSignatureMemBaseTypeShift, - - // Memory index type (5 bits). - // |........|........|...XXXXX|........| - kSignatureMemIndexTypeShift = 8, - kSignatureMemIndexTypeBits = 0x1FU, - kSignatureMemIndexTypeMask = kSignatureMemIndexTypeBits << kSignatureMemIndexTypeShift, - - // Memory base+index combined (10 bits). - // |........|........|...XXXXX|XXXXX...| - kSignatureMemBaseIndexShift = 3, - kSignatureMemBaseIndexBits = 0x3FFU, - kSignatureMemBaseIndexMask = kSignatureMemBaseIndexBits << kSignatureMemBaseIndexShift, - - // Memory should be encoded as absolute immediate (X86|X64). - // |........|........|.XX.....|........| - kSignatureMemAddrTypeShift = 13, - kSignatureMemAddrTypeBits = 0x03U, - kSignatureMemAddrTypeMask = kSignatureMemAddrTypeBits << kSignatureMemAddrTypeShift, - - // This memory operand represents a function argument's stack location (CodeCompiler) - // |........|........|.X......|........| - kSignatureMemArgHomeShift = 15, - kSignatureMemArgHomeBits = 0x01U, - kSignatureMemArgHomeFlag = kSignatureMemArgHomeBits << kSignatureMemArgHomeShift, - - // This memory operand represents a virtual register's home-slot (CodeCompiler). - // |........|........|X.......|........| - kSignatureMemRegHomeShift = 16, - kSignatureMemRegHomeBits = 0x01U, - kSignatureMemRegHomeFlag = kSignatureMemRegHomeBits << kSignatureMemRegHomeShift - }; - - // -------------------------------------------------------------------------- - // [Operand Id] - // -------------------------------------------------------------------------- - - //! Operand id helpers useful for id <-> index translation. - ASMJIT_ENUM(PackedId) { - //! Minimum valid packed-id. - kPackedIdMin = 0x00000100U, - //! Maximum valid packed-id. - kPackedIdMax = 0xFFFFFFFFU, - //! Count of valid packed-ids. - kPackedIdCount = kPackedIdMax - kPackedIdMin + 1 - }; - - // -------------------------------------------------------------------------- - // [Operand Utilities] - // -------------------------------------------------------------------------- - - //! Get if the given `id` is a valid packed-id that can be used by Operand. - //! 
Packed ids are those equal or greater than `kPackedIdMin` and lesser or - //! equal to `kPackedIdMax`. This concept was created to support virtual - //! registers and to make them distinguishable from physical ones. It allows - //! a single uint32_t to contain either physical register id or virtual - //! register id represented as `packed-id`. This concept is used also for - //! labels to make the API consistent. - static ASMJIT_INLINE bool isPackedId(uint32_t id) noexcept { return id - kPackedIdMin < kPackedIdCount; } - //! Convert a real-id into a packed-id that can be stored in Operand. - static ASMJIT_INLINE uint32_t packId(uint32_t id) noexcept { return id + kPackedIdMin; } - //! Convert a packed-id back to real-id. - static ASMJIT_INLINE uint32_t unpackId(uint32_t id) noexcept { return id - kPackedIdMin; } - - // -------------------------------------------------------------------------- - // [Operand Data] - // -------------------------------------------------------------------------- - - //! Any operand. - struct AnyData { - uint32_t signature; //!< Type of the operand (see \ref OpType) and other data. - uint32_t id; //!< Operand id or `0`. - uint32_t reserved8_4; //!< \internal - uint32_t reserved12_4; //!< \internal - }; - - //! Register operand data. - struct RegData { - uint32_t signature; //!< Type of the operand (always \ref kOpReg) and other data. - uint32_t id; //!< Physical or virtual register id. - uint32_t reserved8_4; //!< \internal - uint32_t reserved12_4; //!< \internal - }; - - //! Memory operand data. - struct MemData { - uint32_t signature; //!< Type of the operand (always \ref kOpMem) and other data. - uint32_t index; //!< INDEX register id or `0`. - - // [BASE + OFF32] vs just [OFF64]. - union { - uint64_t offset64; //!< 64-bit offset, combining low and high 32-bit parts. - struct { -#if ASMJIT_ARCH_LE - uint32_t offsetLo32; //!< 32-bit low offset part. - uint32_t base; //!< 32-bit high offset part or BASE. -#else - uint32_t base; //!< 32-bit high offset part or BASE. - uint32_t offsetLo32; //!< 32-bit low offset part. -#endif - }; - }; - }; - - //! Immediate operand data. - struct ImmData { - uint32_t signature; //!< Type of the operand (always \ref kOpImm) and other data. - uint32_t id; //!< Immediate id (always `0`). - UInt64 value; //!< Immediate value. - }; - - //! Label operand data. - struct LabelData { - uint32_t signature; //!< Type of the operand (always \ref kOpLabel) and other data. - uint32_t id; //!< Label id (`0` if not initialized). - uint32_t reserved8_4; //!< \internal - uint32_t reserved12_4; //!< \internal - }; - - // -------------------------------------------------------------------------- - // [Init & Copy] - // -------------------------------------------------------------------------- - - //! \internal - //! - //! Initialize the operand to `other` (used by constructors). - ASMJIT_INLINE void _init(const Operand_& other) noexcept { ::memcpy(this, &other, sizeof(Operand_)); } - - //! \internal - ASMJIT_INLINE void _initReg(uint32_t signature, uint32_t rd) { - _init_packed_d0_d1(signature, rd); - _init_packed_d2_d3(0, 0); - } - - //! \internal - ASMJIT_INLINE void _init_packed_d0_d1(uint32_t d0, uint32_t d1) noexcept { _packed[0].setPacked_2x32(d0, d1); } - //! \internal - ASMJIT_INLINE void _init_packed_d2_d3(uint32_t d2, uint32_t d3) noexcept { _packed[1].setPacked_2x32(d2, d3); } - - //! \internal - //! - //! Initialize the operand from `other` (used by operator overloads). 
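A short sketch of the packed-id helpers described here; the concrete ids are examples only.

```
// Sketch: one uint32_t can hold either a physical or a virtual (packed) id.
uint32_t virtId = asmjit::Operand::packId(0);       // 0x00000100 (kPackedIdMin)
bool phys = asmjit::Operand::isPackedId(5);         // false - physical register id
bool virt = asmjit::Operand::isPackedId(virtId);    // true  - virtual register id
uint32_t back = asmjit::Operand::unpackId(virtId);  // 0 again
```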
- ASMJIT_INLINE void copyFrom(const Operand_& other) noexcept { ::memcpy(this, &other, sizeof(Operand_)); } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get if the operand matches the given signature `sign`. - ASMJIT_INLINE bool hasSignature(uint32_t signature) const noexcept { return _signature == signature; } - - //! Get if the operand matches a signature of the `other` operand. - ASMJIT_INLINE bool hasSignature(const Operand_& other) const noexcept { - return _signature == other.getSignature(); - } - - //! Get a 32-bit operand signature. - //! - //! Signature is first 4 bytes of the operand data. It's used mostly for - //! operand checking as it's much faster to check 4 bytes at once than having - //! to check these bytes individually. - ASMJIT_INLINE uint32_t getSignature() const noexcept { return _signature; } - - //! Set the operand signature (see \ref getSignature). - //! - //! Improper use of `setSignature()` can lead to hard-to-debug errors. - ASMJIT_INLINE void setSignature(uint32_t signature) noexcept { _signature = signature; } - - ASMJIT_INLINE bool _hasSignatureData(uint32_t bits) const noexcept { return (_signature & bits) != 0; } - - //! \internal - //! - //! Unpacks information from operand's signature. - ASMJIT_INLINE uint32_t _getSignatureData(uint32_t bits, uint32_t shift) const noexcept { return (_signature >> shift) & bits; } - - //! \internal - //! - //! Packs information to operand's signature. - ASMJIT_INLINE void _setSignatureData(uint32_t value, uint32_t bits, uint32_t shift) noexcept { - ASMJIT_ASSERT(value <= bits); - _signature = (_signature & ~(bits << shift)) | (value << shift); - } - - ASMJIT_INLINE void _addSignatureData(uint32_t data) noexcept { _signature |= data; } - - //! Clears specified bits in operand's signature. - ASMJIT_INLINE void _clearSignatureData(uint32_t bits, uint32_t shift) noexcept { _signature &= ~(bits << shift); } - - //! Get type of the operand, see \ref OpType. - ASMJIT_INLINE uint32_t getOp() const noexcept { return _getSignatureData(kSignatureOpBits, kSignatureOpShift); } - //! Get if the operand is none (\ref kOpNone). - ASMJIT_INLINE bool isNone() const noexcept { return getOp() == 0; } - //! Get if the operand is a register (\ref kOpReg). - ASMJIT_INLINE bool isReg() const noexcept { return getOp() == kOpReg; } - //! Get if the operand is a memory location (\ref kOpMem). - ASMJIT_INLINE bool isMem() const noexcept { return getOp() == kOpMem; } - //! Get if the operand is an immediate (\ref kOpImm). - ASMJIT_INLINE bool isImm() const noexcept { return getOp() == kOpImm; } - //! Get if the operand is a label (\ref kOpLabel). - ASMJIT_INLINE bool isLabel() const noexcept { return getOp() == kOpLabel; } - - //! Get if the operand is a physical register. - ASMJIT_INLINE bool isPhysReg() const noexcept { return isReg() && _reg.id < Globals::kInvalidRegId; } - //! Get if the operand is a virtual register. - ASMJIT_INLINE bool isVirtReg() const noexcept { return isReg() && isPackedId(_reg.id); } - - //! Get if the operand specifies a size (i.e. the size is not zero). - ASMJIT_INLINE bool hasSize() const noexcept { return _hasSignatureData(kSignatureSizeMask); } - //! Get if the size of the operand matches `size`. - ASMJIT_INLINE bool hasSize(uint32_t size) const noexcept { return getSize() == size; } - - //! Get size of the operand (in bytes). - //! - //! 
The value returned depends on the operand type: - //! * None - Should always return zero size. - //! * Reg - Should always return the size of the register. If the register - //! size depends on architecture (like \ref X86CReg and \ref X86DReg) - //! the size returned should be the greatest possible (so it should - //! return 64-bit size in such case). - //! * Mem - Size is optional and will be in most cases zero. - //! * Imm - Should always return zero size. - //! * Label - Should always return zero size. - ASMJIT_INLINE uint32_t getSize() const noexcept { return _getSignatureData(kSignatureSizeBits, kSignatureSizeShift); } - - //! Get the operand id. - //! - //! The value returned should be interpreted accordingly to the operand type: - //! * None - Should be `0`. - //! * Reg - Physical or virtual register id. - //! * Mem - Multiple meanings - BASE address (register or label id), or - //! high value of a 64-bit absolute address. - //! * Imm - Should be `0`. - //! * Label - Label id if it was created by using `newLabel()` or `0` - //! if the label is invalid or uninitialized. - ASMJIT_INLINE uint32_t getId() const noexcept { return _any.id; } - - //! Get if the operand is 100% equal to `other`. - ASMJIT_INLINE bool isEqual(const Operand_& other) const noexcept { - return (_packed[0] == other._packed[0]) & - (_packed[1] == other._packed[1]) ; - } - - //! Get if the operand is a register matching `rType`. - ASMJIT_INLINE bool isReg(uint32_t rType) const noexcept { - const uint32_t kMsk = (kSignatureOpBits << kSignatureOpShift) | (kSignatureRegTypeBits << kSignatureRegTypeShift); - const uint32_t kSgn = (kOpReg << kSignatureOpShift) | (rType << kSignatureRegTypeShift); - return (_signature & kMsk) == kSgn; - } - - //! Get whether the operand is register and of `type` and `id`. - ASMJIT_INLINE bool isReg(uint32_t rType, uint32_t rId) const noexcept { - return isReg(rType) && getId() == rId; - } - - //! Get whether the operand is a register or memory. - ASMJIT_INLINE bool isRegOrMem() const noexcept { - ASMJIT_ASSERT(kOpMem - kOpReg == 1); - return Utils::inInterval(getOp(), kOpReg, kOpMem); - } - - //! Cast this operand to `T` type. - template - ASMJIT_INLINE T& as() noexcept { return static_cast(*this); } - //! Cast this operand to `T` type (const). - template - ASMJIT_INLINE const T& as() const noexcept { return static_cast(*this); } - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - //! Reset the `Operand` to none. - //! - //! None operand is defined the following way: - //! - Its signature is zero (kOpNone, and the rest zero as well). - //! - Its id is `0`. - //! - The reserved8_4 field is set to `0`. - //! - The reserved12_4 field is set to zero. - //! - //! In other words, reset operands have all members set to zero. Reset operand - //! must match the Operand state right after its construction. Alternatively, - //! if you have an array of operands, you can simply use `memset()`. - //! - //! ``` - //! using namespace asmjit; - //! - //! Operand a; - //! Operand b; - //! assert(a == b); - //! - //! b = x86::eax; - //! assert(a != b); - //! - //! b.reset(); - //! assert(a == b); - //! - //! memset(&b, 0, sizeof(Operand)); - //! assert(a == b); - //! 
``` - ASMJIT_INLINE void reset() noexcept { - _init_packed_d0_d1(kOpNone, 0); - _init_packed_d2_d3(0, 0); - } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - template - ASMJIT_INLINE bool operator==(const T& other) const noexcept { return isEqual(other); } - template - ASMJIT_INLINE bool operator!=(const T& other) const noexcept { return !isEqual(other); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - union { - AnyData _any; //!< Generic data. - RegData _reg; //!< Physical or virtual register data. - MemData _mem; //!< Memory address data. - ImmData _imm; //!< Immediate value data. - LabelData _label; //!< Label data. - - uint32_t _signature; //!< Operand signature (first 32-bits). - UInt64 _packed[2]; //!< Operand packed into two 64-bit integers. - }; -}; - -// ============================================================================ -// [asmjit::Operand] -// ============================================================================ - -//! Operand can contain register, memory location, immediate, or label. -class Operand : public Operand_ { -public: - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create an uninitialized operand. - ASMJIT_INLINE Operand() noexcept { reset(); } - //! Create a reference to `other` operand. - ASMJIT_INLINE Operand(const Operand& other) noexcept { _init(other); } - //! Create a reference to `other` operand. - explicit ASMJIT_INLINE Operand(const Operand_& other) noexcept { _init(other); } - //! Create a completely uninitialized operand (dangerous). - explicit ASMJIT_INLINE Operand(const _NoInit&) noexcept {} - - // -------------------------------------------------------------------------- - // [Clone] - // -------------------------------------------------------------------------- - - //! Clone the `Operand`. - ASMJIT_INLINE Operand clone() const noexcept { return Operand(*this); } - - ASMJIT_INLINE Operand& operator=(const Operand_& other) noexcept { copyFrom(other); return *this; } -}; - -// ============================================================================ -// [asmjit::Label] -// ============================================================================ - -//! Label (jump target or data location). -//! -//! Label represents a location in code typically used as a jump target, but -//! may be also a reference to some data or a static variable. Label has to be -//! explicitly created by CodeEmitter. -//! -//! Example of using labels: -//! -//! ~~~ -//! // Create a CodeEmitter (for example X86Assembler). -//! X86Assembler a; -//! -//! // Create Label instance. -//! Label L1 = a.newLabel(); -//! -//! // ... your code ... -//! -//! // Using label. -//! a.jump(L1); -//! -//! // ... your code ... -//! -//! // Bind label to the current position, see `CodeEmitter::bind()`. -//! a.bind(L1); -//! ~~~ -class Label : public Operand { -public: - //! Type of the Label. - enum Type { - kTypeAnonymous = 0, //!< Anonymous (unnamed) label. - kTypeLocal = 1, //!< Local label (always has parentId). - kTypeGlobal = 2, //!< Global label (never has parentId). - kTypeCount = 3 //!< Number of label types. 
- }; - - // TODO: Find a better place, find a better name. - enum { - //! Label tag is used as a sub-type, forming a unique signature across all - //! operand types as 0x1 is never associated with any register (reg-type). - //! This means that a memory operand's BASE register can be constructed - //! from virtually any operand (register vs. label) by just assigning its - //! type (reg type or label-tag) and operand id. - kLabelTag = 0x1 - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create new, unassociated label. - ASMJIT_INLINE Label() noexcept : Operand(NoInit) { reset(); } - //! Create a reference to another label. - ASMJIT_INLINE Label(const Label& other) noexcept : Operand(other) {} - - explicit ASMJIT_INLINE Label(uint32_t id) noexcept : Operand(NoInit) { - _init_packed_d0_d1(kOpLabel, id); - _init_packed_d2_d3(0, 0); - } - - explicit ASMJIT_INLINE Label(const _NoInit&) noexcept : Operand(NoInit) {} - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - // TODO: I think that if operand is reset it shouldn't say it's a Label, it - // should be none like all other operands. - ASMJIT_INLINE void reset() noexcept { - _init_packed_d0_d1(kOpLabel, 0); - _init_packed_d2_d3(0, 0); - } - - // -------------------------------------------------------------------------- - // [Label Specific] - // -------------------------------------------------------------------------- - - //! Get if the label was created by CodeEmitter and has an assigned id. - ASMJIT_INLINE bool isValid() const noexcept { return _label.id != 0; } - //! Set label id. - ASMJIT_INLINE void setId(uint32_t id) { _label.id = id; } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE Label& operator=(const Label& other) noexcept { copyFrom(other); return *this; } -}; - -// ============================================================================ -// [asmjit::Reg] -// ============================================================================ - -#define ASMJIT_DEFINE_REG_TRAITS(TRAITS_T, REG_T, TYPE, KIND, SIZE, COUNT, TYPE_ID) \ -template<> \ -struct TRAITS_T < TYPE > { \ - typedef REG_T Reg; \ - \ - enum { \ - kValid = 1, \ - kCount = COUNT, \ - kTypeId = TYPE_ID, \ - \ - kType = TYPE, \ - kKind = KIND, \ - kSize = SIZE, \ - kSignature = (Operand::kOpReg << Operand::kSignatureOpShift ) | \ - (kType << Operand::kSignatureRegTypeShift) | \ - (kKind << Operand::kSignatureRegKindShift) | \ - (kSize << Operand::kSignatureSizeShift ) \ - }; \ -} \ - -#define ASMJIT_DEFINE_ABSTRACT_REG(REG_T, BASE_T) \ -public: \ - /*! Default constructor doesn't setup anything, it's like `Operand()`. */ \ - ASMJIT_INLINE REG_T() ASMJIT_NOEXCEPT \ - : BASE_T() {} \ - \ - /*! Copy the `other` REG_T register operand. */ \ - ASMJIT_INLINE REG_T(const REG_T& other) ASMJIT_NOEXCEPT \ - : BASE_T(other) {} \ - \ - /*! Copy the `other` REG_T register operand having its id set to `rId` */ \ - ASMJIT_INLINE REG_T(const Reg& other, uint32_t rId) ASMJIT_NOEXCEPT \ - : BASE_T(other, rId) {} \ - \ - /*! Create a REG_T register operand based on `signature` and `rId`. 
*/ \ - ASMJIT_INLINE REG_T(const _Init& init, uint32_t signature, uint32_t rId) ASMJIT_NOEXCEPT \ - : BASE_T(init, signature, rId) {} \ - \ - /*! Create a completely uninitialized REG_T register operand (garbage). */ \ - explicit ASMJIT_INLINE REG_T(const _NoInit&) ASMJIT_NOEXCEPT \ - : BASE_T(NoInit) {} \ - \ - /*! Clone the register operand. */ \ - ASMJIT_INLINE REG_T clone() const ASMJIT_NOEXCEPT { return REG_T(*this); } \ - \ - /*! Create a new register from register type and id. */ \ - static ASMJIT_INLINE REG_T fromTypeAndId(uint32_t rType, uint32_t rId) ASMJIT_NOEXCEPT { \ - return REG_T(Init, signatureOf(rType), rId); \ - } \ - \ - /*! Create a new register from signature and id. */ \ - static ASMJIT_INLINE REG_T fromSignature(uint32_t signature, uint32_t rId) ASMJIT_NOEXCEPT { \ - return REG_T(Init, signature, rId); \ - } \ - \ - ASMJIT_INLINE REG_T& operator=(const REG_T& other) ASMJIT_NOEXCEPT { \ - copyFrom(other); return *this; \ - } - -#define ASMJIT_DEFINE_FINAL_REG(REG_T, BASE_T, TRAITS_T) \ - ASMJIT_DEFINE_ABSTRACT_REG(REG_T, BASE_T) \ - \ - /*! Create a REG_T register with `id`. */ \ - explicit ASMJIT_INLINE REG_T(uint32_t rId) ASMJIT_NOEXCEPT \ - : BASE_T(Init, kSignature, rId) {} \ - \ - enum { \ - kThisType = TRAITS_T::kType, \ - kThisKind = TRAITS_T::kKind, \ - kThisSize = TRAITS_T::kSize, \ - kSignature = TRAITS_T::kSignature \ - }; - -//! Structure that contains core register information. -//! -//! This information is compatible with operand's signature (32-bit integer) -//! and `RegInfo` just provides easy way to access it. -struct RegInfo { - ASMJIT_INLINE uint32_t getSignature() const noexcept { - return _signature; - } - - ASMJIT_INLINE uint32_t getOp() const noexcept { - return (_signature >> Operand::kSignatureOpShift) & Operand::kSignatureOpBits; - } - - ASMJIT_INLINE uint32_t getType() const noexcept { - return (_signature >> Operand::kSignatureRegTypeShift) & Operand::kSignatureRegTypeBits; - } - - ASMJIT_INLINE uint32_t getKind() const noexcept { - return (_signature >> Operand::kSignatureRegKindShift) & Operand::kSignatureRegKindBits; - } - - ASMJIT_INLINE uint32_t getSize() const noexcept { - return (_signature >> Operand::kSignatureSizeShift) & Operand::kSignatureSizeBits; - } - - uint32_t _signature; -}; - -//! Physical/Virtual register operand. -class Reg : public Operand { -public: - //! Architecture neutral register types. - //! - //! These must be reused by any platform that contains that types. All GP - //! and VEC registers are also allowed by design to be part of a BASE|INDEX - //! of a memory operand. - ASMJIT_ENUM(RegType) { - kRegNone = 0, //!< No register - unused, invalid, multiple meanings. - // (1 is used as a LabelTag) - kRegGp8Lo = 2, //!< 8-bit low general purpose register (X86). - kRegGp8Hi = 3, //!< 8-bit high general purpose register (X86). - kRegGp16 = 4, //!< 16-bit general purpose register (X86). - kRegGp32 = 5, //!< 32-bit general purpose register (X86|ARM). - kRegGp64 = 6, //!< 64-bit general purpose register (X86|ARM). - kRegVec32 = 7, //!< 32-bit view of a vector register (ARM). - kRegVec64 = 8, //!< 64-bit view of a vector register (ARM). - kRegVec128 = 9, //!< 128-bit view of a vector register (X86|ARM). - kRegVec256 = 10, //!< 256-bit view of a vector register (X86). - kRegVec512 = 11, //!< 512-bit view of a vector register (X86). - kRegVec1024 = 12, //!< 1024-bit view of a vector register (future). - kRegVec2048 = 13, //!< 2048-bit view of a vector register (future). 
- kRegIP = 14, //!< Universal id of IP/PC register (if separate). - kRegCustom = 15, //!< Start of platform dependent register types (must be honored). - kRegMax = 31 //!< Maximum possible register id of all architectures. - }; - - //! Architecture neutral register kinds. - ASMJIT_ENUM(Kind) { - kKindGp = 0, //!< General purpose register (X86|ARM). - kKindVec = 1, //!< Vector register (X86|ARM). - kKindMax = 15 //!< Maximum possible register kind of all architectures. - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a dummy register operand. - ASMJIT_INLINE Reg() noexcept : Operand() {} - //! Create a new register operand which is the same as `other` . - ASMJIT_INLINE Reg(const Reg& other) noexcept : Operand(other) {} - //! Create a new register operand compatible with `other`, but with a different `rId`. - ASMJIT_INLINE Reg(const Reg& other, uint32_t rId) noexcept : Operand(NoInit) { - _init_packed_d0_d1(other._signature, rId); - _packed[1] = other._packed[1]; - } - - //! Create a register initialized to `signature` and `rId`. - ASMJIT_INLINE Reg(const _Init&, uint32_t signature, uint32_t rId) noexcept : Operand(NoInit) { - _initReg(signature, rId); - } - explicit ASMJIT_INLINE Reg(const _NoInit&) noexcept : Operand(NoInit) {} - - //! Create a new register based on `signature` and `rId`. - static ASMJIT_INLINE Reg fromSignature(uint32_t signature, uint32_t rId) noexcept { return Reg(Init, signature, rId); } - - // -------------------------------------------------------------------------- - // [Reg Specific] - // -------------------------------------------------------------------------- - - //! Get if the register is valid (either virtual or physical). - ASMJIT_INLINE bool isValid() const noexcept { return _signature != 0; } - //! Get if this is a physical register. - ASMJIT_INLINE bool isPhysReg() const noexcept { return _reg.id < Globals::kInvalidRegId; } - //! Get if this is a virtual register (used by \ref CodeCompiler). - ASMJIT_INLINE bool isVirtReg() const noexcept { return isPackedId(_reg.id); } - - //! Get if this register is the same as `other`. - //! - //! This is just an optimization. Registers by default only use the first - //! 8 bytes of the Operand, so this method takes advantage of this knowledge - //! and only compares these 8 bytes. If both operands were created correctly - //! then `isEqual()` and `isSame()` should give the same answer, however, if - //! some operands contains a garbage or other metadata in the upper 8 bytes - //! then `isSame()` may return `true` in cases where `isEqual()` returns - //! false. However. no such case is known at the moment. - ASMJIT_INLINE bool isSame(const Reg& other) const noexcept { return _packed[0] == other._packed[0]; } - - //! Get if the register type matches `rType` - same as `isReg(rType)`, provided for convenience. - ASMJIT_INLINE bool isType(uint32_t rType) const noexcept { return (_signature & kSignatureRegTypeMask) == (rType << kSignatureRegTypeShift); } - //! Get if the register kind matches `rKind`. - ASMJIT_INLINE bool isKind(uint32_t rKind) const noexcept { return (_signature & kSignatureRegKindMask) == (rKind << kSignatureRegKindShift); } - - //! Get if the register is a general purpose register (any size). - ASMJIT_INLINE bool isGp() const noexcept { return isKind(kKindGp); } - //! Get if the register is a vector register. 
- ASMJIT_INLINE bool isVec() const noexcept { return isKind(kKindVec); } - - using Operand_::isReg; - - //! Same as `isType()`, provided for convenience. - ASMJIT_INLINE bool isReg(uint32_t rType) const noexcept { return isType(rType); } - //! Get if the register type matches `type` and register id matches `rId`. - ASMJIT_INLINE bool isReg(uint32_t rType, uint32_t rId) const noexcept { return isType(rType) && getId() == rId; } - - //! Get the register type. - ASMJIT_INLINE uint32_t getType() const noexcept { return _getSignatureData(kSignatureRegTypeBits, kSignatureRegTypeShift); } - //! Get the register kind. - ASMJIT_INLINE uint32_t getKind() const noexcept { return _getSignatureData(kSignatureRegKindBits, kSignatureRegKindShift); } - - //! Clone the register operand. - ASMJIT_INLINE Reg clone() const noexcept { return Reg(*this); } - - //! Cast this register to `RegT` by also changing its signature. - //! - //! NOTE: Improper use of `cloneAs()` can lead to hard-to-debug errors. - template - ASMJIT_INLINE RegT cloneAs() const noexcept { return RegT(Init, RegT::kSignature, getId()); } - - //! Cast this register to `other` by also changing its signature. - //! - //! NOTE: Improper use of `cloneAs()` can lead to hard-to-debug errors. - template - ASMJIT_INLINE RegT cloneAs(const RegT& other) const noexcept { return RegT(Init, other.getSignature(), getId()); } - - //! Set the register id to `id`. - ASMJIT_INLINE void setId(uint32_t rId) noexcept { _reg.id = rId; } - - //! Set a 32-bit operand signature based on traits of `RegT`. - template - ASMJIT_INLINE void setSignatureT() noexcept { _signature = RegT::kSignature; } - - //! Set register's `signature` and `rId`. - ASMJIT_INLINE void setSignatureAndId(uint32_t signature, uint32_t rId) noexcept { - _signature = signature; - _reg.id = rId; - } - - // -------------------------------------------------------------------------- - // [Reg Statics] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE bool isGp(const Operand_& op) noexcept { - // Check operand type and register kind. Not interested in register type and size. - const uint32_t kSgn = (kOpReg << kSignatureOpShift ) | - (kKindGp << kSignatureRegKindShift) ; - return (op.getSignature() & (kSignatureOpMask | kSignatureRegKindMask)) == kSgn; - } - - //! Get if the `op` operand is either a low or high 8-bit GPB register. - static ASMJIT_INLINE bool isVec(const Operand_& op) noexcept { - // Check operand type and register kind. Not interested in register type and size. - const uint32_t kSgn = (kOpReg << kSignatureOpShift ) | - (kKindVec << kSignatureRegKindShift) ; - return (op.getSignature() & (kSignatureOpMask | kSignatureRegKindMask)) == kSgn; - } - - static ASMJIT_INLINE bool isGp(const Operand_& op, uint32_t rId) noexcept { return isGp(op) & (op.getId() == rId); } - static ASMJIT_INLINE bool isVec(const Operand_& op, uint32_t rId) noexcept { return isVec(op) & (op.getId() == rId); } -}; - -// ============================================================================ -// [asmjit::RegOnly] -// ============================================================================ - -//! RegOnly is 8-byte version of `Reg` that only allows to store either `Reg` -//! or nothing. This class was designed to decrease the space consumed by each -//! extra "operand" in `CodeEmitter` and `CBInst` classes. 
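// Example (sketch): with the static helpers above, classifying an operand is a
// single masked compare of the packed 32-bit signature. The function name
// `classifyOperand` is illustrative only.
static const char* classifyOperand(const asmjit::Operand_& op) noexcept {
  using namespace asmjit;

  // Reg::isGp/isVec test op-type and reg-kind in one masked compare.
  if (Reg::isGp(op))  return "gp register";
  if (Reg::isVec(op)) return "vec register";

  // The remaining operand types differ only in the op-type field.
  if (op.isMem())     return "memory";
  if (op.isImm())     return "immediate";
  if (op.isLabel())   return "label";
  return "none";
}
// (The `RegOnly` struct documented above is defined next.)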
-struct RegOnly { - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - //! Initialize the `RegOnly` instance to hold register `signature` and `id`. - ASMJIT_INLINE void init(uint32_t signature, uint32_t id) noexcept { - _signature = signature; - _id = id; - } - - ASMJIT_INLINE void init(const Reg& reg) noexcept { init(reg.getSignature(), reg.getId()); } - ASMJIT_INLINE void init(const RegOnly& reg) noexcept { init(reg.getSignature(), reg.getId()); } - - //! Reset the `RegOnly` to none. - ASMJIT_INLINE void reset() noexcept { init(0, 0); } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get if the `ExtraReg` is none (same as calling `Operand_::isNone()`). - ASMJIT_INLINE bool isNone() const noexcept { return _signature == 0; } - //! Get if the register is valid (either virtual or physical). - ASMJIT_INLINE bool isValid() const noexcept { return _signature != 0; } - - //! Get if this is a physical register. - ASMJIT_INLINE bool isPhysReg() const noexcept { return _id < Globals::kInvalidRegId; } - //! Get if this is a virtual register (used by \ref CodeCompiler). - ASMJIT_INLINE bool isVirtReg() const noexcept { return Operand::isPackedId(_id); } - - //! Get register signature or 0. - ASMJIT_INLINE uint32_t getSignature() const noexcept { return _signature; } - //! Get register id or 0. - ASMJIT_INLINE uint32_t getId() const noexcept { return _id; } - - //! \internal - //! - //! Unpacks information from operand's signature. - ASMJIT_INLINE uint32_t _getSignatureData(uint32_t bits, uint32_t shift) const noexcept { return (_signature >> shift) & bits; } - - //! Get the register type. - ASMJIT_INLINE uint32_t getType() const noexcept { return _getSignatureData(Operand::kSignatureRegTypeBits, Operand::kSignatureRegTypeShift); } - //! Get the register kind. - ASMJIT_INLINE uint32_t getKind() const noexcept { return _getSignatureData(Operand::kSignatureRegKindBits, Operand::kSignatureRegKindShift); } - - // -------------------------------------------------------------------------- - // [ToReg] - // -------------------------------------------------------------------------- - - //! Convert back to `RegT` operand. - template - ASMJIT_INLINE RegT toReg() const noexcept { return RegT(Init, _signature, _id); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Type of the operand, either `kOpNone` or `kOpReg`. - uint32_t _signature; - //! Physical or virtual register id. - uint32_t _id; -}; - -// ============================================================================ -// [asmjit::Mem] -// ============================================================================ - -//! Base class for all memory operands. -//! -//! NOTE: It's tricky to pack all possible cases that define a memory operand -//! into just 16 bytes. The `Mem` splits data into the following parts: -//! -//! BASE - Base register or label - requires 36 bits total. 4 bits are used -//! to encode the type of the BASE operand (label vs. register type) and -//! the remaining 32 bits define the BASE id, which can be a physical or -//! virtual register index. If BASE type is zero, which is never used as -//! 
a register-type and label doesn't use it as well then BASE field -//! contains a high DWORD of a possible 64-bit absolute address, which is -//! possible on X64. -//! -//! INDEX - Index register (or theoretically Label, which doesn't make sense). -//! Encoding is similar to BASE - it also requires 36 bits and splits the -//! encoding to INDEX type (4 bits defining the register type) and id (32-bits). -//! -//! OFFSET - A relative offset of the address. Basically if BASE is specified -//! the relative displacement adjusts BASE and an optional INDEX. if BASE is -//! not specified then the OFFSET should be considered as ABSOLUTE address -//! (at least on X86/X64). In that case its low 32 bits are stored in -//! DISPLACEMENT field and the remaining high 32 bits are stored in BASE. -//! -//! OTHER FIELDS - There is rest 8 bits that can be used for whatever purpose. -//! The X86Mem operand uses these bits to store segment override -//! prefix and index shift (scale). -class Mem : public Operand { -public: - enum AddrType { - kAddrTypeDefault = 0, - kAddrTypeAbs = 1, - kAddrTypeRel = 2, - kAddrTypeWrt = 3 - }; - - // Shortcuts. - enum SignatureMem { - kSignatureMemAbs = kAddrTypeAbs << kSignatureMemAddrTypeShift, - kSignatureMemRel = kAddrTypeRel << kSignatureMemAddrTypeShift, - kSignatureMemWrt = kAddrTypeWrt << kSignatureMemAddrTypeShift - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Construct a default `Mem` operand, that points to [0]. - ASMJIT_INLINE Mem() noexcept : Operand(NoInit) { reset(); } - ASMJIT_INLINE Mem(const Mem& other) noexcept : Operand(other) {} - - ASMJIT_INLINE Mem(const _Init&, - uint32_t baseType, uint32_t baseId, - uint32_t indexType, uint32_t indexId, - int32_t off, uint32_t size, uint32_t flags) noexcept : Operand(NoInit) { - - uint32_t signature = (baseType << kSignatureMemBaseTypeShift ) | - (indexType << kSignatureMemIndexTypeShift) | - (size << kSignatureSizeShift ) ; - - _init_packed_d0_d1(kOpMem | signature | flags, indexId); - _mem.base = baseId; - _mem.offsetLo32 = static_cast(off); - } - explicit ASMJIT_INLINE Mem(const _NoInit&) noexcept : Operand(NoInit) {} - - // -------------------------------------------------------------------------- - // [Mem Specific] - // -------------------------------------------------------------------------- - - //! Clone `Mem` operand. - ASMJIT_INLINE Mem clone() const noexcept { return Mem(*this); } - - //! Reset the memory operand - after reset the memory points to [0]. 
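// Example (sketch): the BASE/INDEX/OFFSET packing described above, exercised
// through the setters defined further below. Any valid base/index registers
// can be passed in; the size and offset values are arbitrary.
static void memLayoutExample(const asmjit::Reg& base, const asmjit::Reg& index) noexcept {
  using namespace asmjit;

  Mem m;                    // Default memory operand - points to [0].
  m.setBase(base);          // BASE type goes to the signature, id to `_mem.base`.
  m.setIndex(index);        // INDEX type goes to the signature, id to `_mem.index`.
  m.setOffsetLo32(16);      // With a BASE present only a 32-bit offset is stored.
  m.setSize(8);             // Optional size of the access, in bytes.

  ASMJIT_ASSERT(m.hasBaseReg() && m.hasIndexReg());
  ASMJIT_ASSERT(!m.has64BitOffset());
}
// (Mem's member functions, starting with reset(), follow.)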
- ASMJIT_INLINE void reset() noexcept { - _init_packed_d0_d1(kOpMem, 0); - _init_packed_d2_d3(0, 0); - } - - ASMJIT_INLINE bool hasAddrType() const noexcept { return _hasSignatureData(kSignatureMemAddrTypeMask); } - ASMJIT_INLINE uint32_t getAddrType() const noexcept { return _getSignatureData(kSignatureMemAddrTypeBits, kSignatureMemAddrTypeShift); } - ASMJIT_INLINE void setAddrType(uint32_t addrType) noexcept { return _setSignatureData(addrType, kSignatureMemAddrTypeBits, kSignatureMemAddrTypeShift); } - ASMJIT_INLINE void resetAddrType() noexcept { return _clearSignatureData(kSignatureMemAddrTypeBits, kSignatureMemAddrTypeShift); } - - ASMJIT_INLINE bool isAbs() const noexcept { return getAddrType() == kAddrTypeAbs; } - ASMJIT_INLINE bool isRel() const noexcept { return getAddrType() == kAddrTypeRel; } - ASMJIT_INLINE bool isWrt() const noexcept { return getAddrType() == kAddrTypeWrt; } - - ASMJIT_INLINE void setAbs() noexcept { setAddrType(kAddrTypeAbs); } - ASMJIT_INLINE void setRel() noexcept { setAddrType(kAddrTypeRel); } - ASMJIT_INLINE void setWrt() noexcept { setAddrType(kAddrTypeWrt); } - - ASMJIT_INLINE bool isArgHome() const noexcept { return _hasSignatureData(kSignatureMemArgHomeFlag); } - ASMJIT_INLINE bool isRegHome() const noexcept { return _hasSignatureData(kSignatureMemRegHomeFlag); } - - ASMJIT_INLINE void setArgHome() noexcept { _signature |= kSignatureMemArgHomeFlag; } - ASMJIT_INLINE void setRegHome() noexcept { _signature |= kSignatureMemRegHomeFlag; } - - ASMJIT_INLINE void clearArgHome() noexcept { _signature &= ~kSignatureMemArgHomeFlag; } - ASMJIT_INLINE void clearRegHome() noexcept { _signature &= ~kSignatureMemRegHomeFlag; } - - //! Get if the memory operand has a BASE register or label specified. - ASMJIT_INLINE bool hasBase() const noexcept { return (_signature & kSignatureMemBaseTypeMask) != 0; } - //! Get if the memory operand has an INDEX register specified. - ASMJIT_INLINE bool hasIndex() const noexcept { return (_signature & kSignatureMemIndexTypeMask) != 0; } - //! Get whether the memory operand has BASE and INDEX register. - ASMJIT_INLINE bool hasBaseOrIndex() const noexcept { return (_signature & kSignatureMemBaseIndexMask) != 0; } - //! Get whether the memory operand has BASE and INDEX register. - ASMJIT_INLINE bool hasBaseAndIndex() const noexcept { return (_signature & kSignatureMemBaseTypeMask) != 0 && (_signature & kSignatureMemIndexTypeMask) != 0; } - - //! Get if the BASE operand is a register (registers start after `kLabelTag`). - ASMJIT_INLINE bool hasBaseReg() const noexcept { return (_signature & kSignatureMemBaseTypeMask) > (Label::kLabelTag << kSignatureMemBaseTypeShift); } - //! Get if the BASE operand is a label. - ASMJIT_INLINE bool hasBaseLabel() const noexcept { return (_signature & kSignatureMemBaseTypeMask) == (Label::kLabelTag << kSignatureMemBaseTypeShift); } - //! Get if the INDEX operand is a register (registers start after `kLabelTag`). - ASMJIT_INLINE bool hasIndexReg() const noexcept { return (_signature & kSignatureMemIndexTypeMask) > (Label::kLabelTag << kSignatureMemIndexTypeShift); } - - //! Get type of a BASE register (0 if this memory operand doesn't use the BASE register). - //! - //! NOTE: If the returned type is one (a value never associated to a register - //! type) the BASE is not register, but it's a label. One equals to `kLabelTag`. - //! You should always check `hasBaseLabel()` before using `getBaseId()` result. 
- ASMJIT_INLINE uint32_t getBaseType() const noexcept { return _getSignatureData(kSignatureMemBaseTypeBits, kSignatureMemBaseTypeShift); } - //! Get type of an INDEX register (0 if this memory operand doesn't use the INDEX register). - ASMJIT_INLINE uint32_t getIndexType() const noexcept { return _getSignatureData(kSignatureMemIndexTypeBits, kSignatureMemIndexTypeShift); } - - //! Get both BASE (4:0 bits) and INDEX (9:5 bits) types combined into a single integer. - //! - //! This is used internally for BASE+INDEX validation. - ASMJIT_INLINE uint32_t getBaseIndexType() const noexcept { return _getSignatureData(kSignatureMemBaseIndexBits, kSignatureMemBaseIndexShift); } - - //! Get id of the BASE register or label (if the BASE was specified as label). - ASMJIT_INLINE uint32_t getBaseId() const noexcept { return _mem.base; } - //! Get id of the INDEX register. - ASMJIT_INLINE uint32_t getIndexId() const noexcept { return _mem.index; } - - ASMJIT_INLINE void _setBase(uint32_t rType, uint32_t rId) noexcept { - _setSignatureData(rType, kSignatureMemBaseTypeBits, kSignatureMemBaseTypeShift); - _mem.base = rId; - } - - ASMJIT_INLINE void _setIndex(uint32_t rType, uint32_t rId) noexcept { - _setSignatureData(rType, kSignatureMemIndexTypeBits, kSignatureMemIndexTypeShift); - _mem.index = rId; - } - - ASMJIT_INLINE void setBase(const Reg& base) noexcept { return _setBase(base.getType(), base.getId()); } - ASMJIT_INLINE void setIndex(const Reg& index) noexcept { return _setIndex(index.getType(), index.getId()); } - - //! Reset the memory operand's BASE register / label. - ASMJIT_INLINE void resetBase() noexcept { _setBase(0, 0); } - //! Reset the memory operand's INDEX register. - ASMJIT_INLINE void resetIndex() noexcept { _setIndex(0, 0); } - - //! Set memory operand size. - ASMJIT_INLINE void setSize(uint32_t size) noexcept { - _setSignatureData(size, kSignatureSizeBits, kSignatureSizeShift); - } - - ASMJIT_INLINE bool hasOffset() const noexcept { - int32_t lo = static_cast(_mem.offsetLo32); - int32_t hi = static_cast(_mem.base) & -static_cast(getBaseType() == 0); - return (lo | hi) != 0; - } - - //! Get if the memory operand has 64-bit offset or absolute address. - //! - //! If this is true then `hasBase()` must always report false. - ASMJIT_INLINE bool has64BitOffset() const noexcept { return getBaseType() == 0; } - - //! Get a 64-bit offset or absolute address. - ASMJIT_INLINE int64_t getOffset() const noexcept { - return has64BitOffset() - ? static_cast(_mem.offset64) - : static_cast(static_cast(_mem.offsetLo32)); // Sign-Extend. - } - - //! Get a lower part of a 64-bit offset or absolute address. - ASMJIT_INLINE int32_t getOffsetLo32() const noexcept { return static_cast(_mem.offsetLo32); } - //! Get a higher part of a 64-bit offset or absolute address. - //! - //! NOTE: This function is UNSAFE and returns garbage if `has64BitOffset()` - //! returns false. Never use it blindly without checking it. - ASMJIT_INLINE int32_t getOffsetHi32() const noexcept { return static_cast(_mem.base); } - - //! Set a 64-bit offset or an absolute address to `offset`. - //! - //! NOTE: This functions attempts to set both high and low parts of a 64-bit - //! offset, however, if the operand has a BASE register it will store only the - //! low 32 bits of the offset / address as there is no way to store both BASE - //! and 64-bit offset, and there is currently no architecture that has such - //! capability targeted by AsmJit. 
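// Example (sketch): the 64-bit offset rule documented above - without a BASE
// the full 64-bit value is kept (low half in `offsetLo32`, high half in the
// BASE field), with a BASE only the low 32 bits survive. The address constant
// and the `base` register are arbitrary.
static void memOffsetExample(const asmjit::Reg& base) noexcept {
  using namespace asmjit;

  Mem absolute;                                  // No BASE -> 64-bit offset.
  absolute.setOffset(int64_t(0x112233445566));
  ASMJIT_ASSERT(absolute.has64BitOffset());
  ASMJIT_ASSERT(absolute.getOffset() == int64_t(0x112233445566));

  Mem based;                                     // BASE present -> 32-bit offset.
  based.setBase(base);
  based.setOffset(int64_t(0x112233445566));
  ASMJIT_ASSERT(!based.has64BitOffset());
  ASMJIT_ASSERT(based.getOffsetLo32() == int32_t(0x33445566));
}
// (setOffset() and the related offset helpers follow.)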
- ASMJIT_INLINE void setOffset(int64_t offset) noexcept { - if (has64BitOffset()) - _mem.offset64 = static_cast(offset); - else - _mem.offsetLo32 = static_cast(offset & 0xFFFFFFFF); - } - //! Adjust the offset by a 64-bit `off`. - ASMJIT_INLINE void addOffset(int64_t off) noexcept { - if (has64BitOffset()) - _mem.offset64 += static_cast(off); - else - _mem.offsetLo32 += static_cast(off & 0xFFFFFFFF); - } - //! Reset the memory offset to zero. - ASMJIT_INLINE void resetOffset() noexcept { setOffset(0); } - - //! Set a low 32-bit offset to `off`. - ASMJIT_INLINE void setOffsetLo32(int32_t off) noexcept { - _mem.offsetLo32 = static_cast(off); - } - //! Adjust the offset by `off`. - //! - //! NOTE: This is a fast function that doesn't use the HI 32-bits of a - //! 64-bit offset. Use it only if you know that there is a BASE register - //! and the offset is only 32 bits anyway. - ASMJIT_INLINE void addOffsetLo32(int32_t off) noexcept { - _mem.offsetLo32 += static_cast(off); - } - //! Reset the memory offset to zero. - ASMJIT_INLINE void resetOffsetLo32() noexcept { setOffsetLo32(0); } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE Mem& operator=(const Mem& other) noexcept { copyFrom(other); return *this; } -}; - -// ============================================================================ -// [asmjit::Imm] -// ============================================================================ - -//! Immediate operand. -//! -//! Immediate operand is usually part of instruction itself. It's inlined after -//! or before the instruction opcode. Immediates can be only signed or unsigned -//! integers. -//! -//! To create immediate operand use `imm()` or `imm_u()` non-members or `Imm` -//! constructors. -class Imm : public Operand { -public: - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new immediate value (initial value is 0). - Imm() noexcept : Operand(NoInit) { - _init_packed_d0_d1(kOpImm, 0); - _imm.value.i64 = 0; - } - - //! Create a new signed immediate value, assigning the value to `val`. - explicit Imm(int64_t val) noexcept : Operand(NoInit) { - _init_packed_d0_d1(kOpImm, 0); - _imm.value.i64 = val; - } - - //! Create a new immediate value from `other`. - ASMJIT_INLINE Imm(const Imm& other) noexcept : Operand(other) {} - - explicit ASMJIT_INLINE Imm(const _NoInit&) noexcept : Operand(NoInit) {} - - // -------------------------------------------------------------------------- - // [Immediate Specific] - // -------------------------------------------------------------------------- - - //! Clone `Imm` operand. - ASMJIT_INLINE Imm clone() const noexcept { return Imm(*this); } - - //! Get whether the immediate can be casted to 8-bit signed integer. - ASMJIT_INLINE bool isInt8() const noexcept { return Utils::isInt8(_imm.value.i64); } - //! Get whether the immediate can be casted to 8-bit unsigned integer. - ASMJIT_INLINE bool isUInt8() const noexcept { return Utils::isUInt8(_imm.value.i64); } - - //! Get whether the immediate can be casted to 16-bit signed integer. - ASMJIT_INLINE bool isInt16() const noexcept { return Utils::isInt16(_imm.value.i64); } - //! Get whether the immediate can be casted to 16-bit unsigned integer. 
- ASMJIT_INLINE bool isUInt16() const noexcept { return Utils::isUInt16(_imm.value.i64); } - - //! Get whether the immediate can be casted to 32-bit signed integer. - ASMJIT_INLINE bool isInt32() const noexcept { return Utils::isInt32(_imm.value.i64); } - //! Get whether the immediate can be casted to 32-bit unsigned integer. - ASMJIT_INLINE bool isUInt32() const noexcept { return Utils::isUInt32(_imm.value.i64); } - - //! Get immediate value as 8-bit signed integer. - ASMJIT_INLINE int8_t getInt8() const noexcept { return static_cast(_imm.value.i32Lo & 0xFF); } - //! Get immediate value as 8-bit unsigned integer. - ASMJIT_INLINE uint8_t getUInt8() const noexcept { return static_cast(_imm.value.u32Lo & 0xFFU); } - //! Get immediate value as 16-bit signed integer. - ASMJIT_INLINE int16_t getInt16() const noexcept { return static_cast(_imm.value.i32Lo & 0xFFFF);} - //! Get immediate value as 16-bit unsigned integer. - ASMJIT_INLINE uint16_t getUInt16() const noexcept { return static_cast(_imm.value.u32Lo & 0xFFFFU);} - - //! Get immediate value as 32-bit signed integer. - ASMJIT_INLINE int32_t getInt32() const noexcept { return _imm.value.i32Lo; } - //! Get low 32-bit signed integer. - ASMJIT_INLINE int32_t getInt32Lo() const noexcept { return _imm.value.i32Lo; } - //! Get high 32-bit signed integer. - ASMJIT_INLINE int32_t getInt32Hi() const noexcept { return _imm.value.i32Hi; } - - //! Get immediate value as 32-bit unsigned integer. - ASMJIT_INLINE uint32_t getUInt32() const noexcept { return _imm.value.u32Lo; } - //! Get low 32-bit signed integer. - ASMJIT_INLINE uint32_t getUInt32Lo() const noexcept { return _imm.value.u32Lo; } - //! Get high 32-bit signed integer. - ASMJIT_INLINE uint32_t getUInt32Hi() const noexcept { return _imm.value.u32Hi; } - - //! Get immediate value as 64-bit signed integer. - ASMJIT_INLINE int64_t getInt64() const noexcept { return _imm.value.i64; } - //! Get immediate value as 64-bit unsigned integer. - ASMJIT_INLINE uint64_t getUInt64() const noexcept { return _imm.value.u64; } - - //! Get immediate value as `intptr_t`. - ASMJIT_INLINE intptr_t getIntPtr() const noexcept { - if (sizeof(intptr_t) == sizeof(int64_t)) - return static_cast(getInt64()); - else - return static_cast(getInt32()); - } - - //! Get immediate value as `uintptr_t`. - ASMJIT_INLINE uintptr_t getUIntPtr() const noexcept { - if (sizeof(uintptr_t) == sizeof(uint64_t)) - return static_cast(getUInt64()); - else - return static_cast(getUInt32()); - } - - //! Set immediate value to 8-bit signed integer `val`. - ASMJIT_INLINE void setInt8(int8_t val) noexcept { _imm.value.i64 = static_cast(val); } - //! Set immediate value to 8-bit unsigned integer `val`. - ASMJIT_INLINE void setUInt8(uint8_t val) noexcept { _imm.value.u64 = static_cast(val); } - - //! Set immediate value to 16-bit signed integer `val`. - ASMJIT_INLINE void setInt16(int16_t val) noexcept { _imm.value.i64 = static_cast(val); } - //! Set immediate value to 16-bit unsigned integer `val`. - ASMJIT_INLINE void setUInt16(uint16_t val) noexcept { _imm.value.u64 = static_cast(val); } - - //! Set immediate value to 32-bit signed integer `val`. - ASMJIT_INLINE void setInt32(int32_t val) noexcept { _imm.value.i64 = static_cast(val); } - //! Set immediate value to 32-bit unsigned integer `val`. - ASMJIT_INLINE void setUInt32(uint32_t val) noexcept { _imm.value.u64 = static_cast(val); } - - //! Set immediate value to 64-bit signed integer `val`. - ASMJIT_INLINE void setInt64(int64_t val) noexcept { _imm.value.i64 = val; } - //! 
Set immediate value to 64-bit unsigned integer `val`. - ASMJIT_INLINE void setUInt64(uint64_t val) noexcept { _imm.value.u64 = val; } - //! Set immediate value to intptr_t `val`. - ASMJIT_INLINE void setIntPtr(intptr_t val) noexcept { _imm.value.i64 = static_cast(val); } - //! Set immediate value to uintptr_t `val`. - ASMJIT_INLINE void setUIntPtr(uintptr_t val) noexcept { _imm.value.u64 = static_cast(val); } - - //! Set immediate value as unsigned type to `val`. - ASMJIT_INLINE void setPtr(void* p) noexcept { setIntPtr((uint64_t)p); } - //! Set immediate value to `val`. - template - ASMJIT_INLINE void setValue(T val) noexcept { setIntPtr((int64_t)val); } - - // -------------------------------------------------------------------------- - // [Float] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void setFloat(float f) noexcept { - _imm.value.f32Lo = f; - _imm.value.u32Hi = 0; - } - - ASMJIT_INLINE void setDouble(double d) noexcept { - _imm.value.f64 = d; - } - - // -------------------------------------------------------------------------- - // [Truncate] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void truncateTo8Bits() noexcept { - if (ASMJIT_ARCH_64BIT) { - _imm.value.u64 &= static_cast(0x000000FFU); - } - else { - _imm.value.u32Lo &= 0x000000FFU; - _imm.value.u32Hi = 0; - } - } - - ASMJIT_INLINE void truncateTo16Bits() noexcept { - if (ASMJIT_ARCH_64BIT) { - _imm.value.u64 &= static_cast(0x0000FFFFU); - } - else { - _imm.value.u32Lo &= 0x0000FFFFU; - _imm.value.u32Hi = 0; - } - } - - ASMJIT_INLINE void truncateTo32Bits() noexcept { _imm.value.u32Hi = 0; } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - //! Assign `other` to the immediate operand. - ASMJIT_INLINE Imm& operator=(const Imm& other) noexcept { copyFrom(other); return *this; } -}; - -//! Create a signed immediate operand. -static ASMJIT_INLINE Imm imm(int64_t val) noexcept { return Imm(val); } -//! Create an unsigned immediate operand. -static ASMJIT_INLINE Imm imm_u(uint64_t val) noexcept { return Imm(static_cast(val)); } -//! Create an immediate operand from `p`. -template -static ASMJIT_INLINE Imm imm_ptr(T p) noexcept { return Imm(static_cast((intptr_t)p)); } - -// ============================================================================ -// [asmjit::TypeId] -// ============================================================================ - -//! Type-id. -//! -//! This is an additional information that can be used to describe a physical -//! or virtual register. it's used mostly by CodeCompiler to describe register -//! representation (the kind of data stored in the register and the width used) -//! and it's also used by APIs that allow to describe and work with function -//! signatures. 
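// Example (sketch): typical use of the Imm class and the imm()/imm_u()
// helpers defined above. The values are arbitrary.
static void immExample() noexcept {
  using namespace asmjit;

  Imm a = imm(-1);                 // Signed immediate.
  ASMJIT_ASSERT(a.isInt8());       // -1 fits a signed 8-bit integer...
  ASMJIT_ASSERT(!a.isUInt8());     // ...but not an unsigned one.

  a.truncateTo8Bits();             // Keep only the low 8 bits.
  ASMJIT_ASSERT(a.getUInt8() == 0xFF);

  Imm b = imm_u(0x80000000U);      // Unsigned immediate.
  ASMJIT_ASSERT(b.isUInt32() && !b.isInt32());
}
// (The TypeId enumeration described above is defined next.)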
-struct TypeId { - // -------------------------------------------------------------------------- - // [Id] - // -------------------------------------------------------------------------- - - enum Id { - kVoid = 0, - - _kIntStart = 32, - _kIntEnd = 41, - - kIntPtr = 32, - kUIntPtr = 33, - - kI8 = 34, - kU8 = 35, - kI16 = 36, - kU16 = 37, - kI32 = 38, - kU32 = 39, - kI64 = 40, - kU64 = 41, - - _kFloatStart = 42, - _kFloatEnd = 44, - - kF32 = 42, - kF64 = 43, - kF80 = 44, - - _kMaskStart = 45, - _kMaskEnd = 48, - - kMask8 = 45, - kMask16 = 46, - kMask32 = 47, - kMask64 = 48, - - _kMmxStart = 49, - _kMmxEnd = 50, - - kMmx32 = 49, - kMmx64 = 50, - - _kVec32Start = 51, - _kVec32End = 60, - - kI8x4 = 51, - kU8x4 = 52, - kI16x2 = 53, - kU16x2 = 54, - kI32x1 = 55, - kU32x1 = 56, - kF32x1 = 59, - - _kVec64Start = 61, - _kVec64End = 70, - - kI8x8 = 61, - kU8x8 = 62, - kI16x4 = 63, - kU16x4 = 64, - kI32x2 = 65, - kU32x2 = 66, - kI64x1 = 67, - kU64x1 = 68, - kF32x2 = 69, - kF64x1 = 70, - - _kVec128Start = 71, - _kVec128End = 80, - - kI8x16 = 71, - kU8x16 = 72, - kI16x8 = 73, - kU16x8 = 74, - kI32x4 = 75, - kU32x4 = 76, - kI64x2 = 77, - kU64x2 = 78, - kF32x4 = 79, - kF64x2 = 80, - - _kVec256Start = 81, - _kVec256End = 90, - - kI8x32 = 81, - kU8x32 = 82, - kI16x16 = 83, - kU16x16 = 84, - kI32x8 = 85, - kU32x8 = 86, - kI64x4 = 87, - kU64x4 = 88, - kF32x8 = 89, - kF64x4 = 90, - - _kVec512Start = 91, - _kVec512End = 100, - - kI8x64 = 91, - kU8x64 = 92, - kI16x32 = 93, - kU16x32 = 94, - kI32x16 = 95, - kU32x16 = 96, - kI64x8 = 97, - kU64x8 = 98, - kF32x16 = 99, - kF64x8 = 100, - - kCount = 101 - }; - - // -------------------------------------------------------------------------- - // [TypeName - Used by Templates] - // -------------------------------------------------------------------------- - - struct Int8 {}; //!< int8_t as C++ type-name. - struct UInt8 {}; //!< uint8_t as C++ type-name. - struct Int16 {}; //!< int16_t as C++ type-name. - struct UInt16 {}; //!< uint16_t as C++ type-name. - struct Int32 {}; //!< int32_t as C++ type-name. - struct UInt32 {}; //!< uint32_t as C++ type-name. - struct Int64 {}; //!< int64_t as C++ type-name. - struct UInt64 {}; //!< uint64_t as C++ type-name. - struct IntPtr {}; //!< intptr_t as C++ type-name. - struct UIntPtr {}; //!< uintptr_t as C++ type-name. - struct Float {}; //!< float as C++ type-name. - struct Double {}; //!< double as C++ type-name. - struct MmxReg {}; //!< MMX register as C++ type-name. - struct Vec128 {}; //!< SIMD128/XMM register as C++ type-name. - struct Vec256 {}; //!< SIMD256/YMM register as C++ type-name. - struct Vec512 {}; //!< SIMD512/ZMM register as C++ type-name. 
- - // -------------------------------------------------------------------------- - // [Utilities] - // -------------------------------------------------------------------------- - - struct Info { - uint8_t sizeOf[128]; - uint8_t elementOf[128]; - }; - - ASMJIT_API static const Info _info; - - static ASMJIT_INLINE bool isVoid(uint32_t typeId) noexcept { return typeId == 0; } - static ASMJIT_INLINE bool isValid(uint32_t typeId) noexcept { return typeId >= _kIntStart && typeId <= _kVec512End; } - static ASMJIT_INLINE bool isAbstract(uint32_t typeId) noexcept { return typeId >= kIntPtr && typeId <= kUIntPtr; } - static ASMJIT_INLINE bool isInt(uint32_t typeId) noexcept { return typeId >= _kIntStart && typeId <= _kIntEnd; } - static ASMJIT_INLINE bool isGpb(uint32_t typeId) noexcept { return typeId >= kI8 && typeId <= kU8; } - static ASMJIT_INLINE bool isGpw(uint32_t typeId) noexcept { return typeId >= kI16 && typeId <= kU16; } - static ASMJIT_INLINE bool isGpd(uint32_t typeId) noexcept { return typeId >= kI32 && typeId <= kU32; } - static ASMJIT_INLINE bool isGpq(uint32_t typeId) noexcept { return typeId >= kI64 && typeId <= kU64; } - static ASMJIT_INLINE bool isFloat(uint32_t typeId) noexcept { return typeId >= _kFloatStart && typeId <= _kFloatEnd; } - static ASMJIT_INLINE bool isMask(uint32_t typeId) noexcept { return typeId >= _kMaskStart && typeId <= _kMaskEnd; } - static ASMJIT_INLINE bool isMmx(uint32_t typeId) noexcept { return typeId >= _kMmxStart && typeId <= _kMmxEnd; } - - static ASMJIT_INLINE bool isVec(uint32_t typeId) noexcept { return typeId >= _kVec32Start && typeId <= _kVec512End; } - static ASMJIT_INLINE bool isVec32(uint32_t typeId) noexcept { return typeId >= _kVec32Start && typeId <= _kVec32End; } - static ASMJIT_INLINE bool isVec64(uint32_t typeId) noexcept { return typeId >= _kVec64Start && typeId <= _kVec64End; } - static ASMJIT_INLINE bool isVec128(uint32_t typeId) noexcept { return typeId >= _kVec128Start && typeId <= _kVec128End; } - static ASMJIT_INLINE bool isVec256(uint32_t typeId) noexcept { return typeId >= _kVec256Start && typeId <= _kVec256End; } - static ASMJIT_INLINE bool isVec512(uint32_t typeId) noexcept { return typeId >= _kVec512Start && typeId <= _kVec512End; } - - static ASMJIT_INLINE uint32_t sizeOf(uint32_t typeId) noexcept { - ASMJIT_ASSERT(typeId < ASMJIT_ARRAY_SIZE(_info.sizeOf)); - return _info.sizeOf[typeId]; - } - - static ASMJIT_INLINE uint32_t elementOf(uint32_t typeId) noexcept { - ASMJIT_ASSERT(typeId < ASMJIT_ARRAY_SIZE(_info.elementOf)); - return _info.elementOf[typeId]; - } - - //! Get an offset to convert a `kIntPtr` and `kUIntPtr` TypeId into a - //! type that matches `gpSize` (general-purpose register size). If you - //! find such TypeId it's then only about adding the offset to it. - //! - //! For example: - //! ~~~ - //! uint32_t gpSize = '4' or '8'; - //! uint32_t deabstractDelta = TypeId::deabstractDeltaOfSize(gpSize); - //! - //! uint32_t typeId = 'some type-id'; - //! - //! // Normalize some typeId into a non-abstract typeId. - //! if (TypeId::isAbstract(typeId)) typeId += deabstractDelta; - //! - //! // The same, but by using TypeId::deabstract() function. - //! typeId = TypeId::deabstract(typeId, deabstractDelta); - //! ~~~ - static ASMJIT_INLINE uint32_t deabstractDeltaOfSize(uint32_t gpSize) noexcept { - return gpSize >= 8 ? kI64 - kIntPtr : kI32 - kIntPtr; - } - - static ASMJIT_INLINE uint32_t deabstract(uint32_t typeId, uint32_t deabstractDelta) noexcept { - return TypeId::isAbstract(typeId) ? 
typeId += deabstractDelta : typeId; - } -}; - -//! TypeIdOf<> template allows to get a TypeId of a C++ type. -template struct TypeIdOf { - // Don't provide anything if not specialized. -}; -template struct TypeIdOf { - enum { kTypeId = TypeId::kUIntPtr }; -}; - -template -struct TypeIdOfInt { - enum { - kSigned = int(~T(0) < T(0)), - kTypeId = (sizeof(T) == 1) ? (int)(kSigned ? TypeId::kI8 : TypeId::kU8 ) : - (sizeof(T) == 2) ? (int)(kSigned ? TypeId::kI16 : TypeId::kU16) : - (sizeof(T) == 4) ? (int)(kSigned ? TypeId::kI32 : TypeId::kU32) : - (sizeof(T) == 8) ? (int)(kSigned ? TypeId::kI64 : TypeId::kU64) : (int)TypeId::kVoid - }; -}; - -#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \ - template<> \ - struct TypeIdOf { enum { kTypeId = TYPE_ID}; } - -ASMJIT_DEFINE_TYPE_ID(signed char , TypeIdOfInt< signed char >::kTypeId); -ASMJIT_DEFINE_TYPE_ID(unsigned char , TypeIdOfInt< unsigned char >::kTypeId); -ASMJIT_DEFINE_TYPE_ID(short , TypeIdOfInt< short >::kTypeId); -ASMJIT_DEFINE_TYPE_ID(unsigned short , TypeIdOfInt< unsigned short >::kTypeId); -ASMJIT_DEFINE_TYPE_ID(int , TypeIdOfInt< int >::kTypeId); -ASMJIT_DEFINE_TYPE_ID(unsigned int , TypeIdOfInt< unsigned int >::kTypeId); -ASMJIT_DEFINE_TYPE_ID(long , TypeIdOfInt< long >::kTypeId); -ASMJIT_DEFINE_TYPE_ID(unsigned long , TypeIdOfInt< unsigned long >::kTypeId); -#if ASMJIT_CC_MSC && !ASMJIT_CC_MSC_GE(16, 0, 0) -ASMJIT_DEFINE_TYPE_ID(__int64 , TypeIdOfInt< __int64 >::kTypeId); -ASMJIT_DEFINE_TYPE_ID(unsigned __int64 , TypeIdOfInt< unsigned __int64 >::kTypeId); -#else -ASMJIT_DEFINE_TYPE_ID(long long , TypeIdOfInt< long long >::kTypeId); -ASMJIT_DEFINE_TYPE_ID(unsigned long long, TypeIdOfInt< unsigned long long >::kTypeId); -#endif -#if ASMJIT_CC_HAS_NATIVE_CHAR -ASMJIT_DEFINE_TYPE_ID(char , TypeIdOfInt< char >::kTypeId); -#endif -#if ASMJIT_CC_HAS_NATIVE_CHAR16_T -ASMJIT_DEFINE_TYPE_ID(char16_t , TypeIdOfInt< char16_t >::kTypeId); -#endif -#if ASMJIT_CC_HAS_NATIVE_CHAR32_T -ASMJIT_DEFINE_TYPE_ID(char32_t , TypeIdOfInt< char32_t >::kTypeId); -#endif -#if ASMJIT_CC_HAS_NATIVE_WCHAR_T -ASMJIT_DEFINE_TYPE_ID(wchar_t , TypeIdOfInt< wchar_t >::kTypeId); -#endif - -ASMJIT_DEFINE_TYPE_ID(void , TypeId::kVoid); -ASMJIT_DEFINE_TYPE_ID(bool , TypeId::kI8); -ASMJIT_DEFINE_TYPE_ID(float , TypeId::kF32); -ASMJIT_DEFINE_TYPE_ID(double , TypeId::kF64); - -ASMJIT_DEFINE_TYPE_ID(TypeId::Int8 , TypeId::kI8); -ASMJIT_DEFINE_TYPE_ID(TypeId::UInt8 , TypeId::kU8); -ASMJIT_DEFINE_TYPE_ID(TypeId::Int16 , TypeId::kI16); -ASMJIT_DEFINE_TYPE_ID(TypeId::UInt16 , TypeId::kU16); -ASMJIT_DEFINE_TYPE_ID(TypeId::Int32 , TypeId::kI32); -ASMJIT_DEFINE_TYPE_ID(TypeId::UInt32 , TypeId::kU32); -ASMJIT_DEFINE_TYPE_ID(TypeId::Int64 , TypeId::kI64); -ASMJIT_DEFINE_TYPE_ID(TypeId::UInt64 , TypeId::kU64); -ASMJIT_DEFINE_TYPE_ID(TypeId::IntPtr , TypeId::kIntPtr); -ASMJIT_DEFINE_TYPE_ID(TypeId::UIntPtr , TypeId::kUIntPtr); -ASMJIT_DEFINE_TYPE_ID(TypeId::Float , TypeId::kF32); -ASMJIT_DEFINE_TYPE_ID(TypeId::Double , TypeId::kF64); -ASMJIT_DEFINE_TYPE_ID(TypeId::MmxReg , TypeId::kMmx64); -ASMJIT_DEFINE_TYPE_ID(TypeId::Vec128 , TypeId::kI32x4); -ASMJIT_DEFINE_TYPE_ID(TypeId::Vec256 , TypeId::kI32x8); -ASMJIT_DEFINE_TYPE_ID(TypeId::Vec512 , TypeId::kI32x16); - -//! 
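// Example (sketch): TypeIdOf<T> maps C++ types to TypeId constants at compile
// time, and deabstract() turns the abstract kIntPtr/kUIntPtr ids into concrete
// ones for a given GP register size (8 below, i.e. a 64-bit target; a 32-bit
// `int` is assumed for the first assert).
static void typeIdExample() noexcept {
  using namespace asmjit;

  ASMJIT_ASSERT(uint32_t(TypeIdOf<int>::kTypeId)   == TypeId::kI32);
  ASMJIT_ASSERT(uint32_t(TypeIdOf<float>::kTypeId) == TypeId::kF32);

  uint32_t delta = TypeId::deabstractDeltaOfSize(8);
  ASMJIT_ASSERT(TypeId::deabstract(TypeId::kIntPtr, delta) == TypeId::kI64);
  ASMJIT_ASSERT(TypeId::deabstract(TypeId::kI32   , delta) == TypeId::kI32);
}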
\} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_OPERAND_H diff --git a/src/asmjit/base/osutils.cpp b/src/asmjit/base/osutils.cpp deleted file mode 100644 index 08ddd7d..0000000 --- a/src/asmjit/base/osutils.cpp +++ /dev/null @@ -1,228 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/osutils.h" -#include "../base/utils.h" - -#if ASMJIT_OS_POSIX -# include -# include -# include -# include -#endif // ASMJIT_OS_POSIX - -#if ASMJIT_OS_MAC -# include -#endif // ASMJIT_OS_MAC - -#if ASMJIT_OS_WINDOWS -# if defined(_MSC_VER) && _MSC_VER >= 1400 -# include -# else -# define _InterlockedCompareExchange InterlockedCompareExchange -# endif // _MSC_VER -#endif // ASMJIT_OS_WINDOWS - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::OSUtils - Virtual Memory] -// ============================================================================ - -// Windows specific implementation using `VirtualAllocEx` and `VirtualFree`. -#if ASMJIT_OS_WINDOWS -static ASMJIT_NOINLINE const VMemInfo& OSUtils_GetVMemInfo() noexcept { - static VMemInfo vmi; - - if (ASMJIT_UNLIKELY(!vmi.hCurrentProcess)) { - SYSTEM_INFO info; - ::GetSystemInfo(&info); - - vmi.pageSize = Utils::alignToPowerOf2(info.dwPageSize); - vmi.pageGranularity = info.dwAllocationGranularity; - vmi.hCurrentProcess = ::GetCurrentProcess(); - } - - return vmi; -}; - -VMemInfo OSUtils::getVirtualMemoryInfo() noexcept { return OSUtils_GetVMemInfo(); } - -void* OSUtils::allocVirtualMemory(size_t size, size_t* allocated, uint32_t flags) noexcept { - return allocProcessMemory(static_cast(0), size, allocated, flags); -} - -Error OSUtils::releaseVirtualMemory(void* p, size_t size) noexcept { - return releaseProcessMemory(static_cast(0), p, size); -} - -void* OSUtils::allocProcessMemory(HANDLE hProcess, size_t size, size_t* allocated, uint32_t flags) noexcept { - if (size == 0) - return nullptr; - - const VMemInfo& vmi = OSUtils_GetVMemInfo(); - if (!hProcess) hProcess = vmi.hCurrentProcess; - - // VirtualAllocEx rounds the allocated size to a page size automatically, - // but we need the `alignedSize` so we can store the real allocated size - // into `allocated` output. - size_t alignedSize = Utils::alignTo(size, vmi.pageSize); - - // Windows XP SP2 / Vista+ allow data-execution-prevention (DEP). - DWORD protectFlags = 0; - - if (flags & kVMExecutable) - protectFlags |= (flags & kVMWritable) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ; - else - protectFlags |= (flags & kVMWritable) ? 
PAGE_READWRITE : PAGE_READONLY; - - LPVOID mBase = ::VirtualAllocEx(hProcess, nullptr, alignedSize, MEM_COMMIT | MEM_RESERVE, protectFlags); - if (ASMJIT_UNLIKELY(!mBase)) return nullptr; - - ASMJIT_ASSERT(Utils::isAligned(reinterpret_cast(mBase), vmi.pageSize)); - if (allocated) *allocated = alignedSize; - return mBase; -} - -Error OSUtils::releaseProcessMemory(HANDLE hProcess, void* p, size_t size) noexcept { - const VMemInfo& vmi = OSUtils_GetVMemInfo(); - if (!hProcess) hProcess = vmi.hCurrentProcess; - - if (ASMJIT_UNLIKELY(!::VirtualFreeEx(hProcess, p, 0, MEM_RELEASE))) - return DebugUtils::errored(kErrorInvalidState); - - return kErrorOk; -} -#endif // ASMJIT_OS_WINDOWS - -// Posix specific implementation using `mmap()` and `munmap()`. -#if ASMJIT_OS_POSIX - -// Mac uses MAP_ANON instead of MAP_ANONYMOUS. -#if !defined(MAP_ANONYMOUS) -# define MAP_ANONYMOUS MAP_ANON -#endif // MAP_ANONYMOUS - -static const VMemInfo& OSUtils_GetVMemInfo() noexcept { - static VMemInfo vmi; - if (ASMJIT_UNLIKELY(!vmi.pageSize)) { - size_t pageSize = ::getpagesize(); - vmi.pageSize = pageSize; - vmi.pageGranularity = std::max(pageSize, 65536); - } - return vmi; -}; - -VMemInfo OSUtils::getVirtualMemoryInfo() noexcept { return OSUtils_GetVMemInfo(); } - -void* OSUtils::allocVirtualMemory(size_t size, size_t* allocated, uint32_t flags) noexcept { - const VMemInfo& vmi = OSUtils_GetVMemInfo(); - - size_t alignedSize = Utils::alignTo(size, vmi.pageSize); - int protection = PROT_READ; - - if (flags & kVMWritable ) protection |= PROT_WRITE; - if (flags & kVMExecutable) protection |= PROT_EXEC; - - void* mbase = ::mmap(nullptr, alignedSize, protection, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (ASMJIT_UNLIKELY(mbase == MAP_FAILED)) return nullptr; - - if (allocated) *allocated = alignedSize; - return mbase; -} - -Error OSUtils::releaseVirtualMemory(void* p, size_t size) noexcept { - if (ASMJIT_UNLIKELY(::munmap(p, size) != 0)) - return DebugUtils::errored(kErrorInvalidState); - - return kErrorOk; -} -#endif // ASMJIT_OS_POSIX - -// ============================================================================ -// [asmjit::OSUtils - GetTickCount] -// ============================================================================ - -#if ASMJIT_OS_WINDOWS -static ASMJIT_INLINE uint32_t OSUtils_calcHiRes(const LARGE_INTEGER& now, double freq) noexcept { - return static_cast( - (int64_t)(double(now.QuadPart) / freq) & 0xFFFFFFFF); -} - -uint32_t OSUtils::getTickCount() noexcept { - static volatile uint32_t _hiResTicks; - static volatile double _hiResFreq; - - do { - uint32_t hiResOk = _hiResTicks; - LARGE_INTEGER qpf, now; - - // If for whatever reason this fails, bail to `GetTickCount()`. - if (!::QueryPerformanceCounter(&now)) break; - - // Expected - if we ran through this at least once `hiResTicks` will be - // either 1 or 0xFFFFFFFF. If it's '1' then the Hi-Res counter is available - // and `QueryPerformanceCounter()` can be used. - if (hiResOk == 1) return OSUtils_calcHiRes(now, _hiResFreq); - - // Hi-Res counter is not available, bail to `GetTickCount()`. - if (hiResOk != 0) break; - - // Detect availability of Hi-Res counter, if not available, bail to `GetTickCount()`. 
- if (!::QueryPerformanceFrequency(&qpf)) { - _InterlockedCompareExchange((LONG*)&_hiResTicks, 0xFFFFFFFF, 0); - break; - } - - double freq = double(qpf.QuadPart) / 1000.0; - _hiResFreq = freq; - - _InterlockedCompareExchange((LONG*)&_hiResTicks, 1, 0); - return OSUtils_calcHiRes(now, freq); - } while (0); - - return ::GetTickCount(); -} -#elif ASMJIT_OS_MAC -uint32_t OSUtils::getTickCount() noexcept { - static mach_timebase_info_data_t _machTime; - - // See Apple's QA1398. - if (ASMJIT_UNLIKELY(_machTime.denom == 0) || mach_timebase_info(&_machTime) != KERN_SUCCESS) - return 0; - - // `mach_absolute_time()` returns nanoseconds, we want milliseconds. - uint64_t t = mach_absolute_time() / 1000000; - - t = t * _machTime.numer / _machTime.denom; - return static_cast(t & 0xFFFFFFFFU); -} -#elif defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0 -uint32_t OSUtils::getTickCount() noexcept { - struct timespec ts; - - if (ASMJIT_UNLIKELY(clock_gettime(CLOCK_MONOTONIC, &ts) != 0)) - return 0; - - uint64_t t = (uint64_t(ts.tv_sec ) * 1000) + (uint64_t(ts.tv_nsec) / 1000000); - return static_cast(t & 0xFFFFFFFFU); -} -#else -#error "[asmjit] OSUtils::getTickCount() is not implemented for your target OS." -uint32_t OSUtils::getTickCount() noexcept { return 0; } -#endif - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/osutils.h b/src/asmjit/base/osutils.h deleted file mode 100644 index ccf6bee..0000000 --- a/src/asmjit/base/osutils.h +++ /dev/null @@ -1,178 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_OSUTILS_H -#define _ASMJIT_BASE_OSUTILS_H - -// [Dependencies] -#include "../base/globals.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::VMemInfo] -// ============================================================================ - -//! Information about OS virtual memory. -struct VMemInfo { -#if ASMJIT_OS_WINDOWS - HANDLE hCurrentProcess; //!< Handle of the current process (Windows). -#endif // ASMJIT_OS_WINDOWS - size_t pageSize; //!< Virtual memory page size. - size_t pageGranularity; //!< Virtual memory page granularity. -}; - -// ============================================================================ -// [asmjit::OSUtils] -// ============================================================================ - -//! OS utilities. -//! -//! Virtual Memory -//! -------------- -//! -//! Provides functions to allocate and release virtual memory that is required -//! to execute dynamically generated code. If both processor and host OS support -//! data-execution-prevention (DEP) then the only way to run machine code is to -//! allocate virtual memory that has `OSUtils::kVMExecutable` flag enabled. All -//! functions provides by OSUtils use internally platform specific API. -//! -//! Benchmarking -//! ------------ -//! -//! OSUtils also provide a function `getTickCount()` that can be used for -//! benchmarking purposes. It's similar to Windows-only `GetTickCount()`, but -//! it's cross-platform and tries to be the most reliable platform specific -//! calls to make the result usable. 
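// Example (sketch): typical use of the virtual-memory API declared in the
// OSUtils struct below - allocate a writable and executable block for JIT
// output and release it again. Error handling is kept to a minimum.
static void virtualMemoryExample() noexcept {
  using namespace asmjit;

  size_t allocated = 0;
  void* p = OSUtils::allocVirtualMemory(
    4096, &allocated, OSUtils::kVMWritable | OSUtils::kVMExecutable);

  if (p) {
    // `allocated` holds the real, page-aligned size of the block.
    // ... copy the generated machine code here ...
    OSUtils::releaseVirtualMemory(p, allocated);
  }
}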
-struct OSUtils { - // -------------------------------------------------------------------------- - // [Virtual Memory] - // -------------------------------------------------------------------------- - - //! Virtual memory flags. - ASMJIT_ENUM(VMFlags) { - kVMWritable = 0x00000001U, //!< Virtual memory is writable. - kVMExecutable = 0x00000002U //!< Virtual memory is executable. - }; - - ASMJIT_API static VMemInfo getVirtualMemoryInfo() noexcept; - - //! Allocate virtual memory. - ASMJIT_API static void* allocVirtualMemory(size_t size, size_t* allocated, uint32_t flags) noexcept; - //! Release virtual memory previously allocated by \ref allocVirtualMemory(). - ASMJIT_API static Error releaseVirtualMemory(void* p, size_t size) noexcept; - -#if ASMJIT_OS_WINDOWS - //! Allocate virtual memory of `hProcess` (Windows). - ASMJIT_API static void* allocProcessMemory(HANDLE hProcess, size_t size, size_t* allocated, uint32_t flags) noexcept; - - //! Release virtual memory of `hProcess` (Windows). - ASMJIT_API static Error releaseProcessMemory(HANDLE hProcess, void* p, size_t size) noexcept; -#endif // ASMJIT_OS_WINDOWS - - // -------------------------------------------------------------------------- - // [GetTickCount] - // -------------------------------------------------------------------------- - - //! Get the current CPU tick count, used for benchmarking (1ms resolution). - ASMJIT_API static uint32_t getTickCount() noexcept; -}; - -// ============================================================================ -// [asmjit::Lock] -// ============================================================================ - -//! \internal -//! -//! Lock. -struct Lock { - ASMJIT_NONCOPYABLE(Lock) - - // -------------------------------------------------------------------------- - // [Windows] - // -------------------------------------------------------------------------- - -#if ASMJIT_OS_WINDOWS - typedef CRITICAL_SECTION Handle; - - //! Create a new `Lock` instance. - ASMJIT_INLINE Lock() noexcept { InitializeCriticalSection(&_handle); } - //! Destroy the `Lock` instance. - ASMJIT_INLINE ~Lock() noexcept { DeleteCriticalSection(&_handle); } - - //! Lock. - ASMJIT_INLINE void lock() noexcept { EnterCriticalSection(&_handle); } - //! Unlock. - ASMJIT_INLINE void unlock() noexcept { LeaveCriticalSection(&_handle); } -#endif // ASMJIT_OS_WINDOWS - - // -------------------------------------------------------------------------- - // [Posix] - // -------------------------------------------------------------------------- - -#if ASMJIT_OS_POSIX - typedef pthread_mutex_t Handle; - - //! Create a new `Lock` instance. - ASMJIT_INLINE Lock() noexcept { pthread_mutex_init(&_handle, nullptr); } - //! Destroy the `Lock` instance. - ASMJIT_INLINE ~Lock() noexcept { pthread_mutex_destroy(&_handle); } - - //! Lock. - ASMJIT_INLINE void lock() noexcept { pthread_mutex_lock(&_handle); } - //! Unlock. - ASMJIT_INLINE void unlock() noexcept { pthread_mutex_unlock(&_handle); } -#endif // ASMJIT_OS_POSIX - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Native handle. - Handle _handle; -}; - -// ============================================================================ -// [asmjit::AutoLock] -// ============================================================================ - -//! \internal -//! -//! Scoped lock. 
-struct AutoLock { - ASMJIT_NONCOPYABLE(AutoLock) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE AutoLock(Lock& target) noexcept : _target(target) { _target.lock(); } - ASMJIT_INLINE ~AutoLock() noexcept { _target.unlock(); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Reference to the `Lock`. - Lock& _target; -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_OSUTILS_H diff --git a/src/asmjit/base/regalloc.cpp b/src/asmjit/base/regalloc.cpp deleted file mode 100644 index cbdfd85..0000000 --- a/src/asmjit/base/regalloc.cpp +++ /dev/null @@ -1,594 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Guard] -#include "../asmjit_build.h" -#if !defined(ASMJIT_DISABLE_COMPILER) - -// [Dependencies] -#include "../base/regalloc_p.h" -#include "../base/utils.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::RAPass - Construction / Destruction] -// ============================================================================ - -RAPass::RAPass() noexcept : - CBPass("RA"), - _varMapToVaListOffset(0) {} -RAPass::~RAPass() noexcept {} - -// ============================================================================ -// [asmjit::RAPass - Interface] -// ============================================================================ - -Error RAPass::process(Zone* zone) noexcept { - _zone = zone; - _heap.reset(zone); - _emitComments = (cb()->getGlobalOptions() & CodeEmitter::kOptionLoggingEnabled) != 0; - - Error err = kErrorOk; - CBNode* node = cc()->getFirstNode(); - if (!node) return err; - - do { - if (node->getType() == CBNode::kNodeFunc) { - CCFunc* func = static_cast(node); - node = func->getEnd(); - - err = compile(func); - if (err) break; - } - - // Find a function by skipping all nodes that are not `kNodeFunc`. - do { - node = node->getNext(); - } while (node && node->getType() != CBNode::kNodeFunc); - } while (node); - - _heap.reset(nullptr); - _zone = nullptr; - return err; -} - -Error RAPass::compile(CCFunc* func) noexcept { - ASMJIT_PROPAGATE(prepare(func)); - - Error err; - do { - err = fetch(); - if (err) break; - - err = removeUnreachableCode(); - if (err) break; - - err = livenessAnalysis(); - if (err) break; - -#if !defined(ASMJIT_DISABLE_LOGGING) - if (cc()->getGlobalOptions() & CodeEmitter::kOptionLoggingEnabled) { - err = annotate(); - if (err) break; - } -#endif // !ASMJIT_DISABLE_LOGGING - - err = translate(); - } while (false); - - cleanup(); - - // We alter the compiler cursor, because it doesn't make sense to reference - // it after compilation - some nodes may disappear and it's forbidden to add - // new code after the compilation is done. 
- cc()->_setCursor(nullptr); - return err; -} - -Error RAPass::prepare(CCFunc* func) noexcept { - CBNode* end = func->getEnd(); - - _func = func; - _stop = end->getNext(); - - _unreachableList.reset(); - _returningList.reset(); - _jccList.reset(); - _contextVd.reset(); - - _memVarCells = nullptr; - _memStackCells = nullptr; - - _mem1ByteVarsUsed = 0; - _mem2ByteVarsUsed = 0; - _mem4ByteVarsUsed = 0; - _mem8ByteVarsUsed = 0; - _mem16ByteVarsUsed = 0; - _mem32ByteVarsUsed = 0; - _mem64ByteVarsUsed = 0; - _memStackCellsUsed = 0; - - _memMaxAlign = 0; - _memVarTotal = 0; - _memStackTotal = 0; - _memAllTotal = 0; - _annotationLength = 12; - - return kErrorOk; -} - -void RAPass::cleanup() noexcept { - VirtReg** virtArray = _contextVd.getData(); - size_t virtCount = _contextVd.getLength(); - - for (size_t i = 0; i < virtCount; i++) { - VirtReg* vreg = virtArray[i]; - vreg->_raId = kInvalidValue; - vreg->resetPhysId(); - } - - _contextVd.reset(); -} - -// ============================================================================ -// [asmjit::RAPass - Mem] -// ============================================================================ - -static ASMJIT_INLINE uint32_t RAGetDefaultAlignment(uint32_t size) { - if (size > 32) - return 64; - else if (size > 16) - return 32; - else if (size > 8) - return 16; - else if (size > 4) - return 8; - else if (size > 2) - return 4; - else if (size > 1) - return 2; - else - return 1; -} - -RACell* RAPass::_newVarCell(VirtReg* vreg) { - ASMJIT_ASSERT(vreg->_memCell == nullptr); - - RACell* cell; - uint32_t size = vreg->getSize(); - - if (vreg->isStack()) { - cell = _newStackCell(size, vreg->getAlignment()); - if (ASMJIT_UNLIKELY(!cell)) return nullptr; - } - else { - cell = static_cast(_zone->alloc(sizeof(RACell))); - if (!cell) goto _NoMemory; - - cell->next = _memVarCells; - cell->offset = 0; - cell->size = size; - cell->alignment = size; - - _memVarCells = cell; - _memMaxAlign = std::max(_memMaxAlign, size); - _memVarTotal += size; - - switch (size) { - case 1: _mem1ByteVarsUsed++ ; break; - case 2: _mem2ByteVarsUsed++ ; break; - case 4: _mem4ByteVarsUsed++ ; break; - case 8: _mem8ByteVarsUsed++ ; break; - case 16: _mem16ByteVarsUsed++; break; - case 32: _mem32ByteVarsUsed++; break; - case 64: _mem64ByteVarsUsed++; break; - - default: - ASMJIT_NOT_REACHED(); - } - } - - vreg->_memCell = cell; - return cell; - -_NoMemory: - cc()->setLastError(DebugUtils::errored(kErrorNoHeapMemory)); - return nullptr; -} - -RACell* RAPass::_newStackCell(uint32_t size, uint32_t alignment) { - RACell* cell = static_cast(_zone->alloc(sizeof(RACell))); - if (ASMJIT_UNLIKELY(!cell)) return nullptr; - - if (alignment == 0) - alignment = RAGetDefaultAlignment(size); - - if (alignment > 64) - alignment = 64; - - ASMJIT_ASSERT(Utils::isPowerOf2(alignment)); - size = Utils::alignTo(size, alignment); - - // Insert it sorted according to the alignment and size. 
- { - RACell** pPrev = &_memStackCells; - RACell* cur = *pPrev; - - while (cur && ((cur->alignment > alignment) || (cur->alignment == alignment && cur->size > size))) { - pPrev = &cur->next; - cur = *pPrev; - } - - cell->next = cur; - cell->offset = 0; - cell->size = size; - cell->alignment = alignment; - - *pPrev = cell; - _memStackCellsUsed++; - - _memMaxAlign = std::max(_memMaxAlign, alignment); - _memStackTotal += size; - } - - return cell; -} - -Error RAPass::resolveCellOffsets() { - RACell* varCell = _memVarCells; - RACell* stackCell = _memStackCells; - - uint32_t pos64 = 0; - uint32_t pos32 = pos64 + _mem64ByteVarsUsed * 64; - uint32_t pos16 = pos32 + _mem32ByteVarsUsed * 32; - uint32_t pos8 = pos16 + _mem16ByteVarsUsed * 16; - uint32_t pos4 = pos8 + _mem8ByteVarsUsed * 8 ; - uint32_t pos2 = pos4 + _mem4ByteVarsUsed * 4 ; - uint32_t pos1 = pos2 + _mem2ByteVarsUsed * 2 ; - - // Assign home slots. - while (varCell) { - uint32_t size = varCell->size; - uint32_t offset = 0; - - switch (size) { - case 1: offset = pos1 ; pos1 += 1 ; break; - case 2: offset = pos2 ; pos2 += 2 ; break; - case 4: offset = pos4 ; pos4 += 4 ; break; - case 8: offset = pos8 ; pos8 += 8 ; break; - case 16: offset = pos16; pos16 += 16; break; - case 32: offset = pos32; pos32 += 32; break; - case 64: offset = pos64; pos64 += 64; break; - - default: - ASMJIT_NOT_REACHED(); - } - - varCell->offset = static_cast(offset); - varCell = varCell->next; - } - - // Assign stack slots. - uint32_t stackPos = pos1 + _mem1ByteVarsUsed; - while (stackCell) { - uint32_t size = stackCell->size; - uint32_t alignment = stackCell->alignment; - ASMJIT_ASSERT(alignment != 0 && Utils::isPowerOf2(alignment)); - - stackPos = Utils::alignTo(stackPos, alignment); - stackCell->offset = stackPos; - stackCell = stackCell->next; - - stackPos += size; - } - - _memAllTotal = stackPos; - return kErrorOk; -} - -// ============================================================================ -// [asmjit::RAPass - RemoveUnreachableCode] -// ============================================================================ - -Error RAPass::removeUnreachableCode() { - ZoneList::Link* link = _unreachableList.getFirst(); - CBNode* stop = getStop(); - - while (link) { - CBNode* node = link->getValue(); - if (node && node->getPrev() && node != stop) { - // Locate all unreachable nodes. - CBNode* first = node; - do { - if (node->hasPassData()) break; - node = node->getNext(); - } while (node != stop); - - // Remove unreachable nodes that are neither informative nor directives. - if (node != first) { - CBNode* end = node; - node = first; - - // NOTE: The strategy is as follows: - // 1. The algorithm removes everything until it finds a first label. - // 2. After the first label is found it removes only removable nodes. - bool removeEverything = true; - do { - CBNode* next = node->getNext(); - bool remove = node->isRemovable(); - - if (!remove) { - if (node->isLabel()) - removeEverything = false; - remove = removeEverything; - } - - if (remove) - cc()->removeNode(node); - - node = next; - } while (node != end); - } - } - - link = link->getNext(); - } - - return kErrorOk; -} - -// ============================================================================ -// [asmjit::RAPass - Liveness Analysis] -// ============================================================================ - -//! \internal -struct LivenessTarget { - LivenessTarget* prev; //!< Previous target. - CBLabel* node; //!< Target node. - CBJump* from; //!< Jumped from. 
-}; - -Error RAPass::livenessAnalysis() { - uint32_t bLen = static_cast( - ((_contextVd.getLength() + RABits::kEntityBits - 1) / RABits::kEntityBits)); - - // No variables. - if (bLen == 0) - return kErrorOk; - - CCFunc* func = getFunc(); - CBJump* from = nullptr; - - LivenessTarget* ltCur = nullptr; - LivenessTarget* ltUnused = nullptr; - - ZoneList::Link* retPtr = _returningList.getFirst(); - ASMJIT_ASSERT(retPtr != nullptr); - - CBNode* node = retPtr->getValue(); - RAData* wd; - - size_t varMapToVaListOffset = _varMapToVaListOffset; - RABits* bCur = newBits(bLen); - if (ASMJIT_UNLIKELY(!bCur)) goto NoMem; - - // Allocate bits for code visited first time. -Visit: - for (;;) { - wd = node->getPassData(); - if (wd->liveness) { - if (bCur->_addBitsDelSource(wd->liveness, bCur, bLen)) - goto Patch; - else - goto Done; - } - - RABits* bTmp = copyBits(bCur, bLen); - if (!bTmp) goto NoMem; - - wd = node->getPassData(); - wd->liveness = bTmp; - - uint32_t tiedTotal = wd->tiedTotal; - TiedReg* tiedArray = reinterpret_cast(((uint8_t*)wd) + varMapToVaListOffset); - - for (uint32_t i = 0; i < tiedTotal; i++) { - TiedReg* tied = &tiedArray[i]; - VirtReg* vreg = tied->vreg; - - uint32_t flags = tied->flags; - uint32_t raId = vreg->_raId; - - if ((flags & TiedReg::kWAll) && !(flags & TiedReg::kRAll)) { - // Write-Only. - bTmp->setBit(raId); - bCur->delBit(raId); - } - else { - // Read-Only or Read/Write. - bTmp->setBit(raId); - bCur->setBit(raId); - } - } - - if (node->getType() == CBNode::kNodeLabel) - goto Target; - - if (node == func) - goto Done; - - ASMJIT_ASSERT(node->getPrev()); - node = node->getPrev(); - } - - // Patch already generated liveness bits. -Patch: - for (;;) { - ASMJIT_ASSERT(node->hasPassData()); - ASMJIT_ASSERT(node->getPassData()->liveness != nullptr); - - RABits* bNode = node->getPassData()->liveness; - if (!bNode->_addBitsDelSource(bCur, bLen)) goto Done; - if (node->getType() == CBNode::kNodeLabel) goto Target; - - if (node == func) goto Done; - node = node->getPrev(); - } - -Target: - if (static_cast(node)->getNumRefs() != 0) { - // Push a new LivenessTarget onto the stack if needed. - if (!ltCur || ltCur->node != node) { - // Allocate a new LivenessTarget object (from pool or zone). - LivenessTarget* ltTmp = ltUnused; - - if (ltTmp) { - ltUnused = ltUnused->prev; - } - else { - ltTmp = _zone->allocT( - sizeof(LivenessTarget) - sizeof(RABits) + bLen * sizeof(uintptr_t)); - if (!ltTmp) goto NoMem; - } - - // Initialize and make current - ltTmp->from will be set later on. - ltTmp->prev = ltCur; - ltTmp->node = static_cast(node); - ltCur = ltTmp; - - from = static_cast(node)->getFrom(); - ASMJIT_ASSERT(from != nullptr); - } - else { - from = ltCur->from; - goto JumpNext; - } - - // Visit/Patch. - do { - ltCur->from = from; - bCur->copyBits(node->getPassData()->liveness, bLen); - - if (!from->getPassData()->liveness) { - node = from; - goto Visit; - } - - // Issue #25: Moved 'JumpNext' here since it's important to patch - // code again if there are more live variables than before. -JumpNext: - if (bCur->delBits(from->getPassData()->liveness, bLen)) { - node = from; - goto Patch; - } - - from = from->getJumpNext(); - } while (from); - - // Pop the current LivenessTarget from the stack. 
- { - LivenessTarget* ltTmp = ltCur; - ltCur = ltCur->prev; - ltTmp->prev = ltUnused; - ltUnused = ltTmp; - } - } - - bCur->copyBits(node->getPassData()->liveness, bLen); - node = node->getPrev(); - if (node->isJmp() || !node->hasPassData()) goto Done; - - wd = node->getPassData(); - if (!wd->liveness) goto Visit; - if (bCur->delBits(wd->liveness, bLen)) goto Patch; - -Done: - if (ltCur) { - node = ltCur->node; - from = ltCur->from; - - goto JumpNext; - } - - retPtr = retPtr->getNext(); - if (retPtr) { - node = retPtr->getValue(); - goto Visit; - } - - return kErrorOk; - -NoMem: - return DebugUtils::errored(kErrorNoHeapMemory); -} - -// ============================================================================ -// [asmjit::RAPass - Annotate] -// ============================================================================ - -Error RAPass::formatInlineComment(StringBuilder& dst, CBNode* node) { -#if !defined(ASMJIT_DISABLE_LOGGING) - RAData* wd = node->getPassData(); - - if (node->hasInlineComment()) - dst.appendString(node->getInlineComment()); - - if (wd && wd->liveness) { - if (dst.getLength() < _annotationLength) - dst.appendChars(' ', _annotationLength - dst.getLength()); - - uint32_t vdCount = static_cast(_contextVd.getLength()); - size_t offset = dst.getLength() + 1; - - dst.appendChar('['); - dst.appendChars(' ', vdCount); - dst.appendChar(']'); - RABits* liveness = wd->liveness; - - uint32_t i; - for (i = 0; i < vdCount; i++) { - if (liveness->getBit(i)) - dst.getData()[offset + i] = '.'; - } - - uint32_t tiedTotal = wd->tiedTotal; - TiedReg* tiedArray = reinterpret_cast(((uint8_t*)wd) + _varMapToVaListOffset); - - for (i = 0; i < tiedTotal; i++) { - TiedReg* tied = &tiedArray[i]; - VirtReg* vreg = tied->vreg; - uint32_t flags = tied->flags; - - char c = 'u'; - if ( (flags & TiedReg::kRAll) && !(flags & TiedReg::kWAll)) c = 'r'; - if (!(flags & TiedReg::kRAll) && (flags & TiedReg::kWAll)) c = 'w'; - if ( (flags & TiedReg::kRAll) && (flags & TiedReg::kWAll)) c = 'x'; - // Uppercase if unused. - if ( (flags & TiedReg::kUnuse)) c -= 'a' - 'A'; - - ASMJIT_ASSERT(offset + vreg->_raId < dst.getLength()); - dst._data[offset + vreg->_raId] = c; - } - } -#endif // !ASMJIT_DISABLE_LOGGING - - return kErrorOk; -} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // !ASMJIT_DISABLE_COMPILER diff --git a/src/asmjit/base/regalloc_p.h b/src/asmjit/base/regalloc_p.h deleted file mode 100644 index 53c7aeb..0000000 --- a/src/asmjit/base/regalloc_p.h +++ /dev/null @@ -1,568 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_REGALLOC_P_H -#define _ASMJIT_BASE_REGALLOC_P_H - -#include "../asmjit_build.h" -#if !defined(ASMJIT_DISABLE_COMPILER) - -// [Dependencies] -#include "../base/codecompiler.h" -#include "../base/zone.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::TiedReg] -// ============================================================================ - -//! Tied register (CodeCompiler) -//! -//! Tied register is used to describe one ore more register operands that share -//! the same virtual register. Tied register contains all the data that is -//! essential for register allocation. -struct TiedReg { - //! Flags. 
- ASMJIT_ENUM(Flags) { - kRReg = 0x00000001U, //!< Register read. - kWReg = 0x00000002U, //!< Register write. - kXReg = 0x00000003U, //!< Register read-write. - - kRMem = 0x00000004U, //!< Memory read. - kWMem = 0x00000008U, //!< Memory write. - kXMem = 0x0000000CU, //!< Memory read-write. - - kRDecide = 0x00000010U, //!< RA can decide between reg/mem read. - kWDecide = 0x00000020U, //!< RA can decide between reg/mem write. - kXDecide = 0x00000030U, //!< RA can decide between reg/mem read-write. - - kRFunc = 0x00000100U, //!< Function argument passed in register. - kWFunc = 0x00000200U, //!< Function return value passed into register. - kXFunc = 0x00000300U, //!< Function argument and return value. - kRCall = 0x00000400U, //!< Function call operand. - - kSpill = 0x00000800U, //!< Variable should be spilled. - kUnuse = 0x00001000U, //!< Variable should be unused at the end of the instruction/node. - - kRAll = kRReg | kRMem | kRDecide | kRFunc | kRCall, //!< All in-flags. - kWAll = kWReg | kWMem | kWDecide | kWFunc, //!< All out-flags. - - kRDone = 0x00400000U, //!< Already allocated on the input. - kWDone = 0x00800000U, //!< Already allocated on the output. - - kX86GpbLo = 0x10000000U, - kX86GpbHi = 0x20000000U, - kX86Fld4 = 0x40000000U, - kX86Fld8 = 0x80000000U - }; - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void init(VirtReg* vreg, uint32_t flags = 0, uint32_t inRegs = 0, uint32_t allocableRegs = 0) noexcept { - this->vreg = vreg; - this->flags = flags; - this->refCount = 0; - this->inPhysId = Globals::kInvalidRegId; - this->outPhysId = Globals::kInvalidRegId; - this->reserved = 0; - this->inRegs = inRegs; - this->allocableRegs = allocableRegs; - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get whether the variable has to be allocated in a specific input register. - ASMJIT_INLINE uint32_t hasInPhysId() const { return inPhysId != Globals::kInvalidRegId; } - //! Get whether the variable has to be allocated in a specific output register. - ASMJIT_INLINE uint32_t hasOutPhysId() const { return outPhysId != Globals::kInvalidRegId; } - - //! Set the input register index. - ASMJIT_INLINE void setInPhysId(uint32_t index) { inPhysId = static_cast(index); } - //! Set the output register index. - ASMJIT_INLINE void setOutPhysId(uint32_t index) { outPhysId = static_cast(index); } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE TiedReg& operator=(const TiedReg& other) { - ::memcpy(this, &other, sizeof(TiedReg)); - return *this; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Pointer to the associated \ref VirtReg. - VirtReg* vreg; - //! Tied flags. - uint32_t flags; - - union { - struct { - //! How many times the variable is used by the instruction/node. - uint8_t refCount; - //! Input register index or `kInvalidReg` if it's not given. - //! - //! Even if the input register index is not given (i.e. it may by any - //! register), register allocator should assign an index that will be - //! 
used to persist a variable into this specific index. It's helpful - //! in situations where one variable has to be allocated in multiple - //! registers to determine the register which will be persistent. - uint8_t inPhysId; - //! Output register index or `kInvalidReg` if it's not given. - //! - //! Typically `kInvalidReg` if variable is only used on input. - uint8_t outPhysId; - //! \internal - uint8_t reserved; - }; - - //! \internal - //! - //! Packed data #0. - uint32_t packed; - }; - - //! Mandatory input registers. - //! - //! Mandatory input registers are required by the instruction even if - //! there are duplicates. This schema allows us to allocate one variable - //! in one or more register when needed. Required mostly by instructions - //! that have implicit register operands (imul, cpuid, ...) and function - //! call. - uint32_t inRegs; - - //! Allocable input registers. - //! - //! Optional input registers is a mask of all allocable registers for a given - //! variable where we have to pick one of them. This mask is usually not used - //! when _inRegs is set. If both masks are used then the register - //! allocator tries first to find an intersection between these and allocates - //! an extra slot if not found. - uint32_t allocableRegs; -}; - -// ============================================================================ -// [asmjit::RABits] -// ============================================================================ - -//! Fixed size bit-array. -//! -//! Used by variable liveness analysis. -struct RABits { - // -------------------------------------------------------------------------- - // [Enums] - // -------------------------------------------------------------------------- - - enum { - kEntitySize = static_cast(sizeof(uintptr_t)), - kEntityBits = kEntitySize * 8 - }; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE uintptr_t getBit(uint32_t index) const noexcept { - return (data[index / kEntityBits] >> (index % kEntityBits)) & 1; - } - - ASMJIT_INLINE void setBit(uint32_t index) noexcept { - data[index / kEntityBits] |= static_cast(1) << (index % kEntityBits); - } - - ASMJIT_INLINE void delBit(uint32_t index) noexcept { - data[index / kEntityBits] &= ~(static_cast(1) << (index % kEntityBits)); - } - - // -------------------------------------------------------------------------- - // [Interface] - // -------------------------------------------------------------------------- - - //! Copy bits from `s0`, returns `true` if at least one bit is set in `s0`. 
- ASMJIT_INLINE bool copyBits(const RABits* s0, uint32_t len) noexcept { - uintptr_t r = 0; - for (uint32_t i = 0; i < len; i++) { - uintptr_t t = s0->data[i]; - data[i] = t; - r |= t; - } - return r != 0; - } - - ASMJIT_INLINE bool addBits(const RABits* s0, uint32_t len) noexcept { - return addBits(this, s0, len); - } - - ASMJIT_INLINE bool addBits(const RABits* s0, const RABits* s1, uint32_t len) noexcept { - uintptr_t r = 0; - for (uint32_t i = 0; i < len; i++) { - uintptr_t t = s0->data[i] | s1->data[i]; - data[i] = t; - r |= t; - } - return r != 0; - } - - ASMJIT_INLINE bool andBits(const RABits* s1, uint32_t len) noexcept { - return andBits(this, s1, len); - } - - ASMJIT_INLINE bool andBits(const RABits* s0, const RABits* s1, uint32_t len) noexcept { - uintptr_t r = 0; - for (uint32_t i = 0; i < len; i++) { - uintptr_t t = s0->data[i] & s1->data[i]; - data[i] = t; - r |= t; - } - return r != 0; - } - - ASMJIT_INLINE bool delBits(const RABits* s1, uint32_t len) noexcept { - return delBits(this, s1, len); - } - - ASMJIT_INLINE bool delBits(const RABits* s0, const RABits* s1, uint32_t len) noexcept { - uintptr_t r = 0; - for (uint32_t i = 0; i < len; i++) { - uintptr_t t = s0->data[i] & ~s1->data[i]; - data[i] = t; - r |= t; - } - return r != 0; - } - - ASMJIT_INLINE bool _addBitsDelSource(RABits* s1, uint32_t len) noexcept { - return _addBitsDelSource(this, s1, len); - } - - ASMJIT_INLINE bool _addBitsDelSource(const RABits* s0, RABits* s1, uint32_t len) noexcept { - uintptr_t r = 0; - for (uint32_t i = 0; i < len; i++) { - uintptr_t a = s0->data[i]; - uintptr_t b = s1->data[i]; - - this->data[i] = a | b; - b &= ~a; - - s1->data[i] = b; - r |= b; - } - return r != 0; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uintptr_t data[1]; -}; - -// ============================================================================ -// [asmjit::RACell] -// ============================================================================ - -//! Register allocator's (RA) memory cell. -struct RACell { - RACell* next; //!< Next active cell. - int32_t offset; //!< Cell offset, relative to base-offset. - uint32_t size; //!< Cell size. - uint32_t alignment; //!< Cell alignment. -}; - -// ============================================================================ -// [asmjit::RAData] -// ============================================================================ - -//! Register allocator's (RA) data associated with each \ref CBNode. -struct RAData { - ASMJIT_INLINE RAData(uint32_t tiedTotal) noexcept - : liveness(nullptr), - state(nullptr), - tiedTotal(tiedTotal) {} - - RABits* liveness; //!< Liveness bits (populated by liveness-analysis). - RAState* state; //!< Optional saved \ref RAState. - uint32_t tiedTotal; //!< Total count of \ref TiedReg regs. -}; - -// ============================================================================ -// [asmjit::RAState] -// ============================================================================ - -//! Variables' state. -struct RAState {}; - -// ============================================================================ -// [asmjit::RAPass] -// ============================================================================ - -//! \internal -//! -//! Register allocator pipeline used by \ref CodeCompiler. 
-struct RAPass : public CBPass { -public: - ASMJIT_NONCOPYABLE(RAPass) - - typedef void (ASMJIT_CDECL* TraceNodeFunc)(RAPass* self, CBNode* node_, const char* prefix); - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - RAPass() noexcept; - virtual ~RAPass() noexcept; - - // -------------------------------------------------------------------------- - // [Interface] - // -------------------------------------------------------------------------- - - virtual Error process(Zone* zone) noexcept override; - - //! Run the register allocator for a given function `func`. - virtual Error compile(CCFunc* func) noexcept; - - //! Called by `compile()` to prepare the register allocator to process the - //! given function. It should reset and set-up everything (i.e. no states - //! from a previous compilation should prevail). - virtual Error prepare(CCFunc* func) noexcept; - - //! Called after `compile()` to clean everything up, no matter if it - //! succeeded or failed. - virtual void cleanup() noexcept; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the associated `CodeCompiler`. - ASMJIT_INLINE CodeCompiler* cc() const noexcept { return static_cast(_cb); } - - //! Get function. - ASMJIT_INLINE CCFunc* getFunc() const noexcept { return _func; } - //! Get stop node. - ASMJIT_INLINE CBNode* getStop() const noexcept { return _stop; } - - // -------------------------------------------------------------------------- - // [State] - // -------------------------------------------------------------------------- - - //! Get current state. - ASMJIT_INLINE RAState* getState() const { return _state; } - - //! Load current state from `target` state. - virtual void loadState(RAState* src) = 0; - - //! Save current state, returning new `RAState` instance. - virtual RAState* saveState() = 0; - - //! Change the current state to `target` state. - virtual void switchState(RAState* src) = 0; - - //! Change the current state to the intersection of two states `a` and `b`. - virtual void intersectStates(RAState* a, RAState* b) = 0; - - // -------------------------------------------------------------------------- - // [Context] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE Error assignRAId(VirtReg* vreg) noexcept { - // Likely as a single virtual register would be mostly used more than once, - // this means that each virtual register will hit one bad case (doesn't - // have id) and then all likely cases. - if (ASMJIT_LIKELY(vreg->_raId != kInvalidValue)) return kErrorOk; - - uint32_t raId = static_cast(_contextVd.getLength()); - ASMJIT_PROPAGATE(_contextVd.append(&_heap, vreg)); - - vreg->_raId = raId; - return kErrorOk; - } - - // -------------------------------------------------------------------------- - // [Mem] - // -------------------------------------------------------------------------- - - RACell* _newVarCell(VirtReg* vreg); - RACell* _newStackCell(uint32_t size, uint32_t alignment); - - ASMJIT_INLINE RACell* getVarCell(VirtReg* vreg) { - RACell* cell = vreg->getMemCell(); - return cell ? 
cell : _newVarCell(vreg); - } - - virtual Error resolveCellOffsets(); - - // -------------------------------------------------------------------------- - // [Bits] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE RABits* newBits(uint32_t len) { - return static_cast<RABits*>( - _zone->allocZeroed(static_cast<size_t>(len) * RABits::kEntitySize)); - } - - ASMJIT_INLINE RABits* copyBits(const RABits* src, uint32_t len) { - return static_cast<RABits*>( - _zone->dup(src, static_cast<size_t>(len) * RABits::kEntitySize)); - } - - // -------------------------------------------------------------------------- - // [Fetch] - // -------------------------------------------------------------------------- - - //! Fetch. - //! - //! Fetch iterates over all nodes and gathers information about all variables - //! used. The process generates information required by the register allocator, - //! variable liveness analysis, and the translator. - virtual Error fetch() = 0; - - // -------------------------------------------------------------------------- - // [Unreachable Code] - // -------------------------------------------------------------------------- - - //! Add unreachable-flow data to the unreachable flow list. - ASMJIT_INLINE Error addUnreachableNode(CBNode* node) { - ZoneList<CBNode*>::Link* link = _zone->allocT<ZoneList<CBNode*>::Link>(); - if (!link) return DebugUtils::errored(kErrorNoHeapMemory); - - link->setValue(node); - _unreachableList.append(link); - - return kErrorOk; - } - - //! Remove unreachable code. - virtual Error removeUnreachableCode(); - - // -------------------------------------------------------------------------- - // [Code-Flow] - // -------------------------------------------------------------------------- - - //! Add returning node (i.e. node that returns and where liveness analysis - //! should start). - ASMJIT_INLINE Error addReturningNode(CBNode* node) { - ZoneList<CBNode*>::Link* link = _zone->allocT<ZoneList<CBNode*>::Link>(); - if (!link) return DebugUtils::errored(kErrorNoHeapMemory); - - link->setValue(node); - _returningList.append(link); - - return kErrorOk; - } - - //! Add jump-flow data to the jcc flow list. - ASMJIT_INLINE Error addJccNode(CBNode* node) { - ZoneList<CBNode*>::Link* link = _zone->allocT<ZoneList<CBNode*>::Link>(); - if (!link) return DebugUtils::errored(kErrorNoHeapMemory); - - link->setValue(node); - _jccList.append(link); - - return kErrorOk; - } - - // -------------------------------------------------------------------------- - // [Analyze] - // -------------------------------------------------------------------------- - - //! Perform variable liveness analysis. - //! - //! The analysis phase iterates over nodes in reverse order and generates a bit - //! array describing variables that are alive at every node in the function. - //! When the analysis starts all variables are assumed dead. When a read or - //! read/write operation of a variable is detected the variable becomes - //! alive; when only a write operation is detected the variable becomes dead. - //! - //! When a label is found all jumps to that label are followed and analysis - //! repeats until all variables are resolved.
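The liveness rule described above reduces to word-wise bit-set operations like the ones RABits implements (addBits, delBits, _addBitsDelSource). The following self-contained sketch is not taken from AsmJit; it just demonstrates the per-node update live_in = use | (live_out & ~def) on plain machine words, which is the operation the analysis applies per RA id.

#include <cstdint>
#include <cstdio>

int main() {
  // Each bit represents one virtual register (one RA id).
  uintptr_t liveOut = 0x0Cu; // Registers live after the node (ids 2 and 3).
  uintptr_t use     = 0x02u; // Registers the node reads (id 1).
  uintptr_t def     = 0x04u; // Registers the node only writes (id 2).

  // Write-only registers die above the node, read registers become live.
  uintptr_t liveIn = use | (liveOut & ~def); // 0x0A: ids 1 and 3 are live before the node.

  std::printf("liveIn = 0x%llx\n", (unsigned long long)liveIn);
  return 0;
}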
- virtual Error livenessAnalysis(); - - // -------------------------------------------------------------------------- - // [Annotate] - // -------------------------------------------------------------------------- - - virtual Error annotate() = 0; - virtual Error formatInlineComment(StringBuilder& dst, CBNode* node); - - // -------------------------------------------------------------------------- - // [Translate] - // -------------------------------------------------------------------------- - - //! Translate code by allocating registers and handling state changes. - virtual Error translate() = 0; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - Zone* _zone; //!< Zone passed to `process()`. - ZoneHeap _heap; //!< ZoneHeap that uses `_zone`. - - CCFunc* _func; //!< Function being processed. - CBNode* _stop; //!< Stop node. - - //! \internal - //! - //! Offset (how many bytes to add) to `VarMap` to get `TiedReg` array. Used - //! by liveness analysis shared across all backends. This is needed because - //! `VarMap` is a base class for a specialized version that liveness analysis - //! doesn't use, it just needs `TiedReg` array. - uint32_t _varMapToVaListOffset; - - uint8_t _emitComments; //!< Whether to emit comments. - - ZoneList _unreachableList; //!< Unreachable nodes. - ZoneList _returningList; //!< Returning nodes. - ZoneList _jccList; //!< Jump nodes. - - ZoneVector _contextVd; //!< All variables used by the current function. - RACell* _memVarCells; //!< Memory used to spill variables. - RACell* _memStackCells; //!< Memory used to allocate memory on the stack. - - uint32_t _mem1ByteVarsUsed; //!< Count of 1-byte cells. - uint32_t _mem2ByteVarsUsed; //!< Count of 2-byte cells. - uint32_t _mem4ByteVarsUsed; //!< Count of 4-byte cells. - uint32_t _mem8ByteVarsUsed; //!< Count of 8-byte cells. - uint32_t _mem16ByteVarsUsed; //!< Count of 16-byte cells. - uint32_t _mem32ByteVarsUsed; //!< Count of 32-byte cells. - uint32_t _mem64ByteVarsUsed; //!< Count of 64-byte cells. - uint32_t _memStackCellsUsed; //!< Count of stack memory cells. - - uint32_t _memMaxAlign; //!< Maximum memory alignment used by the function. - uint32_t _memVarTotal; //!< Count of bytes used by variables. - uint32_t _memStackTotal; //!< Count of bytes used by stack. - uint32_t _memAllTotal; //!< Count of bytes used by variables and stack after alignment. - - uint32_t _annotationLength; //!< Default length of an annotated instruction. - RAState* _state; //!< Current RA state. -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // !ASMJIT_DISABLE_COMPILER -#endif // _ASMJIT_BASE_REGALLOC_P_H diff --git a/src/asmjit/base/runtime.cpp b/src/asmjit/base/runtime.cpp deleted file mode 100644 index f074885..0000000 --- a/src/asmjit/base/runtime.cpp +++ /dev/null @@ -1,147 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/assembler.h" -#include "../base/cpuinfo.h" -#include "../base/runtime.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -static ASMJIT_INLINE void hostFlushInstructionCache(const void* p, size_t size) noexcept { - // Only useful on non-x86 architectures. 
-#if !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64 -# if ASMJIT_OS_WINDOWS - // Windows has built-in support in kernel32.dll. - ::FlushInstructionCache(::GetCurrentProcess(), p, size); -# endif // ASMJIT_OS_WINDOWS -#else - ASMJIT_UNUSED(p); - ASMJIT_UNUSED(size); -#endif // !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64 -} - -static ASMJIT_INLINE uint32_t hostDetectNaturalStackAlignment() noexcept { - // Alignment is assumed to match the pointer-size by default. - uint32_t alignment = sizeof(intptr_t); - - // X86 & X64 - // --------- - // - // - 32-bit X86 requires the stack to be aligned to 4 bytes. Modern Linux, Mac - // and UNIX guarantee 16-byte stack alignment even on 32-bit. I'm not - // sure about all other UNIX operating systems, because 16-byte alignment - // is an addition to an older specification. - // - 64-bit X86 requires the stack to be aligned to at least 16 bytes. -#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - int kIsModernOS = ASMJIT_OS_LINUX || // Linux & ANDROID. - ASMJIT_OS_MAC || // OSX and iOS. - ASMJIT_OS_BSD ; // BSD variants. - alignment = ASMJIT_ARCH_X64 || kIsModernOS ? 16 : 4; -#endif - - // ARM32 & ARM64 - // ------------- - // - // - 32-bit ARM requires the stack to be aligned to 8 bytes. - // - 64-bit ARM requires the stack to be aligned to 16 bytes. -#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 - alignment = ASMJIT_ARCH_ARM32 ? 8 : 16; -#endif - - return alignment; -} - - -// ============================================================================ -// [asmjit::Runtime - Construction / Destruction] -// ============================================================================ - -Runtime::Runtime() noexcept - : _codeInfo(), - _runtimeType(kRuntimeNone), - _allocType(VMemMgr::kAllocFreeable) {} -Runtime::~Runtime() noexcept {} - -// ============================================================================ -// [asmjit::HostRuntime - Construction / Destruction] -// ============================================================================ - -HostRuntime::HostRuntime() noexcept { - _runtimeType = kRuntimeJit; - - // Setup the CodeInfo of this Runtime.
- _codeInfo._archInfo = CpuInfo::getHost().getArchInfo(); - _codeInfo._stackAlignment = static_cast(hostDetectNaturalStackAlignment()); - _codeInfo._cdeclCallConv = CallConv::kIdHostCDecl; - _codeInfo._stdCallConv = CallConv::kIdHostStdCall; - _codeInfo._fastCallConv = CallConv::kIdHostFastCall; -} -HostRuntime::~HostRuntime() noexcept {} - -// ============================================================================ -// [asmjit::HostRuntime - Interface] -// ============================================================================ - -void HostRuntime::flush(const void* p, size_t size) noexcept { - hostFlushInstructionCache(p, size); -} - -// ============================================================================ -// [asmjit::JitRuntime - Construction / Destruction] -// ============================================================================ - -JitRuntime::JitRuntime() noexcept {} -JitRuntime::~JitRuntime() noexcept {} - -// ============================================================================ -// [asmjit::JitRuntime - Interface] -// ============================================================================ - -Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept { - size_t codeSize = code->getCodeSize(); - if (ASMJIT_UNLIKELY(codeSize == 0)) { - *dst = nullptr; - return DebugUtils::errored(kErrorNoCodeGenerated); - } - - void* p = _memMgr.alloc(codeSize, getAllocType()); - if (ASMJIT_UNLIKELY(!p)) { - *dst = nullptr; - return DebugUtils::errored(kErrorNoVirtualMemory); - } - - // Relocate the code and release the unused memory back to `VMemMgr`. - size_t relocSize = code->relocate(p); - if (ASMJIT_UNLIKELY(relocSize == 0)) { - *dst = nullptr; - _memMgr.release(p); - return DebugUtils::errored(kErrorInvalidState); - } - - if (relocSize < codeSize) - _memMgr.shrink(p, relocSize); - - flush(p, relocSize); - *dst = p; - - return kErrorOk; -} - -Error JitRuntime::_release(void* p) noexcept { - return _memMgr.release(p); -} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/runtime.h b/src/asmjit/base/runtime.h deleted file mode 100644 index 730b6a8..0000000 --- a/src/asmjit/base/runtime.h +++ /dev/null @@ -1,198 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_RUNTIME_H -#define _ASMJIT_BASE_RUNTIME_H - -// [Dependencies] -#include "../base/codeholder.h" -#include "../base/vmem.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [Forward Declarations] -// ============================================================================ - -class CodeHolder; - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::Runtime] -// ============================================================================ - -//! Base runtime. -class ASMJIT_VIRTAPI Runtime { -public: - ASMJIT_NONCOPYABLE(Runtime) - - ASMJIT_ENUM(RuntimeType) { - kRuntimeNone = 0, - kRuntimeJit = 1, - kRuntimeRemote = 2 - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a `Runtime` instance. - ASMJIT_API Runtime() noexcept; - //! Destroy the `Runtime` instance. 
- ASMJIT_API virtual ~Runtime() noexcept; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get CodeInfo of this runtime. - //! - //! CodeInfo can be used to setup a CodeHolder in case you plan to generate a - //! code compatible and executable by this Runtime. - ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; } - - //! Get the Runtime's architecture type, see \ref ArchInfo::Type. - ASMJIT_INLINE uint32_t getArchType() const noexcept { return _codeInfo.getArchType(); } - //! Get the Runtime's architecture sub-type, see \ref ArchInfo::SubType. - ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _codeInfo.getArchSubType(); } - - //! Get the runtime type, see \ref Type. - ASMJIT_INLINE uint32_t getRuntimeType() const noexcept { return _runtimeType; } - - // -------------------------------------------------------------------------- - // [Interface] - // -------------------------------------------------------------------------- - - // NOTE: To allow passing function pointers to `add()` and `release()` the - // virtual methods are prefixed with `_` and called from templates. - - template - ASMJIT_INLINE Error add(Func* dst, CodeHolder* code) noexcept { - return _add(Internal::ptr_cast(dst), code); - } - - template - ASMJIT_INLINE Error release(Func dst) noexcept { - return _release(Internal::ptr_cast(dst)); - } - - //! Allocate a memory needed for a code stored in the \ref CodeHolder and - //! relocate it to the target location. - //! - //! The beginning of the memory allocated for the function is returned in - //! `dst`. If failed the \ref Error code is returned and `dst` is set to null - //! (this means that you don't have to set it to null before calling `add()`). - virtual Error _add(void** dst, CodeHolder* code) noexcept = 0; - - //! Release `p` allocated by `add()`. - virtual Error _release(void* p) noexcept = 0; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - CodeInfo _codeInfo; //!< Basic information about the Runtime's code. - uint8_t _runtimeType; //!< Type of the runtime. - uint8_t _allocType; //!< Type of the allocator the Runtime uses. - uint8_t _reserved[6]; //!< \internal -}; - -// ============================================================================ -// [asmjit::HostRuntime] -// ============================================================================ - -//! Runtime designed to be used in the same process the code is generated in. -class ASMJIT_VIRTAPI HostRuntime : public Runtime { -public: - ASMJIT_NONCOPYABLE(HostRuntime) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a `HostRuntime` instance. - ASMJIT_API HostRuntime() noexcept; - //! Destroy the `HostRuntime` instance. - ASMJIT_API virtual ~HostRuntime() noexcept; - - // -------------------------------------------------------------------------- - // [Interface] - // -------------------------------------------------------------------------- - - //! Flush an instruction cache. - //! - //! This member function is called after the code has been copied to the - //! destination buffer. It is only useful for JIT code generation as it - //! 
causes a flush of the processor's cache. - //! - //! Flushing is basically a NOP under X86/X64, but is needed by architectures - //! that do not have a transparent instruction cache like ARM. - //! - //! This function can also be overridden to improve compatibility with tools - //! such as Valgrind, however, it's not an official part of AsmJit. - ASMJIT_API virtual void flush(const void* p, size_t size) noexcept; -}; - -// ============================================================================ -// [asmjit::JitRuntime] -// ============================================================================ - -//! Runtime designed to store and execute code generated at runtime (JIT). -class ASMJIT_VIRTAPI JitRuntime : public HostRuntime { -public: - ASMJIT_NONCOPYABLE(JitRuntime) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a `JitRuntime` instance. - ASMJIT_API JitRuntime() noexcept; - //! Destroy the `JitRuntime` instance. - ASMJIT_API virtual ~JitRuntime() noexcept; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the type of allocation. - ASMJIT_INLINE uint32_t getAllocType() const noexcept { return _allocType; } - //! Set the type of allocation. - ASMJIT_INLINE void setAllocType(uint32_t allocType) noexcept { _allocType = allocType; } - - //! Get the virtual memory manager. - ASMJIT_INLINE VMemMgr* getMemMgr() const noexcept { return const_cast(&_memMgr); } - - // -------------------------------------------------------------------------- - // [Interface] - // -------------------------------------------------------------------------- - - ASMJIT_API Error _add(void** dst, CodeHolder* code) noexcept override; - ASMJIT_API Error _release(void* p) noexcept override; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Virtual memory manager. - VMemMgr _memMgr; -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_RUNTIME_H diff --git a/src/asmjit/base/string.cpp b/src/asmjit/base/string.cpp deleted file mode 100644 index 4d0a837..0000000 --- a/src/asmjit/base/string.cpp +++ /dev/null @@ -1,353 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/string.h" -#include "../base/utils.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::StringBuilder - Construction / Destruction] -// ============================================================================ - -// Should be placed in read-only memory. 
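As a usage reference for the Runtime/JitRuntime API deleted above, here is a hedged sketch of the add/release workflow (relocate into executable memory, flush the instruction cache, execute, release). `CodeHolder::init` and the emit step are assumptions not shown in this patch; only `getCodeInfo()`, `add()`, `release()`, `kErrorOk` and the JitRuntime class itself come from the files above.

#include <asmjit/asmjit.h>  // assumed umbrella header of the pre-refactor tree

typedef int (*SumFunc)(int, int);

static int runGeneratedCode() {
  asmjit::JitRuntime rt;            // Executes code in the current process.
  asmjit::CodeHolder code;
  code.init(rt.getCodeInfo());      // Match the runtime's architecture/ABI (assumed CodeHolder API).

  // ... emit a function computing a+b into `code` using an Assembler or Compiler ...

  SumFunc fn;
  if (rt.add(&fn, &code) != asmjit::kErrorOk)  // Copies, relocates, and flushes the icache.
    return -1;                                  // Without the emit step above this path is taken.

  int result = fn(1, 2);
  rt.release(fn);                   // Return the executable memory to the runtime.
  return result;
}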
-static const char StringBuilder_empty[4] = { 0 }; - -StringBuilder::StringBuilder() noexcept - : _data(const_cast(StringBuilder_empty)), - _length(0), - _capacity(0), - _canFree(false) {} - -StringBuilder::~StringBuilder() noexcept { - if (_canFree) - Internal::releaseMemory(_data); -} - -// ============================================================================ -// [asmjit::StringBuilder - Prepare / Reserve] -// ============================================================================ - -ASMJIT_FAVOR_SIZE char* StringBuilder::prepare(uint32_t op, size_t len) noexcept { - if (op == kStringOpSet) { - // We don't care here, but we can't return a null pointer since it indicates - // failure in memory allocation. - if (len == 0) { - if (_data != StringBuilder_empty) - _data[0] = 0; - - _length = 0; - return _data; - } - - if (_capacity < len) { - if (len >= IntTraits::maxValue() - sizeof(intptr_t) * 2) - return nullptr; - - size_t to = Utils::alignTo(len, sizeof(intptr_t)); - if (to < 256 - sizeof(intptr_t)) - to = 256 - sizeof(intptr_t); - - char* newData = static_cast(Internal::allocMemory(to + sizeof(intptr_t))); - if (!newData) { - clear(); - return nullptr; - } - - if (_canFree) - Internal::releaseMemory(_data); - - _data = newData; - _capacity = to + sizeof(intptr_t) - 1; - _canFree = true; - } - - _data[len] = 0; - _length = len; - - ASMJIT_ASSERT(_length <= _capacity); - return _data; - } - else { - // We don't care here, but we can't return a null pointer since it indicates - // failure of memory allocation. - if (len == 0) - return _data + _length; - - // Overflow. - if (IntTraits::maxValue() - sizeof(intptr_t) * 2 - _length < len) - return nullptr; - - size_t after = _length + len; - if (_capacity < after) { - size_t to = _capacity; - - if (to < 256) - to = 256; - - while (to < 1024 * 1024 && to < after) - to *= 2; - - if (to < after) { - to = after; - if (to < (IntTraits::maxValue() - 1024 * 32)) - to = Utils::alignTo(to, 1024 * 32); - } - - to = Utils::alignTo(to, sizeof(intptr_t)); - char* newData = static_cast(Internal::allocMemory(to + sizeof(intptr_t))); - if (!newData) return nullptr; - - ::memcpy(newData, _data, _length); - if (_canFree) - Internal::releaseMemory(_data); - - _data = newData; - _capacity = to + sizeof(intptr_t) - 1; - _canFree = true; - } - - char* ret = _data + _length; - _data[after] = 0; - _length = after; - - ASMJIT_ASSERT(_length <= _capacity); - return ret; - } -} - -ASMJIT_FAVOR_SIZE Error StringBuilder::reserve(size_t to) noexcept { - if (_capacity >= to) - return kErrorOk; - - if (to >= IntTraits::maxValue() - sizeof(intptr_t) * 2) - return DebugUtils::errored(kErrorNoHeapMemory); - - to = Utils::alignTo(to, sizeof(intptr_t)); - char* newData = static_cast(Internal::allocMemory(to + sizeof(intptr_t))); - - if (!newData) - return DebugUtils::errored(kErrorNoHeapMemory); - - ::memcpy(newData, _data, _length + 1); - if (_canFree) - Internal::releaseMemory(_data); - - _data = newData; - _capacity = to + sizeof(intptr_t) - 1; - _canFree = true; - return kErrorOk; -} - -// ============================================================================ -// [asmjit::StringBuilder - Clear] -// ============================================================================ - -void StringBuilder::clear() noexcept { - if (_data != StringBuilder_empty) - _data[0] = 0; - _length = 0; -} - -// ============================================================================ -// [asmjit::StringBuilder - Methods] -// 
============================================================================ - -Error StringBuilder::_opString(uint32_t op, const char* str, size_t len) noexcept { - if (len == Globals::kInvalidIndex) - len = str ? ::strlen(str) : static_cast(0); - - char* p = prepare(op, len); - if (!p) return DebugUtils::errored(kErrorNoHeapMemory); - - ::memcpy(p, str, len); - return kErrorOk; -} - -Error StringBuilder::_opChar(uint32_t op, char c) noexcept { - char* p = prepare(op, 1); - if (!p) return DebugUtils::errored(kErrorNoHeapMemory); - - *p = c; - return kErrorOk; -} - -Error StringBuilder::_opChars(uint32_t op, char c, size_t n) noexcept { - char* p = prepare(op, n); - if (!p) return DebugUtils::errored(kErrorNoHeapMemory); - - ::memset(p, c, n); - return kErrorOk; -} - -static const char StringBuilder_numbers[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - -Error StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept { - if (base < 2 || base > 36) - base = 10; - - char buf[128]; - char* p = buf + ASMJIT_ARRAY_SIZE(buf); - - uint64_t orig = i; - char sign = '\0'; - - // -------------------------------------------------------------------------- - // [Sign] - // -------------------------------------------------------------------------- - - if ((flags & kStringFormatSigned) != 0 && static_cast(i) < 0) { - i = static_cast(-static_cast(i)); - sign = '-'; - } - else if ((flags & kStringFormatShowSign) != 0) { - sign = '+'; - } - else if ((flags & kStringFormatShowSpace) != 0) { - sign = ' '; - } - - // -------------------------------------------------------------------------- - // [Number] - // -------------------------------------------------------------------------- - - do { - uint64_t d = i / base; - uint64_t r = i % base; - - *--p = StringBuilder_numbers[r]; - i = d; - } while (i); - - size_t numberLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p); - - // -------------------------------------------------------------------------- - // [Alternate Form] - // -------------------------------------------------------------------------- - - if ((flags & kStringFormatAlternate) != 0) { - if (base == 8) { - if (orig != 0) - *--p = '0'; - } - if (base == 16) { - *--p = 'x'; - *--p = '0'; - } - } - - // -------------------------------------------------------------------------- - // [Width] - // -------------------------------------------------------------------------- - - if (sign != 0) - *--p = sign; - - if (width > 256) - width = 256; - - if (width <= numberLength) - width = 0; - else - width -= numberLength; - - // -------------------------------------------------------------------------- - // Write] - // -------------------------------------------------------------------------- - - size_t prefixLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberLength; - char* data = prepare(op, prefixLength + width + numberLength); - - if (!data) - return DebugUtils::errored(kErrorNoHeapMemory); - - ::memcpy(data, p, prefixLength); - data += prefixLength; - - ::memset(data, '0', width); - data += width; - - ::memcpy(data, p + prefixLength, numberLength); - return kErrorOk; -} - -Error StringBuilder::_opHex(uint32_t op, const void* data, size_t len) noexcept { - char* dst; - - if (len >= IntTraits::maxValue() / 2 || !(dst = prepare(op, len * 2))) - return DebugUtils::errored(kErrorNoHeapMemory);; - - const char* src = static_cast(data); - for (size_t i = 0; i < len; i++, dst += 2, src++) { - dst[0] = StringBuilder_numbers[(src[0] >> 4) & 0xF]; - dst[1] = 
StringBuilder_numbers[(src[0] ) & 0xF]; - } - return kErrorOk; -} - -Error StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept { - char buf[1024]; - - vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap); - buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0'; - - return _opString(op, buf); -} - -Error StringBuilder::setFormat(const char* fmt, ...) noexcept { - bool result; - - va_list ap; - va_start(ap, fmt); - result = _opVFormat(kStringOpSet, fmt, ap); - va_end(ap); - - return result; -} - -Error StringBuilder::appendFormat(const char* fmt, ...) noexcept { - bool result; - - va_list ap; - va_start(ap, fmt); - result = _opVFormat(kStringOpAppend, fmt, ap); - va_end(ap); - - return result; -} - -bool StringBuilder::eq(const char* str, size_t len) const noexcept { - const char* aData = _data; - const char* bData = str; - - size_t aLength = _length; - size_t bLength = len; - - if (bLength == Globals::kInvalidIndex) { - size_t i; - for (i = 0; i < aLength; i++) - if (aData[i] != bData[i] || bData[i] == 0) - return false; - return bData[i] == 0; - } - else { - if (aLength != bLength) - return false; - return ::memcmp(aData, bData, aLength) == 0; - } -} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/string.h b/src/asmjit/base/string.h deleted file mode 100644 index 8d1ef16..0000000 --- a/src/asmjit/base/string.h +++ /dev/null @@ -1,289 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_STRING_H -#define _ASMJIT_BASE_STRING_H - -// [Dependencies] -#include "../base/globals.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::SmallString] -// ============================================================================ - -//! Small string is a template that helps to create strings that can be either -//! statically allocated if they are small, or externally allocated in case -//! their length exceed the limit. The `WholeSize` represents the size of the -//! whole `SmallString` structure, based on that size the maximum size of the -//! internal buffer is determined. -template -class SmallString { -public: - enum { kMaxEmbeddedLength = WholeSize - 5 }; - - ASMJIT_INLINE SmallString() noexcept { reset(); } - ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); } - - ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; } - ASMJIT_INLINE bool isEmbedded() const noexcept { return _length <= kMaxEmbeddedLength; } - ASMJIT_INLINE bool mustEmbed(size_t len) const noexcept { return len <= kMaxEmbeddedLength; } - - ASMJIT_INLINE uint32_t getLength() const noexcept { return _length; } - ASMJIT_INLINE char* getData() const noexcept { - return _length <= kMaxEmbeddedLength ? 
const_cast(_embedded) : _external[1]; - } - - ASMJIT_INLINE void setEmbedded(const char* data, size_t len) noexcept { - ASMJIT_ASSERT(len <= kMaxEmbeddedLength); - - _length = static_cast(len); - ::memcpy(_embedded, data, len); - _embedded[len] = '\0'; - } - - ASMJIT_INLINE void setExternal(const char* data, size_t len) noexcept { - ASMJIT_ASSERT(len > kMaxEmbeddedLength); - ASMJIT_ASSERT(len <= ~static_cast(0)); - - _length = static_cast(len); - _external[1] = const_cast(data); - } - - union { - struct { - uint32_t _length; - char _embedded[WholeSize - 4]; - }; - char* _external[2]; - }; -}; - -// ============================================================================ -// [asmjit::StringBuilder] -// ============================================================================ - -//! String builder. -//! -//! String builder was designed to be able to build a string using append like -//! operation to append numbers, other strings, or signle characters. It can -//! allocate it's own buffer or use a buffer created on the stack. -//! -//! String builder contains method specific to AsmJit functionality, used for -//! logging or HTML output. -class StringBuilder { -public: - ASMJIT_NONCOPYABLE(StringBuilder) - - //! \internal - //! - //! String operation. - ASMJIT_ENUM(OpType) { - kStringOpSet = 0, //!< Replace the current string by a given content. - kStringOpAppend = 1 //!< Append a given content to the current string. - }; - - //! \internal - //! - //! String format flags. - ASMJIT_ENUM(StringFormatFlags) { - kStringFormatShowSign = 0x00000001, - kStringFormatShowSpace = 0x00000002, - kStringFormatAlternate = 0x00000004, - kStringFormatSigned = 0x80000000 - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_API StringBuilder() noexcept; - ASMJIT_API ~StringBuilder() noexcept; - - ASMJIT_INLINE StringBuilder(const _NoInit&) noexcept {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get string builder capacity. - ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; } - //! Get length. - ASMJIT_INLINE size_t getLength() const noexcept { return _length; } - - //! Get null-terminated string data. - ASMJIT_INLINE char* getData() noexcept { return _data; } - //! Get null-terminated string data (const). - ASMJIT_INLINE const char* getData() const noexcept { return _data; } - - // -------------------------------------------------------------------------- - // [Prepare / Reserve] - // -------------------------------------------------------------------------- - - //! Prepare to set/append. - ASMJIT_API char* prepare(uint32_t op, size_t len) noexcept; - - //! Reserve `to` bytes in string builder. - ASMJIT_API Error reserve(size_t to) noexcept; - - // -------------------------------------------------------------------------- - // [Clear] - // -------------------------------------------------------------------------- - - //! Clear the content in String builder. 
- ASMJIT_API void clear() noexcept; - - // -------------------------------------------------------------------------- - // [Op] - // -------------------------------------------------------------------------- - - ASMJIT_API Error _opString(uint32_t op, const char* str, size_t len = Globals::kInvalidIndex) noexcept; - ASMJIT_API Error _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept; - ASMJIT_API Error _opChar(uint32_t op, char c) noexcept; - ASMJIT_API Error _opChars(uint32_t op, char c, size_t n) noexcept; - ASMJIT_API Error _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept; - ASMJIT_API Error _opHex(uint32_t op, const void* data, size_t len) noexcept; - - // -------------------------------------------------------------------------- - // [Set] - // -------------------------------------------------------------------------- - - //! Replace the current string with `str` having `len` characters (or `kInvalidIndex` if it's null terminated). - ASMJIT_INLINE Error setString(const char* str, size_t len = Globals::kInvalidIndex) noexcept { return _opString(kStringOpSet, str, len); } - //! Replace the current content by a formatted string `fmt`. - ASMJIT_API Error setFormat(const char* fmt, ...) noexcept; - //! Replace the current content by a formatted string `fmt` (va_list version). - ASMJIT_INLINE Error setFormatVA(const char* fmt, va_list ap) noexcept { return _opVFormat(kStringOpSet, fmt, ap); } - - //! Replace the current content by a single `c` character. - ASMJIT_INLINE Error setChar(char c) noexcept { return _opChar(kStringOpSet, c); } - //! Replace the current content by `c` character `n` times. - ASMJIT_INLINE Error setChars(char c, size_t n) noexcept { return _opChars(kStringOpSet, c, n); } - - //! Replace the current content by a formatted integer `i` (signed). - ASMJIT_INLINE Error setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept { - return _opNumber(kStringOpSet, i, base, width, flags | kStringFormatSigned); - } - - //! Replace the current content by a formatted integer `i` (unsigned). - ASMJIT_INLINE Error setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept { - return _opNumber(kStringOpSet, i, base, width, flags); - } - - //! Replace the current content by the given `data` converted to a HEX string. - ASMJIT_INLINE Error setHex(const void* data, size_t len) noexcept { - return _opHex(kStringOpSet, data, len); - } - - // -------------------------------------------------------------------------- - // [Append] - // -------------------------------------------------------------------------- - - //! Append string `str` having `len` characters (or `kInvalidIndex` if it's null terminated). - ASMJIT_INLINE Error appendString(const char* str, size_t len = Globals::kInvalidIndex) noexcept { return _opString(kStringOpAppend, str, len); } - //! Append a formatted string `fmt`. - ASMJIT_API Error appendFormat(const char* fmt, ...) noexcept; - //! Append a formatted string `fmt` (va_list version). - ASMJIT_INLINE Error appendFormatVA(const char* fmt, va_list ap) noexcept { return _opVFormat(kStringOpAppend, fmt, ap); } - - //! Append a single `c` character. - ASMJIT_INLINE Error appendChar(char c) noexcept { return _opChar(kStringOpAppend, c); } - //! Append `c` character `n` times. - ASMJIT_INLINE Error appendChars(char c, size_t n) noexcept { return _opChars(kStringOpAppend, c, n); } - - //! Append `i`. 
- ASMJIT_INLINE Error appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept { - return _opNumber(kStringOpAppend, static_cast(i), base, width, flags | kStringFormatSigned); - } - - //! Append `i`. - ASMJIT_INLINE Error appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept { - return _opNumber(kStringOpAppend, i, base, width, flags); - } - - //! Append the given `data` converted to a HEX string. - ASMJIT_INLINE Error appendHex(const void* data, size_t len) noexcept { - return _opHex(kStringOpAppend, data, len); - } - - // -------------------------------------------------------------------------- - // [Eq] - // -------------------------------------------------------------------------- - - //! Check for equality with other `str` of length `len`. - ASMJIT_API bool eq(const char* str, size_t len = Globals::kInvalidIndex) const noexcept; - //! Check for equality with `other`. - ASMJIT_INLINE bool eq(const StringBuilder& other) const noexcept { return eq(other._data, other._length); } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool operator==(const StringBuilder& other) const noexcept { return eq(other); } - ASMJIT_INLINE bool operator!=(const StringBuilder& other) const noexcept { return !eq(other); } - - ASMJIT_INLINE bool operator==(const char* str) const noexcept { return eq(str); } - ASMJIT_INLINE bool operator!=(const char* str) const noexcept { return !eq(str); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - char* _data; //!< String data. - size_t _length; //!< String length. - size_t _capacity; //!< String capacity. - size_t _canFree; //!< If the string data can be freed. -}; - -// ============================================================================ -// [asmjit::StringBuilderTmp] -// ============================================================================ - -//! Temporary string builder, has statically allocated `N` bytes. -template -class StringBuilderTmp : public StringBuilder { -public: - ASMJIT_NONCOPYABLE(StringBuilderTmp) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE StringBuilderTmp() noexcept : StringBuilder(NoInit) { - _data = _embeddedData; - _data[0] = 0; - - _length = 0; - _capacity = N; - _canFree = false; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Embedded data. - char _embeddedData[static_cast( - N + 1 + sizeof(intptr_t)) & ~static_cast(sizeof(intptr_t) - 1)]; -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_STRING_H diff --git a/src/asmjit/base/utils.cpp b/src/asmjit/base/utils.cpp deleted file mode 100644 index 91e0170..0000000 --- a/src/asmjit/base/utils.cpp +++ /dev/null @@ -1,176 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
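// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): a minimal use of the
// StringBuilder / StringBuilderTmp API removed in the string.h hunk above.
// The calls used here (setFormat, appendChar, appendInt, appendHex, getData,
// getLength) are declared in that removed header; the include paths and the
// logging scenario itself are assumptions made for the example only.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>
#include "../base/string.h"  // Old src/asmjit/base layout, removed by this patch.

static void stringBuilderExample() {
  asmjit::StringBuilderTmp<256> sb;    // 256 bytes embedded; the heap is used only
                                       // once the content outgrows the buffer.
  sb.setFormat("mov eax, %d", 42);     // kStringOpSet - replaces the current content.
  sb.appendChar(' ');                  // kStringOpAppend - grows through prepare().
  sb.appendInt(-1, 16);                // Signed number formatted in base 16.

  const uint8_t bytes[] = { 0xB8, 0x2A };
  sb.appendHex(bytes, sizeof(bytes));  // Two hex digits are emitted per input byte.

  ::printf("%s (%u chars)\n", sb.getData(), unsigned(sb.getLength()));
}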
- -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/utils.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::Utils - Unit] -// ============================================================================ - -#if defined(ASMJIT_TEST) -UNIT(base_utils) { - uint32_t i; - - INFO("IntTraits<>"); - EXPECT(IntTraits::kIsSigned,"IntTraits should report signed"); - EXPECT(IntTraits::kIsSigned, "IntTraits should report signed"); - EXPECT(IntTraits::kIsSigned, "IntTraits should report signed"); - EXPECT(IntTraits::kIsSigned, "IntTraits should report signed"); - - EXPECT(IntTraits::kIsUnsigned, "IntTraits should report unsigned"); - EXPECT(IntTraits::kIsUnsigned, "IntTraits should report unsigned"); - EXPECT(IntTraits::kIsUnsigned, "IntTraits should report unsigned"); - EXPECT(IntTraits::kIsUnsigned, "IntTraits should report unsigned"); - - EXPECT(IntTraits::kIsSigned, "IntTraits should report signed"); - EXPECT(IntTraits::kIsUnsigned, "IntTraits should report unsigned"); - - EXPECT(IntTraits::kIsIntPtr, "IntTraits should report intptr_t type"); - EXPECT(IntTraits::kIsIntPtr, "IntTraits should report intptr_t type"); - - INFO("Utils::inInterval()"); - EXPECT(Utils::inInterval(11 , 10, 20) == true , "Utils::inInterval should return true if inside"); - EXPECT(Utils::inInterval(101, 10, 20) == false, "Utils::inInterval should return false if outside"); - - INFO("Utils::isInt8()"); - EXPECT(Utils::isInt8(-128) == true , "Utils::isInt8<> should return true if inside"); - EXPECT(Utils::isInt8( 127) == true , "Utils::isInt8<> should return true if inside"); - EXPECT(Utils::isInt8(-129) == false, "Utils::isInt8<> should return false if outside"); - EXPECT(Utils::isInt8( 128) == false, "Utils::isInt8<> should return false if outside"); - - INFO("Utils::isInt16()"); - EXPECT(Utils::isInt16(-32768) == true , "Utils::isInt16<> should return true if inside"); - EXPECT(Utils::isInt16( 32767) == true , "Utils::isInt16<> should return true if inside"); - EXPECT(Utils::isInt16(-32769) == false, "Utils::isInt16<> should return false if outside"); - EXPECT(Utils::isInt16( 32768) == false, "Utils::isInt16<> should return false if outside"); - - INFO("Utils::isInt32()"); - EXPECT(Utils::isInt32( 2147483647 ) == true, "Utils::isInt32 should return true if inside"); - EXPECT(Utils::isInt32(-2147483647 - 1) == true, "Utils::isInt32 should return true if inside"); - EXPECT(Utils::isInt32(ASMJIT_UINT64_C(2147483648)) == false, "Utils::isInt32 should return false if outside"); - EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == false, "Utils::isInt32 should return false if outside"); - EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isInt32 should return false if outside"); - - INFO("Utils::isUInt8()"); - EXPECT(Utils::isUInt8(0) == true , "Utils::isUInt8<> should return true if inside"); - EXPECT(Utils::isUInt8(255) == true , "Utils::isUInt8<> should return true if inside"); - EXPECT(Utils::isUInt8(256) == false, "Utils::isUInt8<> should return false if outside"); - EXPECT(Utils::isUInt8(-1) == false, "Utils::isUInt8<> should return false if negative"); - - INFO("Utils::isUInt12()"); - EXPECT(Utils::isUInt12(0) == true , "Utils::isUInt12<> should return true if inside"); - EXPECT(Utils::isUInt12(4095) == true , "Utils::isUInt12<> should return true if inside"); - EXPECT(Utils::isUInt12(4096) == false, "Utils::isUInt12<> should return false if outside"); 
- EXPECT(Utils::isUInt12(-1) == false, "Utils::isUInt12<> should return false if negative"); - - INFO("Utils::isUInt16()"); - EXPECT(Utils::isUInt16(0) == true , "Utils::isUInt16<> should return true if inside"); - EXPECT(Utils::isUInt16(65535) == true , "Utils::isUInt16<> should return true if inside"); - EXPECT(Utils::isUInt16(65536) == false, "Utils::isUInt16<> should return false if outside"); - EXPECT(Utils::isUInt16(-1) == false, "Utils::isUInt16<> should return false if negative"); - - INFO("Utils::isUInt32()"); - EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == true, "Utils::isUInt32 should return true if inside"); - EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isUInt32 should return false if outside"); - EXPECT(Utils::isUInt32(-1) == false, "Utils::isUInt32 should return false if negative"); - - INFO("Utils::isPower2()"); - for (i = 0; i < 64; i++) { - EXPECT(Utils::isPowerOf2(static_cast(1) << i) == true, - "Utils::isPower2() didn't report power of 2"); - EXPECT(Utils::isPowerOf2((static_cast(1) << i) ^ 0x001101) == false, - "Utils::isPower2() didn't report not power of 2"); - } - - INFO("Utils::mask()"); - for (i = 0; i < 32; i++) { - EXPECT(Utils::mask(i) == (1 << i), - "Utils::mask(%u) should return %X", i, (1 << i)); - } - - INFO("Utils::bits()"); - for (i = 0; i < 32; i++) { - uint32_t expectedBits = 0; - - for (uint32_t b = 0; b < i; b++) - expectedBits |= static_cast(1) << b; - - EXPECT(Utils::bits(i) == expectedBits, - "Utils::bits(%u) should return %X", i, expectedBits); - } - - INFO("Utils::hasBit()"); - for (i = 0; i < 32; i++) { - EXPECT(Utils::hasBit((1 << i), i) == true, - "Utils::hasBit(%X, %u) should return true", (1 << i), i); - } - - INFO("Utils::bitCount()"); - for (i = 0; i < 32; i++) { - EXPECT(Utils::bitCount((1 << i)) == 1, - "Utils::bitCount(%X) should return true", (1 << i)); - } - EXPECT(Utils::bitCount(0x000000F0) == 4, ""); - EXPECT(Utils::bitCount(0x10101010) == 4, ""); - EXPECT(Utils::bitCount(0xFF000000) == 8, ""); - EXPECT(Utils::bitCount(0xFFFFFFF7) == 31, ""); - EXPECT(Utils::bitCount(0x7FFFFFFF) == 31, ""); - - INFO("Utils::findFirstBit()"); - for (i = 0; i < 32; i++) { - EXPECT(Utils::findFirstBit((1 << i)) == i, - "Utils::findFirstBit(%X) should return %u", (1 << i), i); - } - - INFO("Utils::keepNOnesFromRight()"); - EXPECT(Utils::keepNOnesFromRight(0xF, 1) == 0x1, ""); - EXPECT(Utils::keepNOnesFromRight(0xF, 2) == 0x3, ""); - EXPECT(Utils::keepNOnesFromRight(0xF, 3) == 0x7, ""); - EXPECT(Utils::keepNOnesFromRight(0x5, 2) == 0x5, ""); - EXPECT(Utils::keepNOnesFromRight(0xD, 2) == 0x5, ""); - - INFO("Utils::isAligned()"); - EXPECT(Utils::isAligned(0xFFFF, 4) == false, ""); - EXPECT(Utils::isAligned(0xFFF4, 4) == true , ""); - EXPECT(Utils::isAligned(0xFFF8, 8) == true , ""); - EXPECT(Utils::isAligned(0xFFF0, 16) == true , ""); - - INFO("Utils::alignTo()"); - EXPECT(Utils::alignTo(0xFFFF, 4) == 0x10000, ""); - EXPECT(Utils::alignTo(0xFFF4, 4) == 0x0FFF4, ""); - EXPECT(Utils::alignTo(0xFFF8, 8) == 0x0FFF8, ""); - EXPECT(Utils::alignTo(0xFFF0, 16) == 0x0FFF0, ""); - EXPECT(Utils::alignTo(0xFFF0, 32) == 0x10000, ""); - - INFO("Utils::alignToPowerOf2()"); - EXPECT(Utils::alignToPowerOf2(0xFFFF) == 0x10000, ""); - EXPECT(Utils::alignToPowerOf2(0xF123) == 0x10000, ""); - EXPECT(Utils::alignToPowerOf2(0x0F00) == 0x01000, ""); - EXPECT(Utils::alignToPowerOf2(0x0100) == 0x00100, ""); - EXPECT(Utils::alignToPowerOf2(0x1001) == 0x02000, ""); - - INFO("Utils::alignDiff()"); - EXPECT(Utils::alignDiff(0xFFFF, 4) == 1, ""); - 
EXPECT(Utils::alignDiff(0xFFF4, 4) == 0, ""); - EXPECT(Utils::alignDiff(0xFFF8, 8) == 0, ""); - EXPECT(Utils::alignDiff(0xFFF0, 16) == 0, ""); - EXPECT(Utils::alignDiff(0xFFF0, 32) == 16, ""); -} -#endif // ASMJIT_TEST - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/utils.h b/src/asmjit/base/utils.h deleted file mode 100644 index b926cea..0000000 --- a/src/asmjit/base/utils.h +++ /dev/null @@ -1,1358 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_UTILS_H -#define _ASMJIT_BASE_UTILS_H - -// [Dependencies] -#include "../base/globals.h" - -#if ASMJIT_CC_MSC_GE(14, 0, 0) -# include -#endif // ASMJIT_OS_WINDOWS - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::IntTraits] -// ============================================================================ - -//! \internal -//! \{ -template -struct IntTraitsPrivate {}; // Let it fail if not specialized! - -template<> struct IntTraitsPrivate<1, 0> { typedef int IntType; typedef int8_t SignedType; typedef uint8_t UnsignedType; }; -template<> struct IntTraitsPrivate<1, 1> { typedef int IntType; typedef int8_t SignedType; typedef uint8_t UnsignedType; }; - -template<> struct IntTraitsPrivate<2, 0> { typedef int IntType; typedef int16_t SignedType; typedef uint16_t UnsignedType; }; -template<> struct IntTraitsPrivate<2, 1> { typedef int IntType; typedef int16_t SignedType; typedef uint16_t UnsignedType; }; - -template<> struct IntTraitsPrivate<4, 0> { typedef int64_t IntType; typedef int32_t SignedType; typedef uint32_t UnsignedType; }; -template<> struct IntTraitsPrivate<4, 1> { typedef int IntType; typedef int32_t SignedType; typedef uint32_t UnsignedType; }; - -template<> struct IntTraitsPrivate<8, 0> { typedef int64_t IntType; typedef int64_t SignedType; typedef uint64_t UnsignedType; }; -template<> struct IntTraitsPrivate<8, 1> { typedef int64_t IntType; typedef int64_t SignedType; typedef uint64_t UnsignedType; }; - -//! \internal -template -struct IntTraits { - enum { - kIsSigned = static_cast(~static_cast(0)) < static_cast(0), - kIsUnsigned = !kIsSigned, - kIs8Bit = sizeof(T) == 1, - kIs16Bit = sizeof(T) == 2, - kIs32Bit = sizeof(T) == 4, - kIs64Bit = sizeof(T) == 8, - kIsIntPtr = sizeof(T) == sizeof(intptr_t) - }; - - typedef typename IntTraitsPrivate::IntType IntType; - typedef typename IntTraitsPrivate::SignedType SignedType; - typedef typename IntTraitsPrivate::UnsignedType UnsignedType; - - //! Get a minimum value of `T`. - static ASMJIT_INLINE T minValue() noexcept { - return kIsSigned ? T((~static_cast(0) >> 1) + static_cast(1)) : T(0); - } - - //! Get a maximum value of `T`. - static ASMJIT_INLINE T maxValue() noexcept { - return kIsSigned ? T(~static_cast(0) >> 1) : ~T(0); - } -}; - -//! \} - -// ============================================================================ -// [asmjit::Utils] -// ============================================================================ - -//! AsmJit utilities - integer, string, etc... -namespace Utils { - // -------------------------------------------------------------------------- - // [Float <-> Int] - // -------------------------------------------------------------------------- - - //! \internal - union FloatBits { - int32_t i; - float f; - }; - - //! 
\internal - union DoubleBits { - int64_t i; - double d; - }; - - //! Bit-cast `float` to a 32-bit integer. - static ASMJIT_INLINE int32_t floatAsInt(float f) noexcept { FloatBits m; m.f = f; return m.i; } - //! Bit-cast 32-bit integer to `float`. - static ASMJIT_INLINE float intAsFloat(int32_t i) noexcept { FloatBits m; m.i = i; return m.f; } - - //! Bit-cast `double` to a 64-bit integer. - static ASMJIT_INLINE int64_t doubleAsInt(double d) noexcept { DoubleBits m; m.d = d; return m.i; } - //! Bit-cast 64-bit integer to `double`. - static ASMJIT_INLINE double intAsDouble(int64_t i) noexcept { DoubleBits m; m.i = i; return m.d; } - - // -------------------------------------------------------------------------- - // [Pack / Unpack] - // -------------------------------------------------------------------------- - - //! Pack four 8-bit integer into a 32-bit integer as it is an array of `{b0,b1,b2,b3}`. - static ASMJIT_INLINE uint32_t pack32_4x8(uint32_t b0, uint32_t b1, uint32_t b2, uint32_t b3) noexcept { - return ASMJIT_PACK32_4x8(b0, b1, b2, b3); - } - - //! Pack two 32-bit integer into a 64-bit integer as it is an array of `{u0,u1}`. - static ASMJIT_INLINE uint64_t pack64_2x32(uint32_t u0, uint32_t u1) noexcept { - return ASMJIT_ARCH_LE ? (static_cast(u1) << 32) + u0 - : (static_cast(u0) << 32) + u1; - } - - // -------------------------------------------------------------------------- - // [Position of byte (in bit-shift)] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE uint32_t byteShiftOfDWordStruct(uint32_t index) noexcept { - if (ASMJIT_ARCH_LE) - return index * 8; - else - return (sizeof(uint32_t) - 1 - index) * 8; - } - - // -------------------------------------------------------------------------- - // [Lower/Upper] - // -------------------------------------------------------------------------- - - template - static ASMJIT_INLINE T toLower(T c) noexcept { return c ^ (static_cast(c >= T('A') && c <= T('Z')) << 5); } - template - static ASMJIT_INLINE T toUpper(T c) noexcept { return c ^ (static_cast(c >= T('a') && c <= T('z')) << 5); } - - // -------------------------------------------------------------------------- - // [Hash] - // -------------------------------------------------------------------------- - - // \internal - static ASMJIT_INLINE uint32_t hashRound(uint32_t hash, uint32_t c) noexcept { return hash * 65599 + c; } - - // Get a hash of the given string `str` of `len` length. Length must be valid - // as this function doesn't check for a null terminator and allows it in the - // middle of the string. - static ASMJIT_INLINE uint32_t hashString(const char* str, size_t len) noexcept { - uint32_t hVal = 0; - for (uint32_t i = 0; i < len; i++) - hVal = hashRound(hVal, str[i]); - return hVal; - } - - // -------------------------------------------------------------------------- - // [Swap] - // -------------------------------------------------------------------------- - - template - static ASMJIT_INLINE void swap(T& a, T& b) noexcept { - T tmp = a; - a = b; - b = tmp; - } - - // -------------------------------------------------------------------------- - // [InInterval] - // -------------------------------------------------------------------------- - - //! Get whether `x` is greater than or equal to `a` and lesses than or equal to `b`. 
- template - static ASMJIT_INLINE bool inInterval(T x, T a, T b) noexcept { - return x >= a && x <= b; - } - - // -------------------------------------------------------------------------- - // [AsInt] - // -------------------------------------------------------------------------- - - //! Map an integer `x` of type `T` to `int` or `int64_t` depending on the - //! type. Used internally by AsmJit to dispatch arguments that can be of - //! arbitrary integer type into a function argument that is either `int` or - //! `int64_t`. - template - static ASMJIT_INLINE typename IntTraits::IntType asInt(T x) noexcept { - return static_cast::IntType>(x); - } - - // -------------------------------------------------------------------------- - // [IsInt / IsUInt] - // -------------------------------------------------------------------------- - - //! Get whether the given integer `x` can be casted to a 4-bit signed integer. - template - static ASMJIT_INLINE bool isInt4(T x) noexcept { - typedef typename IntTraits::SignedType SignedType; - typedef typename IntTraits::UnsignedType UnsignedType; - - if (IntTraits::kIsSigned) - return inInterval(SignedType(x), -8, 7); - else - return UnsignedType(x) <= UnsignedType(7U); - } - - //! Get whether the given integer `x` can be casted to an 8-bit signed integer. - template - static ASMJIT_INLINE bool isInt8(T x) noexcept { - typedef typename IntTraits::SignedType SignedType; - typedef typename IntTraits::UnsignedType UnsignedType; - - if (IntTraits::kIsSigned) - return sizeof(T) <= 1 || inInterval(SignedType(x), -128, 127); - else - return UnsignedType(x) <= UnsignedType(127U); - } - - //! Get whether the given integer `x` can be casted to a 16-bit signed integer. - template - static ASMJIT_INLINE bool isInt16(T x) noexcept { - typedef typename IntTraits::SignedType SignedType; - typedef typename IntTraits::UnsignedType UnsignedType; - - if (IntTraits::kIsSigned) - return sizeof(T) <= 2 || inInterval(SignedType(x), -32768, 32767); - else - return sizeof(T) <= 1 || UnsignedType(x) <= UnsignedType(32767U); - } - - //! Get whether the given integer `x` can be casted to a 32-bit signed integer. - template - static ASMJIT_INLINE bool isInt32(T x) noexcept { - typedef typename IntTraits::SignedType SignedType; - typedef typename IntTraits::UnsignedType UnsignedType; - - if (IntTraits::kIsSigned) - return sizeof(T) <= 4 || inInterval(SignedType(x), -2147483647 - 1, 2147483647); - else - return sizeof(T) <= 2 || UnsignedType(x) <= UnsignedType(2147483647U); - } - - //! Get whether the given integer `x` can be casted to a 4-bit unsigned integer. - template - static ASMJIT_INLINE bool isUInt4(T x) noexcept { - typedef typename IntTraits::UnsignedType UnsignedType; - - if (IntTraits::kIsSigned) - return x >= T(0) && x <= T(15); - else - return UnsignedType(x) <= UnsignedType(15U); - } - - //! Get whether the given integer `x` can be casted to an 8-bit unsigned integer. - template - static ASMJIT_INLINE bool isUInt8(T x) noexcept { - typedef typename IntTraits::UnsignedType UnsignedType; - - if (IntTraits::kIsSigned) - return x >= T(0) && (sizeof(T) <= 1 ? true : x <= T(255)); - else - return sizeof(T) <= 1 || UnsignedType(x) <= UnsignedType(255U); - } - - //! Get whether the given integer `x` can be casted to a 12-bit unsigned integer (ARM specific). - template - static ASMJIT_INLINE bool isUInt12(T x) noexcept { - typedef typename IntTraits::UnsignedType UnsignedType; - - if (IntTraits::kIsSigned) - return x >= T(0) && (sizeof(T) <= 1 ? 
true : x <= T(4095)); - else - return sizeof(T) <= 1 || UnsignedType(x) <= UnsignedType(4095U); - } - - //! Get whether the given integer `x` can be casted to a 16-bit unsigned integer. - template - static ASMJIT_INLINE bool isUInt16(T x) noexcept { - typedef typename IntTraits::UnsignedType UnsignedType; - - if (IntTraits::kIsSigned) - return x >= T(0) && (sizeof(T) <= 2 ? true : x <= T(65535)); - else - return sizeof(T) <= 2 || UnsignedType(x) <= UnsignedType(65535U); - } - - //! Get whether the given integer `x` can be casted to a 32-bit unsigned integer. - template - static ASMJIT_INLINE bool isUInt32(T x) noexcept { - typedef typename IntTraits::UnsignedType UnsignedType; - - if (IntTraits::kIsSigned) - return x >= T(0) && (sizeof(T) <= 4 ? true : x <= T(4294967295U)); - else - return sizeof(T) <= 4 || UnsignedType(x) <= UnsignedType(4294967295U); - } - - // -------------------------------------------------------------------------- - // [IsPowerOf2] - // -------------------------------------------------------------------------- - - //! Get whether the `n` value is a power of two (only one bit is set). - template - static ASMJIT_INLINE bool isPowerOf2(T n) noexcept { - return n != 0 && (n & (n - 1)) == 0; - } - - // -------------------------------------------------------------------------- - // [Mask] - // -------------------------------------------------------------------------- - - //! Generate a bit-mask that has `x` bit set. - static ASMJIT_INLINE uint32_t mask(uint32_t x) noexcept { - ASMJIT_ASSERT(x < 32); - return static_cast(1) << x; - } - - //! Generate a bit-mask that has `x0` and `x1` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1) noexcept { - return mask(x0) | mask(x1); - } - - //! Generate a bit-mask that has `x0`, `x1` and `x2` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2) noexcept { - return mask(x0, x1) | mask(x2); - } - - //! Generate a bit-mask that has `x0`, `x1`, `x2` and `x3` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept { - return mask(x0, x1) | mask(x2, x3); - } - - //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3` and `x4` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4) noexcept { - return mask(x0, x1) | mask(x2, x3) | mask(x4); - } - - //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4` and `x5` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5) noexcept { - return mask(x0, x1) | mask(x2, x3) | mask(x4, x5); - } - - //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5` and `x6` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6) noexcept { - return mask(x0, x1) | mask(x2, x3) | mask(x4, x5) | mask(x6); - } - - //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5`, `x6` and `x7` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept { - return mask(x0, x1) | mask(x2, x3) | mask(x4, x5) | mask(x6, x7); - } - - //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5`, `x6`, `x7` and `x8` bits set. 
- static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8) noexcept { - return mask(x0, x1) | mask(x2, x3) | mask(x4, x5) | mask(x6, x7) | mask(x8); - } - - //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5`, `x6`, `x7`, `x8` and `x9` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8, uint32_t x9) noexcept { - return mask(x0, x1) | mask(x2, x3) | mask(x4, x5) | mask(x6, x7) | mask(x8, x9); - } - - // -------------------------------------------------------------------------- - // [Bits] - // -------------------------------------------------------------------------- - - //! Generate a bit-mask that has `x` least significant bits set. - static ASMJIT_INLINE uint32_t bits(uint32_t x) noexcept { - // Shifting more bits than the type has results in undefined behavior. In - // such case asmjit trashes the result by ORing with `overflow` mask, which - // discards the undefined value returned by the shift. - uint32_t overflow = static_cast( - -static_cast(x >= sizeof(uint32_t) * 8)); - - return ((static_cast(1) << x) - 1U) | overflow; - } - - // -------------------------------------------------------------------------- - // [HasBit] - // -------------------------------------------------------------------------- - - //! Get whether `x` has bit `n` set. - template - static ASMJIT_INLINE bool hasBit(T x, Index n) noexcept { - return (x & (static_cast(1) << n)) != 0; - } - - // -------------------------------------------------------------------------- - // [BitCount] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE uint32_t bitCountSlow(uint32_t x) noexcept { - // From: http://graphics.stanford.edu/~seander/bithacks.html - x = x - ((x >> 1) & 0x55555555U); - x = (x & 0x33333333U) + ((x >> 2) & 0x33333333U); - return (((x + (x >> 4)) & 0x0F0F0F0FU) * 0x01010101U) >> 24; - } - - //! Get count of bits in `x`. - static ASMJIT_INLINE uint32_t bitCount(uint32_t x) noexcept { -#if ASMJIT_CC_GCC || ASMJIT_CC_CLANG - return __builtin_popcount(x); -#else - return bitCountSlow(x); -#endif - } - - // -------------------------------------------------------------------------- - // [FirstBitOf] - // -------------------------------------------------------------------------- - - template - struct FirstBitOfTImpl { - enum { - _shift = (In & ASMJIT_UINT64_C(0x0000FFFFFFFFFFFF)) == 0 ? 48 : - (In & ASMJIT_UINT64_C(0x00000000FFFFFFFF)) == 0 ? 32 : - (In & ASMJIT_UINT64_C(0x000000000000FFFF)) == 0 ? 16 : 0, - - kValue = ((In >> _shift) & 0x0001) ? (_shift + 0) : - ((In >> _shift) & 0x0002) ? (_shift + 1) : - ((In >> _shift) & 0x0004) ? (_shift + 2) : - ((In >> _shift) & 0x0008) ? (_shift + 3) : - ((In >> _shift) & 0x0010) ? (_shift + 4) : - ((In >> _shift) & 0x0020) ? (_shift + 5) : - ((In >> _shift) & 0x0040) ? (_shift + 6) : - ((In >> _shift) & 0x0080) ? (_shift + 7) : - ((In >> _shift) & 0x0100) ? (_shift + 8) : - ((In >> _shift) & 0x0200) ? (_shift + 9) : - ((In >> _shift) & 0x0400) ? (_shift + 10) : - ((In >> _shift) & 0x0800) ? (_shift + 11) : - ((In >> _shift) & 0x1000) ? (_shift + 12) : - ((In >> _shift) & 0x2000) ? (_shift + 13) : - ((In >> _shift) & 0x4000) ? (_shift + 14) : - ((In >> _shift) & 0x8000) ? 
(_shift + 15) : 0 - }; - }; - - template<> - struct FirstBitOfTImpl<0> {}; - - template - static ASMJIT_INLINE uint32_t firstBitOfT() noexcept { return FirstBitOfTImpl::kValue; } - - // -------------------------------------------------------------------------- - // [FindFirstBit] - // -------------------------------------------------------------------------- - - //! \internal - static ASMJIT_INLINE uint32_t findFirstBitSlow(uint32_t mask) noexcept { - // This is a reference (slow) implementation of `findFirstBit()`, used when - // we don't have a C++ compiler support. The implementation speed has been - // improved to check for 2 bits per iteration. - uint32_t i = 1; - - while (mask != 0) { - uint32_t two = mask & 0x3; - if (two != 0x0) - return i - (two & 0x1); - - i += 2; - mask >>= 2; - } - - return 0xFFFFFFFFU; - } - - //! Find a first bit in `mask`. - static ASMJIT_INLINE uint32_t findFirstBit(uint32_t mask) noexcept { -#if ASMJIT_CC_MSC_GE(14, 0, 0) && (ASMJIT_ARCH_X86 || ASMJIT_ARCH_ARM32 || \ - ASMJIT_ARCH_X64 || ASMJIT_ARCH_ARM64) - DWORD i; - if (_BitScanForward(&i, mask)) - return static_cast(i); - else - return 0xFFFFFFFFU; -#elif ASMJIT_CC_GCC_GE(3, 4, 6) || ASMJIT_CC_CLANG - if (mask) - return __builtin_ctz(mask); - else - return 0xFFFFFFFFU; -#else - return findFirstBitSlow(mask); -#endif - } - - // -------------------------------------------------------------------------- - // [Misc] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE uint32_t keepNOnesFromRight(uint32_t mask, uint32_t nBits) noexcept { - uint32_t m = 0x1; - - do { - nBits -= (mask & m) != 0; - m <<= 1; - if (nBits == 0) { - m -= 1; - mask &= m; - break; - } - } while (m); - - return mask; - } - - static ASMJIT_INLINE uint32_t indexNOnesFromRight(uint8_t* dst, uint32_t mask, uint32_t nBits) noexcept { - uint32_t totalBits = nBits; - uint8_t i = 0; - uint32_t m = 0x1; - - do { - if (mask & m) { - *dst++ = i; - if (--nBits == 0) - break; - } - - m <<= 1; - i++; - } while (m); - - return totalBits - nBits; - } - - // -------------------------------------------------------------------------- - // [Alignment] - // -------------------------------------------------------------------------- - - template - static ASMJIT_INLINE bool isAligned(X base, Y alignment) noexcept { - typedef typename IntTraitsPrivate::UnsignedType U; - return ((U)base % (U)alignment) == 0; - } - - template - static ASMJIT_INLINE X alignTo(X x, Y alignment) noexcept { - typedef typename IntTraitsPrivate::UnsignedType U; - return (X)( ((U)x + (U)(alignment - 1)) & ~(static_cast(alignment) - 1) ); - } - - //! Get delta required to align `base` to `alignment`. - template - static ASMJIT_INLINE X alignDiff(X base, Y alignment) noexcept { - return alignTo(base, alignment) - base; - } - - template - static ASMJIT_INLINE T alignToPowerOf2(T base) noexcept { - // Implementation is from "Hacker's Delight" by Henry S. Warren, Jr. - base -= 1; - -#if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable: 4293) -#endif // _MSC_VER - - base = base | (base >> 1); - base = base | (base >> 2); - base = base | (base >> 4); - - // 8/16/32 constants are multiplied by the condition to prevent a compiler - // complaining about the 'shift count >= type width' (GCC). - if (sizeof(T) >= 2) base = base | (base >> ( 8 * (sizeof(T) >= 2))); // Base >> 8. - if (sizeof(T) >= 4) base = base | (base >> (16 * (sizeof(T) >= 4))); // Base >> 16. 
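  // Worked example (values match the unit test earlier in this patch,
  // alignToPowerOf2(0x0F00) == 0x1000): base = 0x0F00 - 1 = 0x0EFF; the
  // OR-shift cascade above turns every bit below the highest set bit into 1,
  // giving 0x0FFF, and the final `base + 1` below produces the next power of
  // two, 0x1000. The `sizeof(T) >= N` multipliers only exist to silence the
  // shift-count warnings mentioned above; they do not change the result.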
- if (sizeof(T) >= 8) base = base | (base >> (32 * (sizeof(T) >= 8))); // Base >> 32. - -#if defined(_MSC_VER) -# pragma warning(pop) -#endif // _MSC_VER - - return base + 1; - } - - // -------------------------------------------------------------------------- - // [String] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE size_t strLen(const char* s, size_t maxlen) noexcept { - size_t i; - for (i = 0; i < maxlen; i++) - if (!s[i]) - break; - return i; - } - - static ASMJIT_INLINE const char* findPackedString(const char* p, uint32_t id) noexcept { - uint32_t i = 0; - while (i < id) { - while (p[0]) - p++; - p++; - i++; - } - return p; - } - - //! \internal - //! - //! Compare two instruction names. - //! - //! `a` is a null terminated instruction name from `???InstDB::nameData[]` table. - //! `b` is a non-null terminated instruction name passed to `???Inst::getIdByName()`. - static ASMJIT_INLINE int cmpInstName(const char* a, const char* b, size_t len) noexcept { - for (size_t i = 0; i < len; i++) { - int c = static_cast(static_cast(a[i])) - - static_cast(static_cast(b[i])) ; - if (c != 0) return c; - } - - return static_cast(a[len]); - } - - // -------------------------------------------------------------------------- - // [BSwap] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE uint32_t byteswap32(uint32_t x) noexcept { -#if ASMJIT_CC_MSC - return static_cast(_byteswap_ulong(x)); -#elif ASMJIT_CC_GCC_GE(4, 3, 0) || ASMJIT_CC_CLANG_GE(2, 6, 0) - return __builtin_bswap32(x); -#else - uint32_t y = x & 0x00FFFF00U; - x = (x << 24) + (x >> 24); - y = (y << 8) + (y >> 8); - return x + (y & 0x00FFFF00U); -#endif - } - - // -------------------------------------------------------------------------- - // [ReadMem] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE uint32_t readU8(const void* p) noexcept { - return static_cast(static_cast(p)[0]); - } - - static ASMJIT_INLINE int32_t readI8(const void* p) noexcept { - return static_cast(static_cast(p)[0]); - } - - template - static ASMJIT_INLINE uint32_t readU16xLE(const void* p) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_LE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { - return static_cast(static_cast(p)[0]); - } - else { - uint32_t x = static_cast(static_cast(p)[0]); - uint32_t y = static_cast(static_cast(p)[1]); - return x + (y << 8); - } - } - - template - static ASMJIT_INLINE uint32_t readU16xBE(const void* p) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_BE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { - return static_cast(static_cast(p)[0]); - } - else { - uint32_t x = static_cast(static_cast(p)[0]); - uint32_t y = static_cast(static_cast(p)[1]); - return (x << 8) + y; - } - } - - template - static ASMJIT_INLINE uint32_t readU16x(const void* p) noexcept { - return ASMJIT_ARCH_LE ? readU16xLE(p) : readU16xBE(p); - } - - template - static ASMJIT_INLINE int32_t readI16xLE(const void* p) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? 
Alignment : 1U); - if (ASMJIT_ARCH_LE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { - return static_cast(static_cast(p)[0]); - } - else { - int32_t x = static_cast(static_cast(p)[0]); - int32_t y = static_cast(static_cast(p)[1]); - return x + (y << 8); - } - } - - template - static ASMJIT_INLINE int32_t readI16xBE(const void* p) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_BE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { - return static_cast(static_cast(p)[0]); - } - else { - int32_t x = static_cast(static_cast(p)[0]); - int32_t y = static_cast(static_cast(p)[1]); - return (x << 8) + y; - } - } - - template - static ASMJIT_INLINE int32_t readI16x(const void* p) noexcept { - return ASMJIT_ARCH_LE ? readI16xLE(p) : readI16xBE(p); - } - - static ASMJIT_INLINE uint32_t readU16aLE(const void* p) noexcept { return readU16xLE<2>(p); } - static ASMJIT_INLINE uint32_t readU16uLE(const void* p) noexcept { return readU16xLE<0>(p); } - - static ASMJIT_INLINE uint32_t readU16aBE(const void* p) noexcept { return readU16xBE<2>(p); } - static ASMJIT_INLINE uint32_t readU16uBE(const void* p) noexcept { return readU16xBE<0>(p); } - - static ASMJIT_INLINE uint32_t readU16a(const void* p) noexcept { return readU16x<2>(p); } - static ASMJIT_INLINE uint32_t readU16u(const void* p) noexcept { return readU16x<0>(p); } - - static ASMJIT_INLINE int32_t readI16aLE(const void* p) noexcept { return readI16xLE<2>(p); } - static ASMJIT_INLINE int32_t readI16uLE(const void* p) noexcept { return readI16xLE<0>(p); } - - static ASMJIT_INLINE int32_t readI16aBE(const void* p) noexcept { return readI16xBE<2>(p); } - static ASMJIT_INLINE int32_t readI16uBE(const void* p) noexcept { return readI16xBE<0>(p); } - - static ASMJIT_INLINE int32_t readI16a(const void* p) noexcept { return readI16x<2>(p); } - static ASMJIT_INLINE int32_t readI16u(const void* p) noexcept { return readI16x<0>(p); } - - template - static ASMJIT_INLINE uint32_t readU32xLE(const void* p) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_UNALIGNED_32 || Alignment >= 4) { - uint32_t x = static_cast(p)[0]; - return ASMJIT_ARCH_LE ? x : byteswap32(x); - } - else { - uint32_t x = readU16xLE(static_cast(p) + 0); - uint32_t y = readU16xLE(static_cast(p) + 2); - return x + (y << 16); - } - } - - template - static ASMJIT_INLINE uint32_t readU32xBE(const void* p) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_UNALIGNED_32 || Alignment >= 4) { - uint32_t x = static_cast(p)[0]; - return ASMJIT_ARCH_BE ? x : byteswap32(x); - } - else { - uint32_t x = readU16xBE(static_cast(p) + 0); - uint32_t y = readU16xBE(static_cast(p) + 2); - return (x << 16) + y; - } - } - - template - static ASMJIT_INLINE uint32_t readU32x(const void* p) noexcept { - return ASMJIT_ARCH_LE ? readU32xLE(p) : readU32xBE(p); - } - - template - static ASMJIT_INLINE int32_t readI32xLE(const void* p) noexcept { - return static_cast(readU32xLE(p)); - } - - template - static ASMJIT_INLINE int32_t readI32xBE(const void* p) noexcept { - return static_cast(readU32xBE(p)); - } - - template - static ASMJIT_INLINE int32_t readI32x(const void* p) noexcept { - return ASMJIT_ARCH_LE ? 
readI32xLE(p) : readI32xBE(p); - } - - static ASMJIT_INLINE uint32_t readU32a(const void* p) noexcept { return readU32x<4>(p); } - static ASMJIT_INLINE uint32_t readU32u(const void* p) noexcept { return readU32x<0>(p); } - - static ASMJIT_INLINE uint32_t readU32aLE(const void* p) noexcept { return readU32xLE<4>(p); } - static ASMJIT_INLINE uint32_t readU32uLE(const void* p) noexcept { return readU32xLE<0>(p); } - - static ASMJIT_INLINE uint32_t readU32aBE(const void* p) noexcept { return readU32xBE<4>(p); } - static ASMJIT_INLINE uint32_t readU32uBE(const void* p) noexcept { return readU32xBE<0>(p); } - - static ASMJIT_INLINE int32_t readI32a(const void* p) noexcept { return readI32x<4>(p); } - static ASMJIT_INLINE int32_t readI32u(const void* p) noexcept { return readI32x<0>(p); } - - static ASMJIT_INLINE int32_t readI32aLE(const void* p) noexcept { return readI32xLE<4>(p); } - static ASMJIT_INLINE int32_t readI32uLE(const void* p) noexcept { return readI32xLE<0>(p); } - - static ASMJIT_INLINE int32_t readI32aBE(const void* p) noexcept { return readI32xBE<4>(p); } - static ASMJIT_INLINE int32_t readI32uBE(const void* p) noexcept { return readI32xBE<0>(p); } - - template - static ASMJIT_INLINE uint64_t readU64xLE(const void* p) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_LE && (ASMJIT_ARCH_UNALIGNED_64 || Alignment >= 8)) { - return static_cast(p)[0]; - } - else { - uint32_t x = readU32xLE(static_cast(p) + 0); - uint32_t y = readU32xLE(static_cast(p) + 4); - return static_cast(x) + (static_cast(y) << 32); - } - } - - template - static ASMJIT_INLINE uint64_t readU64xBE(const void* p) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_BE && (ASMJIT_ARCH_UNALIGNED_64 || Alignment >= 8)) { - return static_cast(p)[0]; - } - else { - uint32_t x = readU32xLE(static_cast(p) + 0); - uint32_t y = readU32xLE(static_cast(p) + 4); - return (static_cast(x) << 32) + static_cast(y); - } - } - - template - static ASMJIT_INLINE uint64_t readU64x(const void* p) noexcept { - return ASMJIT_ARCH_LE ? readU64xLE(p) : readU64xBE(p); - } - - template - static ASMJIT_INLINE int64_t readI64xLE(const void* p) noexcept { - return static_cast(readU64xLE(p)); - } - - template - static ASMJIT_INLINE int64_t readI64xBE(const void* p) noexcept { - return static_cast(readU64xBE(p)); - } - - template - static ASMJIT_INLINE int64_t readI64x(const void* p) noexcept { - return ASMJIT_ARCH_LE ? 
readI64xLE(p) : readI64xBE(p); - } - - static ASMJIT_INLINE uint64_t readU64a(const void* p) noexcept { return readU64x<8>(p); } - static ASMJIT_INLINE uint64_t readU64u(const void* p) noexcept { return readU64x<0>(p); } - - static ASMJIT_INLINE uint64_t readU64aLE(const void* p) noexcept { return readU64xLE<8>(p); } - static ASMJIT_INLINE uint64_t readU64uLE(const void* p) noexcept { return readU64xLE<0>(p); } - - static ASMJIT_INLINE uint64_t readU64aBE(const void* p) noexcept { return readU64xBE<8>(p); } - static ASMJIT_INLINE uint64_t readU64uBE(const void* p) noexcept { return readU64xBE<0>(p); } - - static ASMJIT_INLINE int64_t readI64a(const void* p) noexcept { return readI64x<8>(p); } - static ASMJIT_INLINE int64_t readI64u(const void* p) noexcept { return readI64x<0>(p); } - - static ASMJIT_INLINE int64_t readI64aLE(const void* p) noexcept { return readI64xLE<8>(p); } - static ASMJIT_INLINE int64_t readI64uLE(const void* p) noexcept { return readI64xLE<0>(p); } - - static ASMJIT_INLINE int64_t readI64aBE(const void* p) noexcept { return readI64xBE<8>(p); } - static ASMJIT_INLINE int64_t readI64uBE(const void* p) noexcept { return readI64xBE<0>(p); } - - // -------------------------------------------------------------------------- - // [WriteMem] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE void writeU8(void* p, uint32_t x) noexcept { - static_cast(p)[0] = static_cast(x & 0xFFU); - } - - static ASMJIT_INLINE void writeI8(void* p, int32_t x) noexcept { - static_cast(p)[0] = static_cast(x & 0xFF); - } - - template - static ASMJIT_INLINE void writeU16xLE(void* p, uint32_t x) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_LE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { - static_cast(p)[0] = static_cast(x & 0xFFFFU); - } - else { - static_cast(p)[0] = static_cast((x ) & 0xFFU); - static_cast(p)[1] = static_cast((x >> 8) & 0xFFU); - } - } - - template - static ASMJIT_INLINE void writeU16xBE(void* p, uint32_t x) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? 
Alignment : 1U); - if (ASMJIT_ARCH_BE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { - static_cast(p)[0] = static_cast(x & 0xFFFFU); - } - else { - static_cast(p)[0] = static_cast((x >> 8) & 0xFFU); - static_cast(p)[1] = static_cast((x ) & 0xFFU); - } - } - - template - static ASMJIT_INLINE void writeU16x(void* p, uint32_t x) noexcept { - if (ASMJIT_ARCH_LE) - writeU16xLE(p, x); - else - writeU16xBE(p, x); - } - - template - static ASMJIT_INLINE void writeI16xLE(void* p, int32_t x) noexcept { - writeU16xLE(p, static_cast(x)); - } - - template - static ASMJIT_INLINE void writeI16xBE(void* p, int32_t x) noexcept { - writeU16xBE(p, static_cast(x)); - } - - template - static ASMJIT_INLINE void writeI16x(void* p, int32_t x) noexcept { - writeU16x(p, static_cast(x)); - } - - static ASMJIT_INLINE void writeU16aLE(void* p, uint32_t x) noexcept { writeU16xLE<2>(p, x); } - static ASMJIT_INLINE void writeU16uLE(void* p, uint32_t x) noexcept { writeU16xLE<0>(p, x); } - - static ASMJIT_INLINE void writeU16aBE(void* p, uint32_t x) noexcept { writeU16xBE<2>(p, x); } - static ASMJIT_INLINE void writeU16uBE(void* p, uint32_t x) noexcept { writeU16xBE<0>(p, x); } - - static ASMJIT_INLINE void writeU16a(void* p, uint32_t x) noexcept { writeU16x<2>(p, x); } - static ASMJIT_INLINE void writeU16u(void* p, uint32_t x) noexcept { writeU16x<0>(p, x); } - - static ASMJIT_INLINE void writeI16aLE(void* p, int32_t x) noexcept { writeI16xLE<2>(p, x); } - static ASMJIT_INLINE void writeI16uLE(void* p, int32_t x) noexcept { writeI16xLE<0>(p, x); } - - static ASMJIT_INLINE void writeI16aBE(void* p, int32_t x) noexcept { writeI16xBE<2>(p, x); } - static ASMJIT_INLINE void writeI16uBE(void* p, int32_t x) noexcept { writeI16xBE<0>(p, x); } - - static ASMJIT_INLINE void writeI16a(void* p, int32_t x) noexcept { writeI16x<2>(p, x); } - static ASMJIT_INLINE void writeI16u(void* p, int32_t x) noexcept { writeI16x<0>(p, x); } - - template - static ASMJIT_INLINE void writeU32xLE(void* p, uint32_t x) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_UNALIGNED_32 || Alignment >= 4) { - static_cast(p)[0] = ASMJIT_ARCH_LE ? x : byteswap32(x); - } - else { - writeU16xLE(static_cast(p) + 0, x >> 16); - writeU16xLE(static_cast(p) + 2, x); - } - } - - template - static ASMJIT_INLINE void writeU32xBE(void* p, uint32_t x) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_UNALIGNED_32 || Alignment >= 4) { - static_cast(p)[0] = ASMJIT_ARCH_BE ? 
x : byteswap32(x); - } - else { - writeU16xBE(static_cast(p) + 0, x); - writeU16xBE(static_cast(p) + 2, x >> 16); - } - } - - template - static ASMJIT_INLINE void writeU32x(void* p, uint32_t x) noexcept { - if (ASMJIT_ARCH_LE) - writeU32xLE(p, x); - else - writeU32xBE(p, x); - } - - template - static ASMJIT_INLINE void writeI32xLE(void* p, int32_t x) noexcept { - writeU32xLE(p, static_cast(x)); - } - - template - static ASMJIT_INLINE void writeI32xBE(void* p, int32_t x) noexcept { - writeU32xBE(p, static_cast(x)); - } - - template - static ASMJIT_INLINE void writeI32x(void* p, int32_t x) noexcept { - writeU32x(p, static_cast(x)); - } - - static ASMJIT_INLINE void writeU32aLE(void* p, uint32_t x) noexcept { writeU32xLE<4>(p, x); } - static ASMJIT_INLINE void writeU32uLE(void* p, uint32_t x) noexcept { writeU32xLE<0>(p, x); } - - static ASMJIT_INLINE void writeU32aBE(void* p, uint32_t x) noexcept { writeU32xBE<4>(p, x); } - static ASMJIT_INLINE void writeU32uBE(void* p, uint32_t x) noexcept { writeU32xBE<0>(p, x); } - - static ASMJIT_INLINE void writeU32a(void* p, uint32_t x) noexcept { writeU32x<4>(p, x); } - static ASMJIT_INLINE void writeU32u(void* p, uint32_t x) noexcept { writeU32x<0>(p, x); } - - static ASMJIT_INLINE void writeI32aLE(void* p, int32_t x) noexcept { writeI32xLE<4>(p, x); } - static ASMJIT_INLINE void writeI32uLE(void* p, int32_t x) noexcept { writeI32xLE<0>(p, x); } - - static ASMJIT_INLINE void writeI32aBE(void* p, int32_t x) noexcept { writeI32xBE<4>(p, x); } - static ASMJIT_INLINE void writeI32uBE(void* p, int32_t x) noexcept { writeI32xBE<0>(p, x); } - - static ASMJIT_INLINE void writeI32a(void* p, int32_t x) noexcept { writeI32x<4>(p, x); } - static ASMJIT_INLINE void writeI32u(void* p, int32_t x) noexcept { writeI32x<0>(p, x); } - - template - static ASMJIT_INLINE void writeU64xLE(void* p, uint64_t x) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? Alignment : 1U); - if (ASMJIT_ARCH_LE && (ASMJIT_ARCH_UNALIGNED_64 || Alignment >= 8)) { - static_cast(p)[0] = x; - } - else { - writeU32xLE(static_cast(p) + 0, static_cast(x >> 32)); - writeU32xLE(static_cast(p) + 4, static_cast(x & 0xFFFFFFFFU)); - } - } - - template - static ASMJIT_INLINE void writeU64xBE(void* p, uint64_t x) noexcept { - ASMJIT_ASSUME_ALIGNED(p, Alignment > 1 ? 
Alignment : 1U); - if (ASMJIT_ARCH_BE && (ASMJIT_ARCH_UNALIGNED_64 || Alignment >= 8)) { - static_cast(p)[0] = x; - } - else { - writeU32xBE(static_cast(p) + 0, static_cast(x & 0xFFFFFFFFU)); - writeU32xBE(static_cast(p) + 4, static_cast(x >> 32)); - } - } - - template - static ASMJIT_INLINE void writeU64x(void* p, uint64_t x) noexcept { - if (ASMJIT_ARCH_LE) - writeU64xLE(p, x); - else - writeU64xBE(p, x); - } - - template - static ASMJIT_INLINE void writeI64xLE(void* p, int64_t x) noexcept { - writeU64xLE(p, static_cast(x)); - } - - template - static ASMJIT_INLINE void writeI64xBE(void* p, int64_t x) noexcept { - writeU64xBE(p, static_cast(x)); - } - - template - static ASMJIT_INLINE void writeI64x(void* p, int64_t x) noexcept { - writeU64x(p, static_cast(x)); - } - - static ASMJIT_INLINE void writeU64aLE(void* p, uint64_t x) noexcept { writeU64xLE<8>(p, x); } - static ASMJIT_INLINE void writeU64uLE(void* p, uint64_t x) noexcept { writeU64xLE<0>(p, x); } - - static ASMJIT_INLINE void writeU64aBE(void* p, uint64_t x) noexcept { writeU64xBE<8>(p, x); } - static ASMJIT_INLINE void writeU64uBE(void* p, uint64_t x) noexcept { writeU64xBE<0>(p, x); } - - static ASMJIT_INLINE void writeU64a(void* p, uint64_t x) noexcept { writeU64x<8>(p, x); } - static ASMJIT_INLINE void writeU64u(void* p, uint64_t x) noexcept { writeU64x<0>(p, x); } - - static ASMJIT_INLINE void writeI64aLE(void* p, int64_t x) noexcept { writeI64xLE<8>(p, x); } - static ASMJIT_INLINE void writeI64uLE(void* p, int64_t x) noexcept { writeI64xLE<0>(p, x); } - - static ASMJIT_INLINE void writeI64aBE(void* p, int64_t x) noexcept { writeI64xBE<8>(p, x); } - static ASMJIT_INLINE void writeI64uBE(void* p, int64_t x) noexcept { writeI64xBE<0>(p, x); } - - static ASMJIT_INLINE void writeI64a(void* p, int64_t x) noexcept { writeI64x<8>(p, x); } - static ASMJIT_INLINE void writeI64u(void* p, int64_t x) noexcept { writeI64x<0>(p, x); } -} // Utils namespace - -// ============================================================================ -// [asmjit::UInt64] -// ============================================================================ - -union UInt64 { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE UInt64 fromUInt64(uint64_t val) noexcept { - UInt64 data; - data.setUInt64(val); - return data; - } - - ASMJIT_INLINE UInt64 fromUInt64(const UInt64& val) noexcept { - UInt64 data; - data.setUInt64(val); - return data; - } - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void reset() noexcept { - if (ASMJIT_ARCH_64BIT) { - u64 = 0; - } - else { - u32[0] = 0; - u32[1] = 0; - } - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE uint64_t getUInt64() const noexcept { - return u64; - } - - ASMJIT_INLINE UInt64& setUInt64(uint64_t val) noexcept { - u64 = val; - return *this; - } - - ASMJIT_INLINE UInt64& setUInt64(const UInt64& val) noexcept { - if (ASMJIT_ARCH_64BIT) { - u64 = val.u64; - } - else { - u32[0] = val.u32[0]; - u32[1] = val.u32[1]; - } - return *this; - } - - ASMJIT_INLINE UInt64& setPacked_2x32(uint32_t u0, uint32_t u1) noexcept { - if (ASMJIT_ARCH_64BIT) { - u64 = 
Utils::pack64_2x32(u0, u1); - } - else { - u32[0] = u0; - u32[1] = u1; - } - return *this; - } - - // -------------------------------------------------------------------------- - // [Add] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE UInt64& add(uint64_t val) noexcept { - u64 += val; - return *this; - } - - ASMJIT_INLINE UInt64& add(const UInt64& val) noexcept { - if (ASMJIT_ARCH_64BIT) { - u64 += val.u64; - } - else { - u32[0] += val.u32[0]; - u32[1] += val.u32[1]; - } - return *this; - } - - // -------------------------------------------------------------------------- - // [Sub] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE UInt64& sub(uint64_t val) noexcept { - u64 -= val; - return *this; - } - - ASMJIT_INLINE UInt64& sub(const UInt64& val) noexcept { - if (ASMJIT_ARCH_64BIT) { - u64 -= val.u64; - } - else { - u32[0] -= val.u32[0]; - u32[1] -= val.u32[1]; - } - return *this; - } - - // -------------------------------------------------------------------------- - // [And] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE UInt64& and_(uint64_t val) noexcept { - u64 &= val; - return *this; - } - - ASMJIT_INLINE UInt64& and_(const UInt64& val) noexcept { - if (ASMJIT_ARCH_64BIT) { - u64 &= val.u64; - } - else { - u32[0] &= val.u32[0]; - u32[1] &= val.u32[1]; - } - return *this; - } - - // -------------------------------------------------------------------------- - // [AndNot] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE UInt64& andNot(uint64_t val) noexcept { - u64 &= ~val; - return *this; - } - - ASMJIT_INLINE UInt64& andNot(const UInt64& val) noexcept { - if (ASMJIT_ARCH_64BIT) { - u64 &= ~val.u64; - } - else { - u32[0] &= ~val.u32[0]; - u32[1] &= ~val.u32[1]; - } - return *this; - } - - // -------------------------------------------------------------------------- - // [Or] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE UInt64& or_(uint64_t val) noexcept { - u64 |= val; - return *this; - } - - ASMJIT_INLINE UInt64& or_(const UInt64& val) noexcept { - if (ASMJIT_ARCH_64BIT) { - u64 |= val.u64; - } - else { - u32[0] |= val.u32[0]; - u32[1] |= val.u32[1]; - } - return *this; - } - - // -------------------------------------------------------------------------- - // [Xor] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE UInt64& xor_(uint64_t val) noexcept { - u64 ^= val; - return *this; - } - - ASMJIT_INLINE UInt64& xor_(const UInt64& val) noexcept { - if (ASMJIT_ARCH_64BIT) { - u64 ^= val.u64; - } - else { - u32[0] ^= val.u32[0]; - u32[1] ^= val.u32[1]; - } - return *this; - } - - // -------------------------------------------------------------------------- - // [Eq] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool isZero() const noexcept { - if (ASMJIT_ARCH_64BIT) - return u64 == 0; - else - return (u32[0] | u32[1]) == 0; - } - - ASMJIT_INLINE bool isNonZero() const noexcept { - if (ASMJIT_ARCH_64BIT) - return u64 != 0; - else - return (u32[0] | u32[1]) != 0; - } - - ASMJIT_INLINE bool eq(uint64_t val) const noexcept { - return u64 == val; - } - - ASMJIT_INLINE bool eq(const UInt64& val) const noexcept { - if (ASMJIT_ARCH_64BIT) - return u64 == val.u64; - else - return u32[0] == val.u32[0] && u32[1] == val.u32[1]; - } - - // 
-------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE UInt64& operator+=(uint64_t val) noexcept { return add(val); } - ASMJIT_INLINE UInt64& operator+=(const UInt64& val) noexcept { return add(val); } - - ASMJIT_INLINE UInt64& operator-=(uint64_t val) noexcept { return sub(val); } - ASMJIT_INLINE UInt64& operator-=(const UInt64& val) noexcept { return sub(val); } - - ASMJIT_INLINE UInt64& operator&=(uint64_t val) noexcept { return and_(val); } - ASMJIT_INLINE UInt64& operator&=(const UInt64& val) noexcept { return and_(val); } - - ASMJIT_INLINE UInt64& operator|=(uint64_t val) noexcept { return or_(val); } - ASMJIT_INLINE UInt64& operator|=(const UInt64& val) noexcept { return or_(val); } - - ASMJIT_INLINE UInt64& operator^=(uint64_t val) noexcept { return xor_(val); } - ASMJIT_INLINE UInt64& operator^=(const UInt64& val) noexcept { return xor_(val); } - - ASMJIT_INLINE bool operator==(uint64_t val) const noexcept { return eq(val); } - ASMJIT_INLINE bool operator==(const UInt64& val) const noexcept { return eq(val); } - - ASMJIT_INLINE bool operator!=(uint64_t val) const noexcept { return !eq(val); } - ASMJIT_INLINE bool operator!=(const UInt64& val) const noexcept { return !eq(val); } - - ASMJIT_INLINE bool operator<(uint64_t val) const noexcept { return u64 < val; } - ASMJIT_INLINE bool operator<(const UInt64& val) const noexcept { return u64 < val.u64; } - - ASMJIT_INLINE bool operator<=(uint64_t val) const noexcept { return u64 <= val; } - ASMJIT_INLINE bool operator<=(const UInt64& val) const noexcept { return u64 <= val.u64; } - - ASMJIT_INLINE bool operator>(uint64_t val) const noexcept { return u64 > val; } - ASMJIT_INLINE bool operator>(const UInt64& val) const noexcept { return u64 > val.u64; } - - ASMJIT_INLINE bool operator>=(uint64_t val) const noexcept { return u64 >= val; } - ASMJIT_INLINE bool operator>=(const UInt64& val) const noexcept { return u64 >= val.u64; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - int8_t i8[8]; //!< 8-bit signed integer (8x). - uint8_t u8[8]; //!< 8-bit unsigned integer (8x). - - int16_t i16[4]; //!< 16-bit signed integer (4x). - uint16_t u16[4]; //!< 16-bit unsigned integer (4x). - - int32_t i32[2]; //!< 32-bit signed integer (2x). - uint32_t u32[2]; //!< 32-bit unsigned integer (2x). - - int64_t i64; //!< 64-bit signed integer. - uint64_t u64; //!< 64-bit unsigned integer. - - float f32[2]; //!< 32-bit floating point (2x). - double f64; //!< 64-bit floating point. - -#if ASMJIT_ARCH_LE - struct { float f32Lo, f32Hi; }; - struct { int32_t i32Lo, i32Hi; }; - struct { uint32_t u32Lo, u32Hi; }; -#else - struct { float f32Hi, f32Lo; }; - struct { int32_t i32Hi, i32Lo; }; - struct { uint32_t u32Hi, u32Lo; }; -#endif // ASMJIT_ARCH_LE -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_UTILS_H diff --git a/src/asmjit/base/vmem.cpp b/src/asmjit/base/vmem.cpp deleted file mode 100644 index 553032a..0000000 --- a/src/asmjit/base/vmem.cpp +++ /dev/null @@ -1,1077 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
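The Utils::writeU16x/writeU32x/writeU64x family removed above layers one idea across three widths: take a direct store when the host byte order and the Alignment template guarantee allow it, otherwise assemble the value from narrower writes. A minimal sketch of the same idea in portable C++ follows (not asmjit's API; std::endian assumes C++20):

#include <bit>
#include <cstdint>
#include <cstring>

// Store a 32-bit value at `p` in little-endian byte order, for any pointer
// alignment. The removed helpers add an Alignment template parameter so that
// aligned callers can use a typed store on targets that trap on misaligned
// access; memcpy sidesteps that concern portably.
inline void writeU32LE(void* p, uint32_t x) noexcept {
  if constexpr (std::endian::native == std::endian::little) {
    std::memcpy(p, &x, sizeof(x));          // folds to a single store on x86/ARM64
  }
  else {
    uint8_t* d = static_cast<uint8_t*>(p);  // byte stores force LE layout
    d[0] = uint8_t(x);
    d[1] = uint8_t(x >> 8);
    d[2] = uint8_t(x >> 16);
    d[3] = uint8_t(x >> 24);
  }
}

// e.g. writeU32LE(buf, 0x11223344u) writes the bytes 44 33 22 11.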
- -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/osutils.h" -#include "../base/utils.h" -#include "../base/vmem.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -// This file contains implementation of virtual memory management for AsmJit -// library. There are several goals I decided to write implementation myself: -// -// - Granularity of allocated blocks is different than granularity for a typical -// C malloc. It is at least 64-bytes so CodeEmitter can guarantee the alignment -// up to 64 bytes, which is the size of a cache-line and it's also required by -// AVX-512 aligned loads and stores. Alignment requirements can grow in the future, -// but at the moment 64 bytes is safe (we may jump to 128 bytes if necessary or -// make it configurable). -// -// - Keep memory manager information outside of the allocated virtual memory -// pages, because these pages allow machine code execution and there should -// be not data required to keep track of these blocks. Another reason is that -// some environments (i.e. iOS) allow to generate and run JIT code, but this -// code has to be set to [Executable, but not Writable]. -// -// - Keep implementation simple and easy to follow. -// -// Implementation is based on bit arrays and binary trees. Bit arrays contain -// information related to allocated and unused blocks of memory. The size of -// a block is described by `MemNode::density`. Count of blocks is stored in -// `MemNode::blocks`. For example if density is 64 and count of blocks is 20, -// memory node contains 64*20 bytes of memory and the smallest possible allocation -// (and also alignment) is 64 bytes. So density is also related to memory -// alignment. Binary trees (RB) are used to enable fast lookup into all addresses -// allocated by memory manager instance. This is used mainly by `VMemPrivate::release()`. -// -// Bit array looks like this (empty = unused, X = used) - Size of block 64: -// -// ------------------------------------------------------------------------- -// | |X|X| | | | | |X|X|X|X|X|X| | | | | | | | | | | | |X| | | | |X|X|X| | | -// ------------------------------------------------------------------------- -// (Maximum continuous block) -// -// These bits show that there are 12 allocated blocks (X) of 64 bytes, so total -// size allocated is 768 bytes. Maximum count of continuous memory is 12 * 64. - -namespace asmjit { - -// ============================================================================ -// [asmjit::VMemMgr - BitOps] -// ============================================================================ - -#define M_DIV(x, y) ((x) / (y)) -#define M_MOD(x, y) ((x) % (y)) - -//! \internal -enum { kBitsPerEntity = (sizeof(size_t) * 8) }; - -//! \internal -//! -//! Set `len` bits in `buf` starting at `index` bit index. -static void _SetBits(size_t* buf, size_t index, size_t len) noexcept { - if (len == 0) - return; - - size_t i = index / kBitsPerEntity; // size_t[] - size_t j = index % kBitsPerEntity; // size_t[][] bit index - - // How many bytes process in the first group. - size_t c = kBitsPerEntity - j; - if (c > len) - c = len; - - // Offset. 
- buf += i; - - *buf++ |= ((~(size_t)0) >> (kBitsPerEntity - c)) << j; - len -= c; - - while (len >= kBitsPerEntity) { - *buf++ = ~(size_t)0; - len -= kBitsPerEntity; - } - - if (len) - *buf |= ((~(size_t)0) >> (kBitsPerEntity - len)); -} - -// ============================================================================ -// [asmjit::VMemMgr::TypeDefs] -// ============================================================================ - -typedef VMemMgr::RbNode RbNode; -typedef VMemMgr::MemNode MemNode; -typedef VMemMgr::PermanentNode PermanentNode; - -// ============================================================================ -// [asmjit::VMemMgr::RbNode] -// ============================================================================ - -//! \internal -//! -//! Base red-black tree node. -struct VMemMgr::RbNode { - // Implementation is based on article by Julienne Walker (Public Domain), - // including C code and original comments. Thanks for the excellent article. - - RbNode* node[2]; //!< Left[0] and right[1] nodes. - uint8_t* mem; //!< Virtual memory address. - uint32_t red; //!< Node color (red vs. black). -}; - -//! \internal -//! -//! Get if the node is red (nullptr or node with red flag). -static ASMJIT_INLINE bool rbIsRed(RbNode* node) noexcept { - return node && node->red; -} - -//! \internal -//! -//! Check whether the RB tree is valid. -static int rbAssert(RbNode* root) noexcept { - if (!root) return 1; - - RbNode* ln = root->node[0]; - RbNode* rn = root->node[1]; - - // Red violation. - ASMJIT_ASSERT(!(rbIsRed(root) && (rbIsRed(ln) || rbIsRed(rn)))); - - int lh = rbAssert(ln); - int rh = rbAssert(rn); - - // Invalid btree. - ASMJIT_ASSERT(ln == nullptr || ln->mem < root->mem); - ASMJIT_ASSERT(rn == nullptr || rn->mem > root->mem); - - // Black violation. - ASMJIT_ASSERT(!(lh != 0 && rh != 0 && lh != rh)); - - // Only count black links. - if (lh != 0 && rh != 0) - return rbIsRed(root) ? lh : lh + 1; - else - return 0; -} - -//! \internal -//! -//! Single rotation. -static ASMJIT_INLINE RbNode* rbRotateSingle(RbNode* root, int dir) noexcept { - RbNode* save = root->node[!dir]; - - root->node[!dir] = save->node[dir]; - save->node[dir] = root; - - root->red = 1; - save->red = 0; - - return save; -} - -//! \internal -//! -//! Double rotation. -static ASMJIT_INLINE RbNode* rbRotateDouble(RbNode* root, int dir) noexcept { - root->node[!dir] = rbRotateSingle(root->node[!dir], !dir); - return rbRotateSingle(root, dir); -} - -// ============================================================================ -// [asmjit::VMemMgr::MemNode] -// ============================================================================ - -struct VMemMgr::MemNode : public RbNode { - ASMJIT_INLINE void init(MemNode* other) noexcept { - mem = other->mem; - - size = other->size; - used = other->used; - blocks = other->blocks; - density = other->density; - largestBlock = other->largestBlock; - - baUsed = other->baUsed; - baCont = other->baCont; - } - - // Get available space. - ASMJIT_INLINE size_t getAvailable() const noexcept { return size - used; } - - MemNode* prev; // Prev node in list. - MemNode* next; // Next node in list. - - size_t size; // How many bytes contain this node. - size_t used; // How many bytes are used in this node. - size_t blocks; // How many blocks are here. - size_t density; // Minimum count of allocated bytes in this node (also alignment). - size_t largestBlock; // Contains largest block that can be allocated. - - size_t* baUsed; // Contains bits about used blocks (0 = unused, 1 = used). 
- size_t* baCont; // Contains bits about continuous blocks (0 = stop , 1 = continue). -}; - -// ============================================================================ -// [asmjit::VMemMgr::PermanentNode] -// ============================================================================ - -//! \internal -//! -//! Permanent node. -struct VMemMgr::PermanentNode { - //! Get available space. - ASMJIT_INLINE size_t getAvailable() const noexcept { return size - used; } - - PermanentNode* prev; // Pointer to prev chunk or nullptr. - uint8_t* mem; // Base pointer (virtual memory address). - size_t size; // Count of bytes allocated. - size_t used; // Count of bytes used. -}; - -// ============================================================================ -// [asmjit::VMemMgr - Private] -// ============================================================================ - -//! \internal -//! -//! Helper to avoid `#ifdef`s in the code. -ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSize) noexcept { - uint32_t flags = OSUtils::kVMWritable | OSUtils::kVMExecutable; -#if !ASMJIT_OS_WINDOWS - return static_cast(OSUtils::allocVirtualMemory(size, vSize, flags)); -#else - return static_cast(OSUtils::allocProcessMemory(self->_hProcess, size, vSize, flags)); -#endif -} - -//! \internal -//! -//! Helper to avoid `#ifdef`s in the code. -ASMJIT_INLINE Error vMemMgrReleaseVMem(VMemMgr* self, void* p, size_t vSize) noexcept { -#if !ASMJIT_OS_WINDOWS - return OSUtils::releaseVirtualMemory(p, vSize); -#else - return OSUtils::releaseProcessMemory(self->_hProcess, p, vSize); -#endif -} - -//! \internal -//! -//! Check whether the Red-Black tree is valid. -static bool vMemMgrCheckTree(VMemMgr* self) noexcept { - return rbAssert(self->_root) > 0; -} - -//! \internal -//! -//! Alloc virtual memory including a heap memory needed for `MemNode` data. -//! -//! Returns set-up `MemNode*` or nullptr if allocation failed. -static MemNode* vMemMgrCreateNode(VMemMgr* self, size_t size, size_t density) noexcept { - size_t vSize; - uint8_t* vmem = vMemMgrAllocVMem(self, size, &vSize); - if (!vmem) return nullptr; - - size_t blocks = (vSize / density); - size_t bsize = (((blocks + 7) >> 3) + sizeof(size_t) - 1) & ~(size_t)(sizeof(size_t) - 1); - - MemNode* node = static_cast(Internal::allocMemory(sizeof(MemNode))); - uint8_t* data = static_cast(Internal::allocMemory(bsize * 2)); - - // Out of memory. - if (!node || !data) { - vMemMgrReleaseVMem(self, vmem, vSize); - if (node) Internal::releaseMemory(node); - if (data) Internal::releaseMemory(data); - return nullptr; - } - - // Initialize RbNode data. - node->node[0] = nullptr; - node->node[1] = nullptr; - node->mem = vmem; - node->red = 1; - - // Initialize MemNode data. - node->prev = nullptr; - node->next = nullptr; - - node->size = vSize; - node->used = 0; - node->blocks = blocks; - node->density = density; - node->largestBlock = vSize; - - ::memset(data, 0, bsize * 2); - node->baUsed = reinterpret_cast(data); - node->baCont = reinterpret_cast(data + bsize); - - return node; -} - -static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) noexcept { - if (!self->_root) { - // Empty tree case. - self->_root = node; - } - else { - // False tree root. - RbNode head = { { nullptr, nullptr }, 0, 0 }; - - // Grandparent & parent. - RbNode* g = nullptr; - RbNode* t = &head; - - // Iterator & parent. - RbNode* p = nullptr; - RbNode* q = t->node[1] = self->_root; - - int dir = 0; - int last = 0; // Not needed to initialize, but makes some tools happy. 
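// The loop below is a top-down red-black insertion (after Julienne Walker's
// public-domain algorithm credited above): it descends from the root toward
// the insertion point, color-flipping any node whose two children are both
// red, and repairs a resulting red-red violation immediately with a single
// or double rotation against the grandparent. Because every fix-up happens
// on the way down, no parent stack or recursion is needed; `dir` is the
// branch taken at the current node and `last` remembers the previous branch
// so the correct rotation direction can be chosen.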
- - // Search down the tree. - for (;;) { - if (!q) { - // Insert new node at the bottom. - q = node; - p->node[dir] = node; - } - else if (rbIsRed(q->node[0]) && rbIsRed(q->node[1])) { - // Color flip. - q->red = 1; - q->node[0]->red = 0; - q->node[1]->red = 0; - } - - // Fix red violation. - if (rbIsRed(q) && rbIsRed(p)) { - int dir2 = t->node[1] == g; - t->node[dir2] = q == p->node[last] ? rbRotateSingle(g, !last) : rbRotateDouble(g, !last); - } - - // Stop if found. - if (q == node) - break; - - last = dir; - dir = q->mem < node->mem; - - // Update helpers. - if (g) t = g; - - g = p; - p = q; - q = q->node[dir]; - } - - // Update root. - self->_root = static_cast(head.node[1]); - } - - // Make root black. - self->_root->red = 0; - - // Link with others. - node->prev = self->_last; - - if (!self->_first) { - self->_first = node; - self->_last = node; - self->_optimal = node; - } - else { - node->prev = self->_last; - self->_last->next = node; - self->_last = node; - } -} - -//! \internal -//! -//! Remove node from Red-Black tree. -//! -//! Returns node that should be freed, but it doesn't have to be necessarily -//! the `node` passed. -static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) noexcept { - // False tree root. - RbNode head = { { nullptr, nullptr }, 0, 0 }; - - // Helpers. - RbNode* q = &head; - RbNode* p = nullptr; - RbNode* g = nullptr; - - // Found item. - RbNode* f = nullptr; - int dir = 1; - - // Set up. - q->node[1] = self->_root; - - // Search and push a red down. - while (q->node[dir]) { - int last = dir; - - // Update helpers. - g = p; - p = q; - q = q->node[dir]; - dir = q->mem < node->mem; - - // Save found node. - if (q == node) - f = q; - - // Push the red node down. - if (!rbIsRed(q) && !rbIsRed(q->node[dir])) { - if (rbIsRed(q->node[!dir])) { - p = p->node[last] = rbRotateSingle(q, dir); - } - else if (!rbIsRed(q->node[!dir])) { - RbNode* s = p->node[!last]; - - if (s) { - if (!rbIsRed(s->node[!last]) && !rbIsRed(s->node[last])) { - // Color flip. - p->red = 0; - s->red = 1; - q->red = 1; - } - else { - int dir2 = g->node[1] == p; - - if (rbIsRed(s->node[last])) - g->node[dir2] = rbRotateDouble(p, last); - else if (rbIsRed(s->node[!last])) - g->node[dir2] = rbRotateSingle(p, last); - - // Ensure correct coloring. - q->red = g->node[dir2]->red = 1; - g->node[dir2]->node[0]->red = 0; - g->node[dir2]->node[1]->red = 0; - } - } - } - } - } - - // Replace and remove. - ASMJIT_ASSERT(f != nullptr); - ASMJIT_ASSERT(f != &head); - ASMJIT_ASSERT(q != &head); - - if (f != q) { - ASMJIT_ASSERT(f != &head); - static_cast(f)->init(static_cast(q)); - } - - p->node[p->node[1] == q] = q->node[q->node[0] == nullptr]; - - // Update root and make it black. - self->_root = static_cast(head.node[1]); - if (self->_root) self->_root->red = 0; - - // Unlink. - MemNode* next = static_cast(q)->next; - MemNode* prev = static_cast(q)->prev; - - if (prev) - prev->next = next; - else - self->_first = next; - - if (next) - next->prev = prev; - else - self->_last = prev; - - if (self->_optimal == q) - self->_optimal = prev ? prev : next; - - return static_cast(q); -} - -static MemNode* vMemMgrFindNodeByPtr(VMemMgr* self, uint8_t* mem) noexcept { - MemNode* node = self->_root; - while (node) { - uint8_t* nodeMem = node->mem; - - // Go left. - if (mem < nodeMem) { - node = static_cast(node->node[0]); - continue; - } - - // Go right. - uint8_t* nodeEnd = nodeMem + node->size; - if (mem >= nodeEnd) { - node = static_cast(node->node[1]); - continue; - } - - // Match. 
- break; - } - return node; -} - -static void* vMemMgrAllocPermanent(VMemMgr* self, size_t vSize) noexcept { - static const size_t permanentAlignment = 32; - static const size_t permanentNodeSize = 32768; - - vSize = Utils::alignTo(vSize, permanentAlignment); - - AutoLock locked(self->_lock); - PermanentNode* node = self->_permanent; - - // Try to find space in allocated chunks. - while (node && vSize > node->getAvailable()) - node = node->prev; - - // Or allocate new node. - if (!node) { - size_t nodeSize = permanentNodeSize; - if (nodeSize < vSize) nodeSize = vSize; - - node = static_cast(Internal::allocMemory(sizeof(PermanentNode))); - if (!node) return nullptr; - - node->mem = vMemMgrAllocVMem(self, nodeSize, &node->size); - if (!node->mem) { - Internal::releaseMemory(node); - return nullptr; - } - - node->used = 0; - node->prev = self->_permanent; - self->_permanent = node; - } - - // Finally, copy function code to our space we reserved for. - uint8_t* result = node->mem + node->used; - - // Update Statistics. - node->used += vSize; - self->_usedBytes += vSize; - - // Code can be null to only reserve space for code. - return static_cast(result); -} - -static void* vMemMgrAllocFreeable(VMemMgr* self, size_t vSize) noexcept { - // Current index. - size_t i; - - // How many we need to be freed. - size_t need; - size_t minVSize; - - // Align to 32 bytes by default. - vSize = Utils::alignTo(vSize, 32); - if (vSize == 0) - return nullptr; - - AutoLock locked(self->_lock); - MemNode* node = self->_optimal; - minVSize = self->_blockSize; - - // Try to find memory block in existing nodes. - while (node) { - // Skip this node? - if ((node->getAvailable() < vSize) || (node->largestBlock < vSize && node->largestBlock != 0)) { - MemNode* next = node->next; - - if (node->getAvailable() < minVSize && node == self->_optimal && next) - self->_optimal = next; - - node = next; - continue; - } - - size_t* up = node->baUsed; // Current ubits address. - size_t ubits; // Current ubits[0] value. - size_t bit; // Current bit mask. - size_t blocks = node->blocks; // Count of blocks in node. - size_t cont = 0; // How many bits are currently freed in find loop. - size_t maxCont = 0; // Largest continuous block (bits count). - size_t j; - - need = M_DIV((vSize + node->density - 1), node->density); - i = 0; - - // Try to find node that is large enough. - while (i < blocks) { - ubits = *up++; - - // Fast skip used blocks. - if (ubits == ~(size_t)0) { - if (cont > maxCont) - maxCont = cont; - cont = 0; - - i += kBitsPerEntity; - continue; - } - - size_t max = kBitsPerEntity; - if (i + max > blocks) - max = blocks - i; - - for (j = 0, bit = 1; j < max; bit <<= 1) { - j++; - if ((ubits & bit) == 0) { - if (++cont == need) { - i += j; - i -= cont; - goto L_Found; - } - - continue; - } - - if (cont > maxCont) maxCont = cont; - cont = 0; - } - - i += kBitsPerEntity; - } - - // Because we traversed the entire node, we can set largest node size that - // will be used to cache next traversing. - node->largestBlock = maxCont * node->density; - - node = node->next; - } - - // If we are here, we failed to find existing memory block and we must - // allocate a new one. - { - size_t blockSize = self->_blockSize; - if (blockSize < vSize) blockSize = vSize; - - node = vMemMgrCreateNode(self, blockSize, self->_blockDensity); - if (!node) return nullptr; - - // Update binary tree. - vMemMgrInsertNode(self, node); - ASMJIT_ASSERT(vMemMgrCheckTree(self)); - - // Alloc first node at start. 
- i = 0; - need = (vSize + node->density - 1) / node->density; - - // Update statistics. - self->_allocatedBytes += node->size; - } - -L_Found: - // Update bits. - _SetBits(node->baUsed, i, need); - _SetBits(node->baCont, i, need - 1); - - // Update statistics. - { - size_t u = need * node->density; - node->used += u; - node->largestBlock = 0; - self->_usedBytes += u; - } - - // And return pointer to allocated memory. - uint8_t* result = node->mem + i * node->density; - ASMJIT_ASSERT(result >= node->mem && result <= node->mem + node->size - vSize); - return result; -} - -//! \internal -//! -//! Reset the whole `VMemMgr` instance, freeing all heap memory allocated an -//! virtual memory allocated unless `keepVirtualMemory` is true (and this is -//! only used when writing data to a remote process). -static void vMemMgrReset(VMemMgr* self, bool keepVirtualMemory) noexcept { - MemNode* node = self->_first; - - while (node) { - MemNode* next = node->next; - - if (!keepVirtualMemory) - vMemMgrReleaseVMem(self, node->mem, node->size); - - Internal::releaseMemory(node->baUsed); - Internal::releaseMemory(node); - - node = next; - } - - self->_allocatedBytes = 0; - self->_usedBytes = 0; - - self->_root = nullptr; - self->_first = nullptr; - self->_last = nullptr; - self->_optimal = nullptr; -} - -// ============================================================================ -// [asmjit::VMemMgr - Construction / Destruction] -// ============================================================================ - -#if !ASMJIT_OS_WINDOWS -VMemMgr::VMemMgr() noexcept { -#else -VMemMgr::VMemMgr(HANDLE hProcess) noexcept { -#endif - - VMemInfo vm = OSUtils::getVirtualMemoryInfo(); - -#if ASMJIT_OS_WINDOWS - _hProcess = hProcess ? hProcess : vm.hCurrentProcess; -#endif // ASMJIT_OS_WINDOWS - - _blockSize = vm.pageGranularity; - _blockDensity = 64; - - _allocatedBytes = 0; - _usedBytes = 0; - - _root = nullptr; - _first = nullptr; - _last = nullptr; - _optimal = nullptr; - - _permanent = nullptr; - _keepVirtualMemory = false; -} - -VMemMgr::~VMemMgr() noexcept { - // Freeable memory cleanup - Also frees the virtual memory if configured to. - vMemMgrReset(this, _keepVirtualMemory); - - // Permanent memory cleanup - Never frees the virtual memory. - PermanentNode* node = _permanent; - while (node) { - PermanentNode* prev = node->prev; - Internal::releaseMemory(node); - node = prev; - } -} - -// ============================================================================ -// [asmjit::VMemMgr - Reset] -// ============================================================================ - -void VMemMgr::reset() noexcept { - vMemMgrReset(this, false); -} - -// ============================================================================ -// [asmjit::VMemMgr - Alloc / Release] -// ============================================================================ - -void* VMemMgr::alloc(size_t size, uint32_t type) noexcept { - if (type == kAllocPermanent) - return vMemMgrAllocPermanent(this, size); - else - return vMemMgrAllocFreeable(this, size); -} - -Error VMemMgr::release(void* p) noexcept { - if (!p) return kErrorOk; - - AutoLock locked(_lock); - MemNode* node = vMemMgrFindNodeByPtr(this, static_cast(p)); - if (!node) return DebugUtils::errored(kErrorInvalidArgument); - - size_t offset = (size_t)((uint8_t*)p - (uint8_t*)node->mem); - size_t bitpos = M_DIV(offset, node->density); - size_t i = (bitpos / kBitsPerEntity); - - size_t* up = node->baUsed + i; // Current ubits address. 
- size_t* cp = node->baCont + i; // Current cbits address. - size_t ubits = *up; // Current ubits[0] value. - size_t cbits = *cp; // Current cbits[0] value. - size_t bit = (size_t)1 << (bitpos % kBitsPerEntity); - - size_t cont = 0; - bool stop; - - for (;;) { - stop = (cbits & bit) == 0; - ubits &= ~bit; - cbits &= ~bit; - - bit <<= 1; - cont++; - - if (stop || bit == 0) { - *up = ubits; - *cp = cbits; - if (stop) - break; - - ubits = *++up; - cbits = *++cp; - bit = 1; - } - } - - // If the freed block is fully allocated node then it's needed to - // update 'optimal' pointer in memory manager. - if (node->used == node->size) { - MemNode* cur = _optimal; - - do { - cur = cur->prev; - if (cur == node) { - _optimal = node; - break; - } - } while (cur); - } - - // Statistics. - cont *= node->density; - if (node->largestBlock < cont) - node->largestBlock = cont; - - node->used -= cont; - _usedBytes -= cont; - - // If page is empty, we can free it. - if (node->used == 0) { - // Free memory associated with node (this memory is not accessed - // anymore so it's safe). - vMemMgrReleaseVMem(this, node->mem, node->size); - Internal::releaseMemory(node->baUsed); - - node->baUsed = nullptr; - node->baCont = nullptr; - - // Statistics. - _allocatedBytes -= node->size; - - // Remove node. This function can return different node than - // passed into, but data is copied into previous node if needed. - Internal::releaseMemory(vMemMgrRemoveNode(this, node)); - ASMJIT_ASSERT(vMemMgrCheckTree(this)); - } - - return kErrorOk; -} - -Error VMemMgr::shrink(void* p, size_t used) noexcept { - if (!p) return kErrorOk; - if (used == 0) - return release(p); - - AutoLock locked(_lock); - MemNode* node = vMemMgrFindNodeByPtr(this, (uint8_t*)p); - if (!node) return DebugUtils::errored(kErrorInvalidArgument); - - size_t offset = (size_t)((uint8_t*)p - (uint8_t*)node->mem); - size_t bitpos = M_DIV(offset, node->density); - size_t i = (bitpos / kBitsPerEntity); - - size_t* up = node->baUsed + i; // Current ubits address. - size_t* cp = node->baCont + i; // Current cbits address. - size_t ubits = *up; // Current ubits[0] value. - size_t cbits = *cp; // Current cbits[0] value. - size_t bit = (size_t)1 << (bitpos % kBitsPerEntity); - - size_t cont = 0; - size_t usedBlocks = (used + node->density - 1) / node->density; - - bool stop; - - // Find the first block we can mark as free. - for (;;) { - stop = (cbits & bit) == 0; - if (stop) - return kErrorOk; - - if (++cont == usedBlocks) - break; - - bit <<= 1; - if (bit == 0) { - ubits = *++up; - cbits = *++cp; - bit = 1; - } - } - - // Free the tail blocks. - cont = ~(size_t)0; - goto _EnterFreeLoop; - - for (;;) { - stop = (cbits & bit) == 0; - ubits &= ~bit; - -_EnterFreeLoop: - cbits &= ~bit; - - bit <<= 1; - cont++; - - if (stop || bit == 0) { - *up = ubits; - *cp = cbits; - if (stop) - break; - - ubits = *++up; - cbits = *++cp; - bit = 1; - } - } - - // Statistics. 
- cont *= node->density; - if (node->largestBlock < cont) - node->largestBlock = cont; - - node->used -= cont; - _usedBytes -= cont; - - return kErrorOk; -} - -// ============================================================================ -// [asmjit::VMem - Test] -// ============================================================================ - -#if defined(ASMJIT_TEST) -static void VMemTest_fill(void* a, void* b, int i) noexcept { - int pattern = rand() % 256; - *(int *)a = i; - *(int *)b = i; - ::memset((char*)a + sizeof(int), pattern, i - sizeof(int)); - ::memset((char*)b + sizeof(int), pattern, i - sizeof(int)); -} - -static void VMemTest_verify(void* a, void* b) noexcept { - int ai = *(int*)a; - int bi = *(int*)b; - - EXPECT(ai == bi, - "The length of 'a' (%d) and 'b' (%d) should be same", ai, bi); - - EXPECT(::memcmp(a, b, ai) == 0, - "Pattern (%p) doesn't match", a); -} - -static void VMemTest_stats(VMemMgr& memmgr) noexcept { - INFO("Used : %u", static_cast(memmgr.getUsedBytes())); - INFO("Allocated: %u", static_cast(memmgr.getAllocatedBytes())); -} - -static void VMemTest_shuffle(void** a, void** b, size_t count) noexcept { - for (size_t i = 0; i < count; ++i) { - size_t si = (size_t)rand() % count; - - void* ta = a[i]; - void* tb = b[i]; - - a[i] = a[si]; - b[i] = b[si]; - - a[si] = ta; - b[si] = tb; - } -} - -UNIT(base_vmem) { - VMemMgr memmgr; - - // Should be predictible. - srand(100); - - int i; - int kCount = 200000; - - INFO("Memory alloc/free test - %d allocations", static_cast(kCount)); - - void** a = (void**)Internal::allocMemory(sizeof(void*) * kCount); - void** b = (void**)Internal::allocMemory(sizeof(void*) * kCount); - - EXPECT(a != nullptr && b != nullptr, - "Couldn't allocate %u bytes on heap", kCount * 2); - - INFO("Allocating virtual memory..."); - for (i = 0; i < kCount; i++) { - int r = (rand() % 1000) + 4; - - a[i] = memmgr.alloc(r); - EXPECT(a[i] != nullptr, - "Couldn't allocate %d bytes of virtual memory", r); - ::memset(a[i], 0, r); - } - VMemTest_stats(memmgr); - - INFO("Freeing virtual memory..."); - for (i = 0; i < kCount; i++) { - EXPECT(memmgr.release(a[i]) == kErrorOk, - "Failed to free %p", b[i]); - } - VMemTest_stats(memmgr); - - INFO("Verified alloc/free test - %d allocations", static_cast(kCount)); - for (i = 0; i < kCount; i++) { - int r = (rand() % 1000) + 4; - - a[i] = memmgr.alloc(r); - EXPECT(a[i] != nullptr, - "Couldn't allocate %d bytes of virtual memory", r); - - b[i] = Internal::allocMemory(r); - EXPECT(b[i] != nullptr, - "Couldn't allocate %d bytes on heap", r); - - VMemTest_fill(a[i], b[i], r); - } - VMemTest_stats(memmgr); - - INFO("Shuffling..."); - VMemTest_shuffle(a, b, kCount); - - INFO("Verify and free..."); - for (i = 0; i < kCount / 2; i++) { - VMemTest_verify(a[i], b[i]); - EXPECT(memmgr.release(a[i]) == kErrorOk, - "Failed to free %p", a[i]); - Internal::releaseMemory(b[i]); - } - VMemTest_stats(memmgr); - - INFO("Alloc again"); - for (i = 0; i < kCount / 2; i++) { - int r = (rand() % 1000) + 4; - - a[i] = memmgr.alloc(r); - EXPECT(a[i] != nullptr, - "Couldn't allocate %d bytes of virtual memory", r); - - b[i] = Internal::allocMemory(r); - EXPECT(b[i] != nullptr, - "Couldn't allocate %d bytes on heap"); - - VMemTest_fill(a[i], b[i], r); - } - VMemTest_stats(memmgr); - - INFO("Verify and free..."); - for (i = 0; i < kCount; i++) { - VMemTest_verify(a[i], b[i]); - EXPECT(memmgr.release(a[i]) == kErrorOk, - "Failed to free %p", a[i]); - Internal::releaseMemory(b[i]); - } - VMemTest_stats(memmgr); - - Internal::releaseMemory(a); 
- Internal::releaseMemory(b); -} -#endif // ASMJIT_TEST - -} // asmjit namespace diff --git a/src/asmjit/base/vmem.h b/src/asmjit/base/vmem.h deleted file mode 100644 index 6a1a513..0000000 --- a/src/asmjit/base/vmem.h +++ /dev/null @@ -1,154 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_VMEM_H -#define _ASMJIT_BASE_VMEM_H - -// [Dependencies] -#include "../base/globals.h" -#include "../base/osutils.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::VMemMgr] -// ============================================================================ - -//! Reference implementation of memory manager that uses `VMemUtil` to allocate -//! chunks of virtual memory and bit arrays to manage it. -class VMemMgr { -public: - //! Type of virtual memory allocation, see `VMemMgr::alloc()`. - ASMJIT_ENUM(AllocType) { - //! Normal memory allocation, has to be freed by `VMemMgr::release()`. - kAllocFreeable = 0, - //! Allocate permanent memory, can't be freed. - kAllocPermanent = 1 - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - -#if !ASMJIT_OS_WINDOWS - //! Create a `VMemMgr` instance. - ASMJIT_API VMemMgr() noexcept; -#else - //! Create a `VMemMgr` instance. - //! - //! NOTE: When running on Windows it's possible to specify a `hProcess` to - //! be used for memory allocation. Using `hProcess` allows to allocate memory - //! of a remote process. - ASMJIT_API VMemMgr(HANDLE hProcess = static_cast(0)) noexcept; -#endif // ASMJIT_OS_WINDOWS - - //! Destroy the `VMemMgr` instance and free all blocks. - ASMJIT_API ~VMemMgr() noexcept; - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - //! Free all allocated memory. - ASMJIT_API void reset() noexcept; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - -#if ASMJIT_OS_WINDOWS - //! Get the handle of the process memory manager is bound to. - ASMJIT_INLINE HANDLE getProcessHandle() const noexcept { return _hProcess; } -#endif // ASMJIT_OS_WINDOWS - - //! Get how many bytes are currently allocated. - ASMJIT_INLINE size_t getAllocatedBytes() const noexcept { return _allocatedBytes; } - //! Get how many bytes are currently used. - ASMJIT_INLINE size_t getUsedBytes() const noexcept { return _usedBytes; } - - //! Get whether to keep allocated memory after the `VMemMgr` is destroyed. - //! - //! \sa \ref setKeepVirtualMemory. - ASMJIT_INLINE bool getKeepVirtualMemory() const noexcept { return _keepVirtualMemory; } - //! Set whether to keep allocated memory after the memory manager is destroyed. - //! - //! This method is usable when patching code of remote process. You need to - //! allocate process memory, store generated assembler into it and patch the - //! method you want to redirect (into your code). This method affects only - //! VMemMgr destructor. After destruction all internal - //! structures are freed, only the process virtual memory remains. - //! - //! 
NOTE: Memory allocated with kAllocPermanent is always kept. - //! - //! \sa \ref getKeepVirtualMemory. - ASMJIT_INLINE void setKeepVirtualMemory(bool val) noexcept { _keepVirtualMemory = val; } - - // -------------------------------------------------------------------------- - // [Alloc / Release] - // -------------------------------------------------------------------------- - - //! Allocate a `size` bytes of virtual memory. - //! - //! Note that if you are implementing your own virtual memory manager then you - //! can quitly ignore type of allocation. This is mainly for AsmJit to memory - //! manager that allocated memory will be never freed. - ASMJIT_API void* alloc(size_t size, uint32_t type = kAllocFreeable) noexcept; - //! Free previously allocated memory at a given `address`. - ASMJIT_API Error release(void* p) noexcept; - //! Free extra memory allocated with `p`. - ASMJIT_API Error shrink(void* p, size_t used) noexcept; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - -#if ASMJIT_OS_WINDOWS - HANDLE _hProcess; //!< Process passed to `VirtualAllocEx` and `VirtualFree`. -#endif // ASMJIT_OS_WINDOWS - Lock _lock; //!< Lock to enable thread-safe functionality. - - size_t _blockSize; //!< Default block size. - size_t _blockDensity; //!< Default block density. - bool _keepVirtualMemory; //!< Keep virtual memory after destroyed. - - size_t _allocatedBytes; //!< How many bytes are currently allocated. - size_t _usedBytes; //!< How many bytes are currently used. - - //! \internal - //! \{ - - struct RbNode; - struct MemNode; - struct PermanentNode; - - // Memory nodes root. - MemNode* _root; - // Memory nodes list. - MemNode* _first; - MemNode* _last; - MemNode* _optimal; - // Permanent memory. - PermanentNode* _permanent; - - //! \} -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_VMEM_H diff --git a/src/asmjit/base/zone.cpp b/src/asmjit/base/zone.cpp deleted file mode 100644 index 644b26e..0000000 --- a/src/asmjit/base/zone.cpp +++ /dev/null @@ -1,833 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies] -#include "../base/utils.h" -#include "../base/zone.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! Zero size block used by `Zone` that doesn't have any memory allocated. 
-static const Zone::Block Zone_zeroBlock = { nullptr, nullptr, 0, { 0 } }; - -static ASMJIT_INLINE uint32_t Zone_getAlignmentOffsetFromAlignment(uint32_t x) noexcept { - switch (x) { - default: return 0; - case 0 : return 0; - case 1 : return 0; - case 2 : return 1; - case 4 : return 2; - case 8 : return 3; - case 16: return 4; - case 32: return 5; - case 64: return 6; - } -} - -// ============================================================================ -// [asmjit::Zone - Construction / Destruction] -// ============================================================================ - -Zone::Zone(uint32_t blockSize, uint32_t blockAlignment) noexcept - : _ptr(nullptr), - _end(nullptr), - _block(const_cast(&Zone_zeroBlock)), - _blockSize(blockSize), - _blockAlignmentShift(Zone_getAlignmentOffsetFromAlignment(blockAlignment)) {} - -Zone::~Zone() noexcept { - reset(true); -} - -// ============================================================================ -// [asmjit::Zone - Reset] -// ============================================================================ - -void Zone::reset(bool releaseMemory) noexcept { - Block* cur = _block; - - // Can't be altered. - if (cur == &Zone_zeroBlock) - return; - - if (releaseMemory) { - // Since cur can be in the middle of the double-linked list, we have to - // traverse to both directions `prev` and `next` separately. - Block* next = cur->next; - do { - Block* prev = cur->prev; - Internal::releaseMemory(cur); - cur = prev; - } while (cur); - - cur = next; - while (cur) { - next = cur->next; - Internal::releaseMemory(cur); - cur = next; - } - - _ptr = nullptr; - _end = nullptr; - _block = const_cast(&Zone_zeroBlock); - } - else { - while (cur->prev) - cur = cur->prev; - - _ptr = cur->data; - _end = _ptr + cur->size; - _block = cur; - } -} - -// ============================================================================ -// [asmjit::Zone - Alloc] -// ============================================================================ - -void* Zone::_alloc(size_t size) noexcept { - Block* curBlock = _block; - uint8_t* p; - - size_t blockSize = std::max(_blockSize, size); - size_t blockAlignment = getBlockAlignment(); - - // The `_alloc()` method can only be called if there is not enough space - // in the current block, see `alloc()` implementation for more details. - ASMJIT_ASSERT(curBlock == &Zone_zeroBlock || getRemainingSize() < size); - - // If the `Zone` has been cleared the current block doesn't have to be the - // last one. Check if there is a block that can be used instead of allocating - // a new one. If there is a `next` block it's completely unused, we don't have - // to check for remaining bytes. - Block* next = curBlock->next; - if (next && next->size >= size) { - p = Utils::alignTo(next->data, blockAlignment); - - _block = next; - _ptr = p + size; - _end = next->data + next->size; - - return static_cast(p); - } - - // Prevent arithmetic overflow. - if (ASMJIT_UNLIKELY(blockSize > (~static_cast(0) - sizeof(Block) - blockAlignment))) - return nullptr; - - blockSize += blockAlignment; - Block* newBlock = static_cast(Internal::allocMemory(sizeof(Block) + blockSize)); - - if (ASMJIT_UNLIKELY(!newBlock)) - return nullptr; - - // Align the pointer to `blockAlignment` and adjust the size of this block - // accordingly. It's the same as using `blockAlignment - Utils::alignDiff()`, - // just written differently. 
- p = Utils::alignTo(newBlock->data, blockAlignment); - newBlock->prev = nullptr; - newBlock->next = nullptr; - newBlock->size = blockSize; - - if (curBlock != &Zone_zeroBlock) { - newBlock->prev = curBlock; - curBlock->next = newBlock; - - // Does only happen if there is a next block, but the requested memory - // can't fit into it. In this case a new buffer is allocated and inserted - // between the current block and the next one. - if (next) { - newBlock->next = next; - next->prev = newBlock; - } - } - - _block = newBlock; - _ptr = p + size; - _end = newBlock->data + blockSize; - - return static_cast(p); -} - -void* Zone::allocZeroed(size_t size) noexcept { - void* p = alloc(size); - if (ASMJIT_UNLIKELY(!p)) return p; - return ::memset(p, 0, size); -} - -void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept { - if (ASMJIT_UNLIKELY(!data || !size)) return nullptr; - - ASMJIT_ASSERT(size != IntTraits::maxValue()); - uint8_t* m = allocT(size + nullTerminate); - if (ASMJIT_UNLIKELY(!m)) return nullptr; - - ::memcpy(m, data, size); - if (nullTerminate) m[size] = '\0'; - - return static_cast(m); -} - -char* Zone::sformat(const char* fmt, ...) noexcept { - if (ASMJIT_UNLIKELY(!fmt)) return nullptr; - - char buf[512]; - size_t len; - - va_list ap; - va_start(ap, fmt); - - len = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap); - buf[len++] = 0; - - va_end(ap); - return static_cast(dup(buf, len)); -} - -// ============================================================================ -// [asmjit::ZoneHeap - Helpers] -// ============================================================================ - -static bool ZoneHeap_hasDynamicBlock(ZoneHeap* self, ZoneHeap::DynamicBlock* block) noexcept { - ZoneHeap::DynamicBlock* cur = self->_dynamicBlocks; - while (cur) { - if (cur == block) - return true; - cur = cur->next; - } - return false; -} - -// ============================================================================ -// [asmjit::ZoneHeap - Init / Reset] -// ============================================================================ - -void ZoneHeap::reset(Zone* zone) noexcept { - // Free dynamic blocks. - DynamicBlock* block = _dynamicBlocks; - while (block) { - DynamicBlock* next = block->next; - Internal::releaseMemory(block); - block = next; - } - - // Zero the entire class and initialize to the given `zone`. - ::memset(this, 0, sizeof(*this)); - _zone = zone; -} - -// ============================================================================ -// [asmjit::ZoneHeap - Alloc / Release] -// ============================================================================ - -void* ZoneHeap::_alloc(size_t size, size_t& allocatedSize) noexcept { - ASMJIT_ASSERT(isInitialized()); - - // We use our memory pool only if the requested block is of a reasonable size. - uint32_t slot; - if (_getSlotIndex(size, slot, allocatedSize)) { - // Slot reuse. - uint8_t* p = reinterpret_cast(_slots[slot]); - size = allocatedSize; - - if (p) { - _slots[slot] = reinterpret_cast(p)->next; - //printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot); - return p; - } - - // So use Zone to allocate a new chunk for us. But before we use it, we - // check if there is enough room for the new chunk in zone, and if not, - // we redistribute the remaining memory in Zone's current block into slots. - Zone* zone = _zone; - p = Utils::alignTo(zone->getCursor(), kBlockAlignment); - size_t remain = (p <= zone->getEnd()) ? 
(size_t)(zone->getEnd() - p) : size_t(0); - - if (ASMJIT_LIKELY(remain >= size)) { - zone->setCursor(p + size); - //printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot); - return p; - } - else { - // Distribute the remaining memory to suitable slots. - if (remain >= kLoGranularity) { - do { - size_t distSize = std::min(remain, kLoMaxSize); - uint32_t distSlot = static_cast((distSize - kLoGranularity) / kLoGranularity); - ASMJIT_ASSERT(distSlot < kLoCount); - - reinterpret_cast(p)->next = _slots[distSlot]; - _slots[distSlot] = reinterpret_cast(p); - - p += distSize; - remain -= distSize; - } while (remain >= kLoGranularity); - zone->setCursor(p); - } - - p = static_cast(zone->_alloc(size)); - if (ASMJIT_UNLIKELY(!p)) { - allocatedSize = 0; - return nullptr; - } - - //printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot); - return p; - } - } - else { - // Allocate a dynamic block. - size_t overhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment; - - // Handle a possible overflow. - if (ASMJIT_UNLIKELY(overhead >= ~static_cast(0) - size)) - return nullptr; - - void* p = Internal::allocMemory(size + overhead); - if (ASMJIT_UNLIKELY(!p)) { - allocatedSize = 0; - return nullptr; - } - - // Link as first in `_dynamicBlocks` double-linked list. - DynamicBlock* block = static_cast(p); - DynamicBlock* next = _dynamicBlocks; - - if (next) - next->prev = block; - - block->prev = nullptr; - block->next = next; - _dynamicBlocks = block; - - // Align the pointer to the guaranteed alignment and store `DynamicBlock` - // at the end of the memory block, so `_releaseDynamic()` can find it. - p = Utils::alignTo(static_cast(p) + sizeof(DynamicBlock) + sizeof(DynamicBlock*), kBlockAlignment); - reinterpret_cast(p)[-1] = block; - - allocatedSize = size; - //printf("ALLOCATED DYNAMIC %p of size %d\n", p, int(size)); - return p; - } -} - -void* ZoneHeap::_allocZeroed(size_t size, size_t& allocatedSize) noexcept { - ASMJIT_ASSERT(isInitialized()); - - void* p = _alloc(size, allocatedSize); - if (ASMJIT_UNLIKELY(!p)) return p; - return ::memset(p, 0, allocatedSize); -} - -void ZoneHeap::_releaseDynamic(void* p, size_t size) noexcept { - ASMJIT_ASSERT(isInitialized()); - //printf("RELEASING DYNAMIC %p of size %d\n", p, int(size)); - - // Pointer to `DynamicBlock` is stored at [-1]. - DynamicBlock* block = reinterpret_cast(p)[-1]; - ASMJIT_ASSERT(ZoneHeap_hasDynamicBlock(this, block)); - - // Unlink and free. - DynamicBlock* prev = block->prev; - DynamicBlock* next = block->next; - - if (prev) - prev->next = next; - else - _dynamicBlocks = next; - - if (next) - next->prev = prev; - - Internal::releaseMemory(block); -} - -// ============================================================================ -// [asmjit::ZoneVectorBase - Helpers] -// ============================================================================ - -Error ZoneVectorBase::_grow(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept { - size_t threshold = Globals::kAllocThreshold / sizeOfT; - size_t capacity = _capacity; - size_t after = _length; - - if (ASMJIT_UNLIKELY(IntTraits::maxValue() - n < after)) - return DebugUtils::errored(kErrorNoHeapMemory); - - after += n; - if (capacity >= after) - return kErrorOk; - - // ZoneVector is used as an array to hold short-lived data structures used - // during code generation. The growing strategy is simple - use small capacity - // at the beginning (very good for ZoneHeap) and then grow quicker to prevent - // successive reallocations. 
- if (capacity < 4) - capacity = 4; - else if (capacity < 8) - capacity = 8; - else if (capacity < 16) - capacity = 16; - else if (capacity < 64) - capacity = 64; - else if (capacity < 256) - capacity = 256; - - while (capacity < after) { - if (capacity < threshold) - capacity *= 2; - else - capacity += threshold; - } - - return _reserve(heap, sizeOfT, capacity); -} - -Error ZoneVectorBase::_reserve(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept { - size_t oldCapacity = _capacity; - if (oldCapacity >= n) return kErrorOk; - - size_t nBytes = n * sizeOfT; - if (ASMJIT_UNLIKELY(nBytes < n)) - return DebugUtils::errored(kErrorNoHeapMemory); - - size_t allocatedBytes; - uint8_t* newData = static_cast(heap->alloc(nBytes, allocatedBytes)); - - if (ASMJIT_UNLIKELY(!newData)) - return DebugUtils::errored(kErrorNoHeapMemory); - - void* oldData = _data; - if (_length) - ::memcpy(newData, oldData, _length * sizeOfT); - - if (oldData) - heap->release(oldData, oldCapacity * sizeOfT); - - _capacity = allocatedBytes / sizeOfT; - ASMJIT_ASSERT(_capacity >= n); - - _data = newData; - return kErrorOk; -} - -Error ZoneVectorBase::_resize(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept { - size_t length = _length; - if (_capacity < n) { - ASMJIT_PROPAGATE(_grow(heap, sizeOfT, n - length)); - ASMJIT_ASSERT(_capacity >= n); - } - - if (length < n) - ::memset(static_cast(_data) + length * sizeOfT, 0, (n - length) * sizeOfT); - - _length = n; - return kErrorOk; -} - -// ============================================================================ -// [asmjit::ZoneBitVector - Ops] -// ============================================================================ - -Error ZoneBitVector::_resize(ZoneHeap* heap, size_t newLength, size_t idealCapacity, bool newBitsValue) noexcept { - ASMJIT_ASSERT(idealCapacity >= newLength); - - if (newLength <= _length) { - // The size after the resize is lesser than or equal to the current length. - size_t idx = newLength / kBitsPerWord; - size_t bit = newLength % kBitsPerWord; - - // Just set all bits outside of the new length in the last word to zero. - // There is a case that there are not bits to set if `bit` is zero. This - // happens when `newLength` is a multiply of `kBitsPerWord` like 64, 128, - // and so on. In that case don't change anything as that would mean settings - // bits outside of the `_length`. - if (bit) - _data[idx] &= (static_cast(1) << bit) - 1U; - - _length = newLength; - return kErrorOk; - } - - size_t oldLength = _length; - BitWord* data = _data; - - if (newLength > _capacity) { - // Realloc needed... Calculate the minimum capacity (in bytes) requied. - size_t minimumCapacityInBits = Utils::alignTo(idealCapacity, kBitsPerWord); - size_t allocatedCapacity; - - if (ASMJIT_UNLIKELY(minimumCapacityInBits < newLength)) - return DebugUtils::errored(kErrorNoHeapMemory); - - // Normalize to bytes. - size_t minimumCapacity = minimumCapacityInBits / 8; - BitWord* newData = static_cast(heap->alloc(minimumCapacity, allocatedCapacity)); - - if (ASMJIT_UNLIKELY(!newData)) - return DebugUtils::errored(kErrorNoHeapMemory); - - // `allocatedCapacity` now contains number in bytes, we need bits. - size_t allocatedCapacityInBits = allocatedCapacity * 8; - - // Arithmetic overflow should normally not happen. If it happens we just - // change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as - // this value is still safe to be used to call `_heap->release(...)`. 
- if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) - allocatedCapacityInBits = minimumCapacityInBits; - - if (oldLength) - ::memcpy(newData, data, _wordsPerBits(oldLength)); - - if (data) - heap->release(data, _capacity / 8); - data = newData; - - _data = data; - _capacity = allocatedCapacityInBits; - } - - // Start (of the old length) and end (of the new length) bits - size_t idx = oldLength / kBitsPerWord; - size_t startBit = oldLength % kBitsPerWord; - size_t endBit = newLength % kBitsPerWord; - - // Set new bits to either 0 or 1. The `pattern` is used to set multiple - // bits per bit-word and contains either all zeros or all ones. - BitWord pattern = _patternFromBit(newBitsValue); - - // First initialize the last bit-word of the old length. - if (startBit) { - size_t nBits = 0; - - if (idx == (newLength / kBitsPerWord)) { - // The number of bit-words is the same after the resize. In that case - // we need to set only bits necessary in the current last bit-word. - ASMJIT_ASSERT(startBit < endBit); - nBits = endBit - startBit; - } - else { - // There is be more bit-words after the resize. In that case we don't - // have to be extra careful about the last bit-word of the old length. - nBits = kBitsPerWord - startBit; - } - - data[idx++] |= pattern << nBits; - } - - // Initialize all bit-words after the last bit-word of the old length. - size_t endIdx = _wordsPerBits(newLength); - endIdx -= static_cast(endIdx * kBitsPerWord == newLength); - - while (idx <= endIdx) - data[idx++] = pattern; - - // Clear unused bits of the last bit-word. - if (endBit) - data[endIdx] &= (static_cast(1) << endBit) - 1; - - _length = newLength; - return kErrorOk; -} - -Error ZoneBitVector::_append(ZoneHeap* heap, bool value) noexcept { - size_t kThreshold = Globals::kAllocThreshold * 8; - size_t newLength = _length + 1; - size_t idealCapacity = _capacity; - - if (idealCapacity < 128) - idealCapacity = 128; - else if (idealCapacity <= kThreshold) - idealCapacity *= 2; - else - idealCapacity += kThreshold; - - if (ASMJIT_UNLIKELY(idealCapacity < _capacity)) { - // It's technically impossible that `_length + 1` overflows. - idealCapacity = newLength; - ASMJIT_ASSERT(idealCapacity > _capacity); - } - - return _resize(heap, newLength, idealCapacity, value); -} - -Error ZoneBitVector::fill(size_t from, size_t to, bool value) noexcept { - if (ASMJIT_UNLIKELY(from >= to)) { - if (from > to) - return DebugUtils::errored(kErrorInvalidArgument); - else - return kErrorOk; - } - - ASMJIT_ASSERT(from <= _length); - ASMJIT_ASSERT(to <= _length); - - // This is very similar to `ZoneBitVector::_fill()`, however, since we - // actually set bits that are already part of the container we need to - // special case filiing to zeros and ones. - size_t idx = from / kBitsPerWord; - size_t startBit = from % kBitsPerWord; - - size_t endIdx = to / kBitsPerWord; - size_t endBit = to % kBitsPerWord; - - BitWord* data = _data; - ASMJIT_ASSERT(data != nullptr); - - // Special case for non-zero `startBit`. - if (startBit) { - if (idx == endIdx) { - ASMJIT_ASSERT(startBit < endBit); - - size_t nBits = endBit - startBit; - BitWord mask = ((static_cast(1) << nBits) - 1) << startBit; - - if (value) - data[idx] |= mask; - else - data[idx] &= ~mask; - return kErrorOk; - } - else { - BitWord mask = (static_cast(0) - 1) << startBit; - - if (value) - data[idx++] |= mask; - else - data[idx++] &= ~mask; - } - } - - // Fill all bits in case there is a gap between the current `idx` and `endIdx`. 
- if (idx < endIdx) { - BitWord pattern = _patternFromBit(value); - do { - data[idx++] = pattern; - } while (idx < endIdx); - } - - // Special case for non-zero `endBit`. - if (endBit) { - BitWord mask = ((static_cast(1) << endBit) - 1); - if (value) - data[endIdx] |= mask; - else - data[endIdx] &= ~mask; - } - - return kErrorOk; -} - -// ============================================================================ -// [asmjit::ZoneHashBase - Utilities] -// ============================================================================ - -static uint32_t ZoneHash_getClosestPrime(uint32_t x) noexcept { - static const uint32_t primeTable[] = { - 23, 53, 193, 389, 769, 1543, 3079, 6151, 12289, 24593 - }; - - size_t i = 0; - uint32_t p; - - do { - if ((p = primeTable[i]) > x) - break; - } while (++i < ASMJIT_ARRAY_SIZE(primeTable)); - - return p; -} - -// ============================================================================ -// [asmjit::ZoneHashBase - Reset] -// ============================================================================ - -void ZoneHashBase::reset(ZoneHeap* heap) noexcept { - ZoneHashNode** oldData = _data; - if (oldData != _embedded) - _heap->release(oldData, _bucketsCount * sizeof(ZoneHashNode*)); - - _heap = heap; - _size = 0; - _bucketsCount = 1; - _bucketsGrow = 1; - _data = _embedded; - _embedded[0] = nullptr; -} - -// ============================================================================ -// [asmjit::ZoneHashBase - Rehash] -// ============================================================================ - -void ZoneHashBase::_rehash(uint32_t newCount) noexcept { - ASMJIT_ASSERT(isInitialized()); - - ZoneHashNode** oldData = _data; - ZoneHashNode** newData = reinterpret_cast( - _heap->allocZeroed(static_cast(newCount) * sizeof(ZoneHashNode*))); - - // We can still store nodes into the table, but it will degrade. - if (ASMJIT_UNLIKELY(newData == nullptr)) - return; - - uint32_t i; - uint32_t oldCount = _bucketsCount; - - for (i = 0; i < oldCount; i++) { - ZoneHashNode* node = oldData[i]; - while (node) { - ZoneHashNode* next = node->_hashNext; - uint32_t hMod = node->_hVal % newCount; - - node->_hashNext = newData[hMod]; - newData[hMod] = node; - - node = next; - } - } - - // 90% is the maximum occupancy, can't overflow since the maximum capacity - // is limited to the last prime number stored in the prime table. 
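The 90% occupancy rule in the comment above works together with `ZoneHash_getClosestPrime()`: every rehash moves to the next prime in a fixed table and recomputes the grow trigger as `newCount * 9 / 10`. A small standalone sketch of that sizing policy, with hypothetical names, assuming the same prime table:

~~~
#include <cstdint>
#include <cstdio>

// Hypothetical mirror of ZoneHash_getClosestPrime() and the 90% grow trigger.
static uint32_t closestPrime(uint32_t x) {
  static const uint32_t primeTable[] = {
    23, 53, 193, 389, 769, 1543, 3079, 6151, 12289, 24593
  };

  uint32_t p = primeTable[0];
  for (uint32_t prime : primeTable) {
    p = prime;
    if (p > x)
      break;   // First prime greater than `x`, or the last one if none is.
  }
  return p;
}

int main() {
  uint32_t buckets = 1;
  for (int step = 0; step < 4; step++) {
    buckets = closestPrime(buckets);
    std::printf("buckets=%u growAt=%u\n", buckets, buckets * 9 / 10);
  }
  return 0;   // prints 23/20, 53/47, 193/173, 389/350
}
~~~

Because the table is finite, bucket counts never exceed 24593, which is what keeps the `newCount * 9 / 10` computation safely within 32 bits.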
- if (oldData != _embedded) - _heap->release(oldData, _bucketsCount * sizeof(ZoneHashNode*)); - - _bucketsCount = newCount; - _bucketsGrow = newCount * 9 / 10; - - _data = newData; -} - -// ============================================================================ -// [asmjit::ZoneHashBase - Ops] -// ============================================================================ - -ZoneHashNode* ZoneHashBase::_put(ZoneHashNode* node) noexcept { - uint32_t hMod = node->_hVal % _bucketsCount; - ZoneHashNode* next = _data[hMod]; - - node->_hashNext = next; - _data[hMod] = node; - - if (++_size >= _bucketsGrow && next) { - uint32_t newCapacity = ZoneHash_getClosestPrime(_bucketsCount); - if (newCapacity != _bucketsCount) - _rehash(newCapacity); - } - - return node; -} - -ZoneHashNode* ZoneHashBase::_del(ZoneHashNode* node) noexcept { - uint32_t hMod = node->_hVal % _bucketsCount; - - ZoneHashNode** pPrev = &_data[hMod]; - ZoneHashNode* p = *pPrev; - - while (p) { - if (p == node) { - *pPrev = p->_hashNext; - return node; - } - - pPrev = &p->_hashNext; - p = *pPrev; - } - - return nullptr; -} - -// ============================================================================ -// [asmjit::Zone - Test] -// ============================================================================ - -#if defined(ASMJIT_TEST) -UNIT(base_zonevector) { - Zone zone(8096 - Zone::kZoneOverhead); - ZoneHeap heap(&zone); - - int i; - int kMax = 100000; - - ZoneVector vec; - - INFO("ZoneVector basic tests"); - EXPECT(vec.append(&heap, 0) == kErrorOk); - EXPECT(vec.isEmpty() == false); - EXPECT(vec.getLength() == 1); - EXPECT(vec.getCapacity() >= 1); - EXPECT(vec.indexOf(0) == 0); - EXPECT(vec.indexOf(-11) == Globals::kInvalidIndex); - - vec.clear(); - EXPECT(vec.isEmpty()); - EXPECT(vec.getLength() == 0); - EXPECT(vec.indexOf(0) == Globals::kInvalidIndex); - - for (i = 0; i < kMax; i++) { - EXPECT(vec.append(&heap, i) == kErrorOk); - } - EXPECT(vec.isEmpty() == false); - EXPECT(vec.getLength() == static_cast(kMax)); - EXPECT(vec.indexOf(kMax - 1) == static_cast(kMax - 1)); -} - -UNIT(base_ZoneBitVector) { - Zone zone(8096 - Zone::kZoneOverhead); - ZoneHeap heap(&zone); - - size_t i, count; - size_t kMaxCount = 100; - - ZoneBitVector vec; - EXPECT(vec.isEmpty()); - EXPECT(vec.getLength() == 0); - - INFO("ZoneBitVector::resize()"); - for (count = 1; count < kMaxCount; count++) { - vec.clear(); - EXPECT(vec.resize(&heap, count, false) == kErrorOk); - EXPECT(vec.getLength() == count); - - for (i = 0; i < count; i++) - EXPECT(vec.getAt(i) == false); - - vec.clear(); - EXPECT(vec.resize(&heap, count, true) == kErrorOk); - EXPECT(vec.getLength() == count); - - for (i = 0; i < count; i++) - EXPECT(vec.getAt(i) == true); - } - - INFO("ZoneBitVector::fill()"); - for (count = 1; count < kMaxCount; count += 2) { - vec.clear(); - EXPECT(vec.resize(&heap, count) == kErrorOk); - EXPECT(vec.getLength() == count); - - for (i = 0; i < (count + 1) / 2; i++) { - bool value = static_cast(i & 1); - EXPECT(vec.fill(i, count - i, value) == kErrorOk); - } - - for (i = 0; i < count; i++) { - EXPECT(vec.getAt(i) == static_cast(i & 1)); - } - } -} - -#endif // ASMJIT_TEST - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" diff --git a/src/asmjit/base/zone.h b/src/asmjit/base/zone.h deleted file mode 100644 index 5ec00a7..0000000 --- a/src/asmjit/base/zone.h +++ /dev/null @@ -1,1128 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
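One detail of `ZoneHashBase::_del()` above deserves a standalone illustration: it unlinks a node by walking a pointer-to-pointer, so the bucket head needs no special case. The `Node` type and `unlink()` helper below are hypothetical stand-ins, not asmjit API.

~~~
#include <cstdio>

// Hypothetical stand-in for a ZoneHashNode bucket chain.
struct Node {
  Node* next;
  int value;
};

// Unlink `node` from the chain rooted at `*head`, mirroring ZoneHashBase::_del().
static Node* unlink(Node** head, Node* node) {
  Node** pPrev = head;
  Node* p = *pPrev;

  while (p) {
    if (p == node) {
      *pPrev = p->next;   // Works for the head and interior nodes alike.
      return node;
    }
    pPrev = &p->next;
    p = *pPrev;
  }
  return nullptr;         // Not found.
}

int main() {
  Node c{nullptr, 3}, b{&c, 2}, a{&b, 1};
  Node* head = &a;

  unlink(&head, &b);                  // Remove the middle node.
  for (Node* p = head; p; p = p->next)
    std::printf("%d\n", p->value);    // Prints 1, then 3.
  return 0;
}
~~~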
- -// [Guard] -#ifndef _ASMJIT_BASE_ZONE_H -#define _ASMJIT_BASE_ZONE_H - -// [Dependencies] -#include "../base/utils.h" - -// [Api-Begin] -#include "../asmjit_apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base -//! \{ - -// ============================================================================ -// [asmjit::Zone] -// ============================================================================ - -//! Memory zone. -//! -//! Zone is an incremental memory allocator that allocates memory by simply -//! incrementing a pointer. It allocates blocks of memory by using standard -//! C `malloc`, but divides these blocks into smaller segments requested by -//! calling `Zone::alloc()` and friends. -//! -//! Zone has no function to release the allocated memory. It has to be released -//! all at once by calling `reset()`. If you need a more friendly allocator that -//! also supports `release()`, consider using \ref Zone with \ref ZoneHeap. -class Zone { -public: - //! \internal - //! - //! A single block of memory. - struct Block { - Block* prev; //!< Link to the previous block. - Block* next; //!< Link to the next block. - size_t size; //!< Size of the block. - uint8_t data[sizeof(void*)]; //!< Data. - }; - - enum { - //! Zone allocator overhead. - kZoneOverhead = Globals::kAllocOverhead + static_cast(sizeof(Block)) - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new instance of `Zone` allocator. - //! - //! The `blockSize` parameter describes the default size of the block. If the - //! `size` parameter passed to `alloc()` is greater than the default size - //! `Zone` will allocate and use a larger block, but it will not change the - //! default `blockSize`. - //! - //! It's not required, but it's good practice to set `blockSize` to a - //! reasonable value that depends on the usage of `Zone`. Greater block sizes - //! are generally safer and perform better than unreasonably low values. - ASMJIT_API Zone(uint32_t blockSize, uint32_t blockAlignment = 0) noexcept; - - //! Destroy the `Zone` instance. - //! - //! This will destroy the `Zone` instance and release all blocks of memory - //! allocated by it. It performs implicit `reset(true)`. - ASMJIT_API ~Zone() noexcept; - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - //! Reset the `Zone` invalidating all blocks allocated. - //! - //! If `releaseMemory` is true all buffers will be released to the system. - ASMJIT_API void reset(bool releaseMemory = false) noexcept; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the default block size. - ASMJIT_INLINE uint32_t getBlockSize() const noexcept { return _blockSize; } - //! Get the default block alignment. - ASMJIT_INLINE uint32_t getBlockAlignment() const noexcept { return (uint32_t)1 << _blockAlignmentShift; } - //! Get remaining size of the current block. - ASMJIT_INLINE size_t getRemainingSize() const noexcept { return (size_t)(_end - _ptr); } - - //! Get the current zone cursor (dangerous). - //! - //! This is a function that can be used to get exclusive access to the current - //! block's memory buffer. 
- ASMJIT_INLINE uint8_t* getCursor() noexcept { return _ptr; } - //! Get the end of the current zone block, only useful if you use `getCursor()`. - ASMJIT_INLINE uint8_t* getEnd() noexcept { return _end; } - - //! Set the current zone cursor to `p` (must match the current block). - //! - //! This is a counterpart of `getZoneCursor()`. - ASMJIT_INLINE void setCursor(uint8_t* p) noexcept { - ASMJIT_ASSERT(p >= _ptr && p <= _end); - _ptr = p; - } - - // -------------------------------------------------------------------------- - // [Alloc] - // -------------------------------------------------------------------------- - - //! Allocate `size` bytes of memory. - //! - //! Pointer returned is valid until the `Zone` instance is destroyed or reset - //! by calling `reset()`. If you plan to make an instance of C++ from the - //! given pointer use placement `new` and `delete` operators: - //! - //! ~~~ - //! using namespace asmjit; - //! - //! class Object { ... }; - //! - //! // Create Zone with default block size of approximately 65536 bytes. - //! Zone zone(65536 - Zone::kZoneOverhead); - //! - //! // Create your objects using zone object allocating, for example: - //! Object* obj = static_cast( zone.alloc(sizeof(Object)) ); - // - //! if (!obj) { - //! // Handle out of memory error. - //! } - //! - //! // Placement `new` and `delete` operators can be used to instantiate it. - //! new(obj) Object(); - //! - //! // ... lifetime of your objects ... - //! - //! // To destroy the instance (if required). - //! obj->~Object(); - //! - //! // Reset or destroy `Zone`. - //! zone.reset(); - //! ~~~ - ASMJIT_INLINE void* alloc(size_t size) noexcept { - uint8_t* ptr = _ptr; - size_t remainingBytes = (size_t)(_end - ptr); - - if (ASMJIT_UNLIKELY(remainingBytes < size)) - return _alloc(size); - - _ptr += size; - ASMJIT_ASSERT(_ptr <= _end); - - return static_cast(ptr); - } - - //! Allocate `size` bytes without any checks. - //! - //! Can only be called if `getRemainingSize()` returns size at least equal - //! to `size`. - ASMJIT_INLINE void* allocNoCheck(size_t size) noexcept { - ASMJIT_ASSERT((size_t)(_end - _ptr) >= size); - - uint8_t* ptr = _ptr; - _ptr += size; - return static_cast(ptr); - } - - //! Allocate `size` bytes of zeroed memory. - //! - //! See \ref alloc() for more details. - ASMJIT_API void* allocZeroed(size_t size) noexcept; - - //! Like `alloc()`, but the return pointer is casted to `T*`. - template - ASMJIT_INLINE T* allocT(size_t size = sizeof(T)) noexcept { - return static_cast(alloc(size)); - } - - //! Like `allocNoCheck()`, but the return pointer is casted to `T*`. - template - ASMJIT_INLINE T* allocNoCheckT(size_t size = sizeof(T)) noexcept { - return static_cast(allocNoCheck(size)); - } - - //! Like `allocZeroed()`, but the return pointer is casted to `T*`. - template - ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T)) noexcept { - return static_cast(allocZeroed(size)); - } - - //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`. - template - ASMJIT_INLINE T* newT() noexcept { - void* p = alloc(sizeof(T)); - if (ASMJIT_UNLIKELY(!p)) - return nullptr; - return new(p) T(); - } - //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`. - template - ASMJIT_INLINE T* newT(P1 p1) noexcept { - void* p = alloc(sizeof(T)); - if (ASMJIT_UNLIKELY(!p)) - return nullptr; - return new(p) T(p1); - } - - //! \internal - ASMJIT_API void* _alloc(size_t size) noexcept; - - //! Helper to duplicate data. 
- ASMJIT_API void* dup(const void* data, size_t size, bool nullTerminate = false) noexcept; - - //! Helper to duplicate formatted string, maximum length is 256 bytes. - ASMJIT_API char* sformat(const char* str, ...) noexcept; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint8_t* _ptr; //!< Pointer in the current block's buffer. - uint8_t* _end; //!< End of the current block's buffer. - Block* _block; //!< Current block. - -#if ASMJIT_ARCH_64BIT - uint32_t _blockSize; //!< Default size of a newly allocated block. - uint32_t _blockAlignmentShift; //!< Minimum alignment of each block. -#else - uint32_t _blockSize : 29; //!< Default size of a newly allocated block. - uint32_t _blockAlignmentShift : 3; //!< Minimum alignment of each block. -#endif -}; - -// ============================================================================ -// [asmjit::ZoneHeap] -// ============================================================================ - -//! Zone-based memory allocator that uses an existing \ref Zone and provides -//! a `release()` functionality on top of it. It uses \ref Zone only for chunks -//! that can be pooled, and uses libc `malloc()` for chunks that are large. -//! -//! The advantage of ZoneHeap is that it can allocate small chunks of memory -//! really fast, and these chunks, when released, will be reused by consecutive -//! calls to `alloc()`. Also, since ZoneHeap uses \ref Zone, you can turn any -//! \ref Zone into a \ref ZoneHeap, and use it in your \ref Pass when necessary. -//! -//! ZoneHeap is used by AsmJit containers to make containers having only -//! few elements fast (and lightweight) and to allow them to grow and use -//! dynamic blocks when require more storage. -class ZoneHeap { - ASMJIT_NONCOPYABLE(ZoneHeap) - - enum { - // In short, we pool chunks of these sizes: - // [32, 64, 96, 128, 192, 256, 320, 384, 448, 512] - - //! How many bytes per a low granularity pool (has to be at least 16). - kLoGranularity = 32, - //! Number of slots of a low granularity pool. - kLoCount = 4, - //! Maximum size of a block that can be allocated in a low granularity pool. - kLoMaxSize = kLoGranularity * kLoCount, - - //! How many bytes per a high granularity pool. - kHiGranularity = 64, - //! Number of slots of a high granularity pool. - kHiCount = 6, - //! Maximum size of a block that can be allocated in a high granularity pool. - kHiMaxSize = kLoMaxSize + kHiGranularity * kHiCount, - - //! Alignment of every pointer returned by `alloc()`. - kBlockAlignment = kLoGranularity - }; - - //! Single-linked list used to store unused chunks. - struct Slot { - //! Link to a next slot in a single-linked list. - Slot* next; - }; - - //! A block of memory that has been allocated dynamically and is not part of - //! block-list used by the allocator. This is used to keep track of all these - //! blocks so they can be freed by `reset()` if not freed explicitly. - struct DynamicBlock { - DynamicBlock* prev; - DynamicBlock* next; - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `ZoneHeap`. - //! - //! NOTE: To use it, you must first `init()` it. - ASMJIT_INLINE ZoneHeap() noexcept { - ::memset(this, 0, sizeof(*this)); - } - //! Create a new `ZoneHeap` initialized to use `zone`. 
- explicit ASMJIT_INLINE ZoneHeap(Zone* zone) noexcept { - ::memset(this, 0, sizeof(*this)); - _zone = zone; - } - //! Destroy the `ZoneHeap`. - ASMJIT_INLINE ~ZoneHeap() noexcept { reset(); } - - // -------------------------------------------------------------------------- - // [Init / Reset] - // -------------------------------------------------------------------------- - - //! Get if the `ZoneHeap` is initialized (i.e. has `Zone`). - ASMJIT_INLINE bool isInitialized() const noexcept { return _zone != nullptr; } - - //! Convenience method to initialize the `ZoneHeap` with `zone`. - //! - //! It's the same as calling `reset(zone)`. - ASMJIT_INLINE void init(Zone* zone) noexcept { reset(zone); } - - //! Reset this `ZoneHeap` and also forget about the current `Zone` which - //! is attached (if any). Reset optionally attaches a new `zone` passed, or - //! keeps the `ZoneHeap` in an uninitialized state, if `zone` is null. - ASMJIT_API void reset(Zone* zone = nullptr) noexcept; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the `Zone` the `ZoneHeap` is using, or null if it's not initialized. - ASMJIT_INLINE Zone* getZone() const noexcept { return _zone; } - - // -------------------------------------------------------------------------- - // [Utilities] - // -------------------------------------------------------------------------- - - //! \internal - //! - //! Get the slot index to be used for `size`. Returns `true` if a valid slot - //! has been written to `slot` and `allocatedSize` has been filled with slot - //! exact size (`allocatedSize` can be equal or slightly greater than `size`). - static ASMJIT_INLINE bool _getSlotIndex(size_t size, uint32_t& slot) noexcept { - ASMJIT_ASSERT(size > 0); - if (size > kHiMaxSize) - return false; - - if (size <= kLoMaxSize) - slot = static_cast((size - 1) / kLoGranularity); - else - slot = static_cast((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount; - - return true; - } - - //! \overload - static ASMJIT_INLINE bool _getSlotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) noexcept { - ASMJIT_ASSERT(size > 0); - if (size > kHiMaxSize) - return false; - - if (size <= kLoMaxSize) { - slot = static_cast((size - 1) / kLoGranularity); - allocatedSize = Utils::alignTo(size, kLoGranularity); - } - else { - slot = static_cast((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount; - allocatedSize = Utils::alignTo(size, kHiGranularity); - } - - return true; - } - - // -------------------------------------------------------------------------- - // [Alloc / Release] - // -------------------------------------------------------------------------- - - ASMJIT_API void* _alloc(size_t size, size_t& allocatedSize) noexcept; - ASMJIT_API void* _allocZeroed(size_t size, size_t& allocatedSize) noexcept; - ASMJIT_API void _releaseDynamic(void* p, size_t size) noexcept; - - //! Allocate `size` bytes of memory, ideally from an available pool. - //! - //! NOTE: `size` can't be zero, it will assert in debug mode in such case. - ASMJIT_INLINE void* alloc(size_t size) noexcept { - ASMJIT_ASSERT(isInitialized()); - size_t allocatedSize; - return _alloc(size, allocatedSize); - } - - //! Like `alloc(size)`, but provides a second argument `allocatedSize` that - //! provides a way to know how big the block returned actually is. This is - //! useful for containers to prevent growing too early. 
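The slot arithmetic in `_getSlotIndex()` above decides which free list a released chunk returns to and how far a request is rounded up, which is exactly the `allocatedSize` that the overload below reports back to callers. A compilable sketch using the same constants; the free functions are hypothetical and `alignUp()` stands in for `Utils::alignTo()`.

~~~
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical mirror of ZoneHeap::_getSlotIndex() with the same pool layout:
// pooled chunk sizes are [32, 64, 96, 128, 192, 256, 320, 384, 448, 512].
static const size_t kLoGranularity = 32;
static const size_t kLoCount       = 4;
static const size_t kLoMaxSize     = kLoGranularity * kLoCount;               // 128
static const size_t kHiGranularity = 64;
static const size_t kHiCount       = 6;
static const size_t kHiMaxSize     = kLoMaxSize + kHiGranularity * kHiCount;  // 512

static size_t alignUp(size_t x, size_t a) { return (x + a - 1) & ~(a - 1); }

static bool slotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) {
  if (size == 0 || size > kHiMaxSize)
    return false;   // Too large to pool; handled by the dynamic-block path.

  if (size <= kLoMaxSize) {
    slot = uint32_t((size - 1) / kLoGranularity);
    allocatedSize = alignUp(size, kLoGranularity);
  }
  else {
    slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + uint32_t(kLoCount);
    allocatedSize = alignUp(size, kHiGranularity);
  }
  return true;
}

int main() {
  uint32_t slot; size_t rounded;

  slotIndex(40, slot, rounded);
  std::printf("size=40  -> slot=%u size=%zu\n", slot, rounded);   // slot=1, size=64
  slotIndex(200, slot, rounded);
  std::printf("size=200 -> slot=%u size=%zu\n", slot, rounded);   // slot=5, size=256
  return 0;
}
~~~

For example, a 40-byte request lands in slot 1 and actually occupies 64 bytes, a 200-byte request lands in slot 5 and occupies 256 bytes, and anything above 512 bytes bypasses the pools entirely.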
- ASMJIT_INLINE void* alloc(size_t size, size_t& allocatedSize) noexcept { - ASMJIT_ASSERT(isInitialized()); - return _alloc(size, allocatedSize); - } - - //! Like `alloc()`, but the return pointer is casted to `T*`. - template - ASMJIT_INLINE T* allocT(size_t size = sizeof(T)) noexcept { - return static_cast(alloc(size)); - } - - //! Like `alloc(size)`, but returns zeroed memory. - ASMJIT_INLINE void* allocZeroed(size_t size) noexcept { - ASMJIT_ASSERT(isInitialized()); - - size_t allocatedSize; - return _allocZeroed(size, allocatedSize); - } - - //! Like `alloc(size, allocatedSize)`, but returns zeroed memory. - ASMJIT_INLINE void* allocZeroed(size_t size, size_t& allocatedSize) noexcept { - ASMJIT_ASSERT(isInitialized()); - - return _allocZeroed(size, allocatedSize); - } - - //! Like `allocZeroed()`, but the return pointer is casted to `T*`. - template - ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T)) noexcept { - return static_cast(allocZeroed(size)); - } - - //! Release the memory previously allocated by `alloc()`. The `size` argument - //! has to be the same as used to call `alloc()` or `allocatedSize` returned - //! by `alloc()`. - ASMJIT_INLINE void release(void* p, size_t size) noexcept { - ASMJIT_ASSERT(isInitialized()); - - ASMJIT_ASSERT(p != nullptr); - ASMJIT_ASSERT(size != 0); - - uint32_t slot; - if (_getSlotIndex(size, slot)) { - //printf("RELEASING %p of size %d (SLOT %u)\n", p, int(size), slot); - static_cast(p)->next = static_cast(_slots[slot]); - _slots[slot] = static_cast(p); - } - else { - _releaseDynamic(p, size); - } - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - Zone* _zone; //!< Zone used to allocate memory that fits into slots. - Slot* _slots[kLoCount + kHiCount]; //!< Indexed slots containing released memory. - DynamicBlock* _dynamicBlocks; //!< Dynamic blocks for larger allocations (no slots). -}; - -// ============================================================================ -// [asmjit::ZoneList] -// ============================================================================ - -//! \internal -template -class ZoneList { -public: - ASMJIT_NONCOPYABLE(ZoneList) - - // -------------------------------------------------------------------------- - // [Link] - // -------------------------------------------------------------------------- - - //! ZoneList node. - struct Link { - //! Get next node. - ASMJIT_INLINE Link* getNext() const noexcept { return _next; } - //! Get value. - ASMJIT_INLINE T getValue() const noexcept { return _value; } - //! Set value to `value`. - ASMJIT_INLINE void setValue(const T& value) noexcept { _value = value; } - - Link* _next; - T _value; - }; - - // -------------------------------------------------------------------------- - // [Appender] - // -------------------------------------------------------------------------- - - //! Specialized appender that takes advantage of ZoneList structure. You must - //! initialize it and then call done(). 
- struct Appender { - ASMJIT_INLINE Appender(ZoneList& list) noexcept { init(list); } - - ASMJIT_INLINE void init(ZoneList& list) noexcept { - pPrev = &list._first; - } - - ASMJIT_INLINE void done(ZoneList& list) noexcept { - list._last = *pPrev; - *pPrev = nullptr; - } - - ASMJIT_INLINE void append(Link* node) noexcept { - *pPrev = node; - pPrev = &node->_next; - } - - Link** pPrev; - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE ZoneList() noexcept : _first(nullptr), _last(nullptr) {} - ASMJIT_INLINE ~ZoneList() noexcept {} - - // -------------------------------------------------------------------------- - // [Data] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool isEmpty() const noexcept { return _first != nullptr; } - ASMJIT_INLINE Link* getFirst() const noexcept { return _first; } - ASMJIT_INLINE Link* getLast() const noexcept { return _last; } - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void reset() noexcept { - _first = nullptr; - _last = nullptr; - } - - ASMJIT_INLINE void prepend(Link* link) noexcept { - link->_next = _first; - if (!_first) _last = link; - _first = link; - } - - ASMJIT_INLINE void append(Link* link) noexcept { - link->_next = nullptr; - if (!_first) - _first = link; - else - _last->_next = link; - _last = link; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - Link* _first; - Link* _last; -}; - -// ============================================================================ -// [asmjit::ZoneVectorBase] -// ============================================================================ - -//! \internal -class ZoneVectorBase { -public: - ASMJIT_NONCOPYABLE(ZoneVectorBase) - -protected: - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new instance of `ZoneVectorBase`. - explicit ASMJIT_INLINE ZoneVectorBase() noexcept - : _data(nullptr), - _length(0), - _capacity(0) {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - -public: - //! Get if the vector is empty. - ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; } - //! Get vector length. - ASMJIT_INLINE size_t getLength() const noexcept { return _length; } - //! Get vector capacity. - ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; } - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - //! Makes the vector empty (won't change the capacity or data pointer). - ASMJIT_INLINE void clear() noexcept { _length = 0; } - //! Reset the vector data and set its `length` to zero. - ASMJIT_INLINE void reset() noexcept { - _data = nullptr; - _length = 0; - _capacity = 0; - } - - //! Truncate the vector to at most `n` items. 
- ASMJIT_INLINE void truncate(size_t n) noexcept { - _length = std::min(_length, n); - } - - // -------------------------------------------------------------------------- - // [Memory Management] - // -------------------------------------------------------------------------- - -protected: - ASMJIT_INLINE void _release(ZoneHeap* heap, size_t sizeOfT) noexcept { - if (_data != nullptr) { - heap->release(_data, _capacity * sizeOfT); - reset(); - } - } - - ASMJIT_API Error _grow(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept; - ASMJIT_API Error _resize(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept; - ASMJIT_API Error _reserve(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - -public: - void* _data; //!< Vector data. - size_t _length; //!< Length of the vector. - size_t _capacity; //!< Capacity of the vector. -}; - -// ============================================================================ -// [asmjit::ZoneVector] -// ============================================================================ - -//! Template used to store and manage array of Zone allocated data. -//! -//! This template has these advantages over other std::vector<>: -//! - Always non-copyable (designed to be non-copyable, we want it). -//! - No copy-on-write (some implementations of STL can use it). -//! - Optimized for working only with POD types. -//! - Uses ZoneHeap, thus small vectors are basically for free. -template -class ZoneVector : public ZoneVectorBase { -public: - ASMJIT_NONCOPYABLE(ZoneVector) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new instance of `ZoneVector`. - explicit ASMJIT_INLINE ZoneVector() noexcept : ZoneVectorBase() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get data. - ASMJIT_INLINE T* getData() noexcept { return static_cast(_data); } - //! \overload - ASMJIT_INLINE const T* getData() const noexcept { return static_cast(_data); } - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - //! Prepend `item` to the vector. - Error prepend(ZoneHeap* heap, const T& item) noexcept { - if (ASMJIT_UNLIKELY(_length == _capacity)) - ASMJIT_PROPAGATE(grow(heap, 1)); - - ::memmove(static_cast(_data) + 1, _data, _length * sizeof(T)); - ::memcpy(_data, &item, sizeof(T)); - - _length++; - return kErrorOk; - } - - //! Insert an `item` at the specified `index`. - Error insert(ZoneHeap* heap, size_t index, const T& item) noexcept { - ASMJIT_ASSERT(index <= _length); - - if (ASMJIT_UNLIKELY(_length == _capacity)) - ASMJIT_PROPAGATE(grow(heap, 1)); - - T* dst = static_cast(_data) + index; - ::memmove(dst + 1, dst, _length - index); - ::memcpy(dst, &item, sizeof(T)); - - _length++; - return kErrorOk; - } - - //! Append `item` to the vector. 
- Error append(ZoneHeap* heap, const T& item) noexcept { - if (ASMJIT_UNLIKELY(_length == _capacity)) - ASMJIT_PROPAGATE(grow(heap, 1)); - - ::memcpy(static_cast(_data) + _length, &item, sizeof(T)); - - _length++; - return kErrorOk; - } - - Error concat(ZoneHeap* heap, const ZoneVector& other) noexcept { - size_t count = other._length; - if (_capacity - _length < count) - ASMJIT_PROPAGATE(grow(heap, count)); - - ::memcpy(static_cast(_data) + _length, other._data, count * sizeof(T)); - - _length += count; - return kErrorOk; - } - - //! Prepend `item` to the vector (unsafe case). - //! - //! Can only be used together with `willGrow()`. If `willGrow(N)` returns - //! `kErrorOk` then N elements can be added to the vector without checking - //! if there is a place for them. Used mostly internally. - ASMJIT_INLINE void prependUnsafe(const T& item) noexcept { - ASMJIT_ASSERT(_length < _capacity); - T* data = static_cast(_data); - - if (_length) - ::memmove(data + 1, data, _length * sizeof(T)); - - ::memcpy(data, &item, sizeof(T)); - _length++; - } - - //! Append `item` to the vector (unsafe case). - //! - //! Can only be used together with `willGrow()`. If `willGrow(N)` returns - //! `kErrorOk` then N elements can be added to the vector without checking - //! if there is a place for them. Used mostly internally. - ASMJIT_INLINE void appendUnsafe(const T& item) noexcept { - ASMJIT_ASSERT(_length < _capacity); - - ::memcpy(static_cast(_data) + _length, &item, sizeof(T)); - _length++; - } - - //! Concatenate all items of `other` at the end of the vector. - ASMJIT_INLINE void concatUnsafe(const ZoneVector& other) noexcept { - size_t count = other._length; - ASMJIT_ASSERT(_capacity - _length >= count); - - ::memcpy(static_cast(_data) + _length, other._data, count * sizeof(T)); - _length += count; - } - - //! Get index of `val` or `kInvalidIndex` if not found. - ASMJIT_INLINE size_t indexOf(const T& val) const noexcept { - const T* data = static_cast(_data); - size_t length = _length; - - for (size_t i = 0; i < length; i++) - if (data[i] == val) - return i; - - return Globals::kInvalidIndex; - } - - //! Get whether the vector contains `val`. - ASMJIT_INLINE bool contains(const T& val) const noexcept { - return indexOf(val) != Globals::kInvalidIndex; - } - - //! Remove item at index `i`. - ASMJIT_INLINE void removeAt(size_t i) noexcept { - ASMJIT_ASSERT(i < _length); - - T* data = static_cast(_data) + i; - _length--; - ::memmove(data, data + 1, _length - i); - } - - //! Swap this pod-vector with `other`. - ASMJIT_INLINE void swap(ZoneVector& other) noexcept { - Utils::swap(_length, other._length); - Utils::swap(_capacity, other._capacity); - Utils::swap(_data, other._data); - } - - //! Get item at index `i` (const). - ASMJIT_INLINE const T& getAt(size_t i) const noexcept { - ASMJIT_ASSERT(i < _length); - return getData()[i]; - } - - //! Get item at index `i`. - ASMJIT_INLINE T& operator[](size_t i) noexcept { - ASMJIT_ASSERT(i < _length); - return getData()[i]; - } - - //! Get item at index `i`. - ASMJIT_INLINE const T& operator[](size_t i) const noexcept { - ASMJIT_ASSERT(i < _length); - return getData()[i]; - } - - // -------------------------------------------------------------------------- - // [Memory Management] - // -------------------------------------------------------------------------- - - //! Release the memory held by `ZoneVector` back to the `heap`. - ASMJIT_INLINE void release(ZoneHeap* heap) noexcept { _release(heap, sizeof(T)); } - - //! 
Called to grow the buffer to fit at least `n` elements more. - ASMJIT_INLINE Error grow(ZoneHeap* heap, size_t n) noexcept { return ZoneVectorBase::_grow(heap, sizeof(T), n); } - - //! Resize the vector to hold `n` elements. - //! - //! If `n` is greater than the current length then the additional elements' - //! content will be initialized to zero. If `n` is less than the current - //! length then the vector will be truncated to exactly `n` elements. - ASMJIT_INLINE Error resize(ZoneHeap* heap, size_t n) noexcept { return ZoneVectorBase::_resize(heap, sizeof(T), n); } - - //! Realloc internal array to fit at least `n` items. - ASMJIT_INLINE Error reserve(ZoneHeap* heap, size_t n) noexcept { return ZoneVectorBase::_reserve(heap, sizeof(T), n); } - - ASMJIT_INLINE Error willGrow(ZoneHeap* heap, size_t n = 1) noexcept { - return _capacity - _length < n ? grow(heap, n) : static_cast(kErrorOk); - } -}; - -// ============================================================================ -// [asmjit::ZoneBitVector] -// ============================================================================ - -class ZoneBitVector { -public: - ASMJIT_NONCOPYABLE(ZoneBitVector) - - //! Storage used to store a pack of bits (should by compatible with a machine word). - typedef uintptr_t BitWord; - enum { kBitsPerWord = static_cast(sizeof(BitWord)) * 8 }; - - static ASMJIT_INLINE size_t _wordsPerBits(size_t nBits) noexcept { - return ((nBits + kBitsPerWord) / kBitsPerWord) - 1; - } - - // Return all bits zero if 0 and all bits set if 1. - static ASMJIT_INLINE BitWord _patternFromBit(bool bit) noexcept { - BitWord bitAsWord = static_cast(bit); - ASMJIT_ASSERT(bitAsWord == 0 || bitAsWord == 1); - return static_cast(0) - bitAsWord; - } - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - explicit ASMJIT_INLINE ZoneBitVector() noexcept : - _data(nullptr), - _length(0), - _capacity(0) {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get if the bit-vector is empty (has no bits). - ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; } - //! Get a length of this bit-vector (in bits). - ASMJIT_INLINE size_t getLength() const noexcept { return _length; } - //! Get a capacity of this bit-vector (in bits). - ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; } - - //! Get data. - ASMJIT_INLINE BitWord* getData() noexcept { return _data; } - //! 
\overload - ASMJIT_INLINE const BitWord* getData() const noexcept { return _data; } - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void clear() noexcept { - _length = 0; - } - - ASMJIT_INLINE void reset() noexcept { - _data = nullptr; - _length = 0; - _capacity = 0; - } - - ASMJIT_INLINE void truncate(size_t newLength) noexcept { - _length = std::min(_length, newLength); - _clearUnusedBits(); - } - - ASMJIT_INLINE bool getAt(size_t index) const noexcept { - ASMJIT_ASSERT(index < _length); - - size_t idx = index / kBitsPerWord; - size_t bit = index % kBitsPerWord; - return static_cast((_data[idx] >> bit) & 1); - } - - ASMJIT_INLINE void setAt(size_t index, bool value) noexcept { - ASMJIT_ASSERT(index < _length); - - size_t idx = index / kBitsPerWord; - size_t bit = index % kBitsPerWord; - if (value) - _data[idx] |= static_cast(1) << bit; - else - _data[idx] &= ~(static_cast(1) << bit); - } - - ASMJIT_INLINE void toggleAt(size_t index) noexcept { - ASMJIT_ASSERT(index < _length); - - size_t idx = index / kBitsPerWord; - size_t bit = index % kBitsPerWord; - _data[idx] ^= static_cast(1) << bit; - } - - ASMJIT_INLINE Error append(ZoneHeap* heap, bool value) noexcept { - size_t index = _length; - if (ASMJIT_UNLIKELY(index >= _capacity)) - return _append(heap, value); - - size_t idx = index / kBitsPerWord; - size_t bit = index % kBitsPerWord; - - if (bit == 0) - _data[idx] = static_cast(value) << bit; - else - _data[idx] |= static_cast(value) << bit; - - _length++; - return kErrorOk; - } - - ASMJIT_API Error fill(size_t fromIndex, size_t toIndex, bool value) noexcept; - - ASMJIT_INLINE void and_(const ZoneBitVector& other) noexcept { - BitWord* dst = _data; - const BitWord* src = other._data; - - size_t numWords = (std::min(_length, other._length) + kBitsPerWord - 1) / kBitsPerWord; - for (size_t i = 0; i < numWords; i++) - dst[i] = dst[i] & src[i]; - _clearUnusedBits(); - } - - ASMJIT_INLINE void andNot(const ZoneBitVector& other) noexcept { - BitWord* dst = _data; - const BitWord* src = other._data; - - size_t numWords = (std::min(_length, other._length) + kBitsPerWord - 1) / kBitsPerWord; - for (size_t i = 0; i < numWords; i++) - dst[i] = dst[i] & ~src[i]; - _clearUnusedBits(); - } - - ASMJIT_INLINE void or_(const ZoneBitVector& other) noexcept { - BitWord* dst = _data; - const BitWord* src = other._data; - - size_t numWords = (std::min(_length, other._length) + kBitsPerWord - 1) / kBitsPerWord; - for (size_t i = 0; i < numWords; i++) - dst[i] = dst[i] | src[i]; - _clearUnusedBits(); - } - - ASMJIT_INLINE void _clearUnusedBits() noexcept { - size_t idx = _length / kBitsPerWord; - size_t bit = _length % kBitsPerWord; - - if (!bit) return; - _data[idx] &= (static_cast(1) << bit) - 1U; - } - - // -------------------------------------------------------------------------- - // [Memory Management] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void release(ZoneHeap* heap) noexcept { - if (_data != nullptr) { - heap->release(_data, _capacity / 8); - reset(); - } - } - - ASMJIT_INLINE Error resize(ZoneHeap* heap, size_t newLength, bool newBitsValue = false) noexcept { - return _resize(heap, newLength, newLength, newBitsValue); - } - - ASMJIT_API Error _resize(ZoneHeap* heap, size_t newLength, size_t idealCapacity, bool newBitsValue) noexcept; - ASMJIT_API Error _append(ZoneHeap* heap, bool value) noexcept; - - 
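All of the bit accessors above reduce a bit index to a word index plus a position inside that word and then mask; the same trick trims stray bits after a truncation. A standalone sketch of that mapping, with hypothetical free functions standing in for the members:

~~~
#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef uintptr_t BitWord;
static const size_t kBitsPerWord = sizeof(BitWord) * 8;

// Hypothetical standalone versions of the ZoneBitVector accessors above.
static void setAt(BitWord* data, size_t index, bool value) {
  size_t idx = index / kBitsPerWord;   // which word
  size_t bit = index % kBitsPerWord;   // which bit inside that word

  if (value)
    data[idx] |=  (BitWord(1) << bit);
  else
    data[idx] &= ~(BitWord(1) << bit);
}

static bool getAt(const BitWord* data, size_t index) {
  return bool((data[index / kBitsPerWord] >> (index % kBitsPerWord)) & 1u);
}

// Clears bits at index >= length in the last used word, like _clearUnusedBits().
static void clearUnusedBits(BitWord* data, size_t length) {
  size_t bit = length % kBitsPerWord;
  if (bit)
    data[length / kBitsPerWord] &= (BitWord(1) << bit) - 1u;
}

int main() {
  BitWord words[2] = { 0, 0 };

  setAt(words, 3, true);
  setAt(words, kBitsPerWord + 1, true);   // lands in words[1], bit 1
  std::printf("%d %d\n", int(getAt(words, 3)), int(getAt(words, kBitsPerWord + 1)));  // 1 1

  clearUnusedBits(words, kBitsPerWord + 1);   // valid indexes are 0..kBitsPerWord
  std::printf("%d\n", int(getAt(words, kBitsPerWord + 1)));                           // 0
  return 0;
}
~~~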
// -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - BitWord* _data; //!< Bits. - size_t _length; //!< Length of the bit-vector (in bits). - size_t _capacity; //!< Capacity of the bit-vector (in bits). -}; - -// ============================================================================ -// [asmjit::ZoneHashNode] -// ============================================================================ - -//! Node used by \ref ZoneHash<> template. -//! -//! You must provide function `bool eq(const Key& key)` in order to make -//! `ZoneHash::get()` working. -class ZoneHashNode { -public: - ASMJIT_INLINE ZoneHashNode(uint32_t hVal = 0) noexcept - : _hashNext(nullptr), - _hVal(hVal) {} - - //! Next node in the chain, null if it terminates the chain. - ZoneHashNode* _hashNext; - //! Key hash. - uint32_t _hVal; - //! Should be used by Node that inherits ZoneHashNode, it aligns ZoneHashNode. - uint32_t _customData; -}; - -// ============================================================================ -// [asmjit::ZoneHashBase] -// ============================================================================ - -class ZoneHashBase { -public: - ASMJIT_NONCOPYABLE(ZoneHashBase) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE ZoneHashBase(ZoneHeap* heap) noexcept { - _heap = heap; - _size = 0; - _bucketsCount = 1; - _bucketsGrow = 1; - _data = _embedded; - _embedded[0] = nullptr; - } - ASMJIT_INLINE ~ZoneHashBase() noexcept { reset(nullptr); } - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool isInitialized() const noexcept { return _heap != nullptr; } - ASMJIT_API void reset(ZoneHeap* heap) noexcept; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get a `ZoneHeap` attached to this container. - ASMJIT_INLINE ZoneHeap* getHeap() const noexcept { return _heap; } - - ASMJIT_INLINE size_t getSize() const noexcept { return _size; } - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - ASMJIT_API void _rehash(uint32_t newCount) noexcept; - ASMJIT_API ZoneHashNode* _put(ZoneHashNode* node) noexcept; - ASMJIT_API ZoneHashNode* _del(ZoneHashNode* node) noexcept; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - ZoneHeap* _heap; //!< ZoneHeap used to allocate data. - size_t _size; //!< Count of records inserted into the hash table. - uint32_t _bucketsCount; //!< Count of hash buckets. - uint32_t _bucketsGrow; //!< When buckets array should grow. - - ZoneHashNode** _data; //!< Buckets data. - ZoneHashNode* _embedded[1]; //!< Embedded data, used by empty hash tables. -}; - -// ============================================================================ -// [asmjit::ZoneHash] -// ============================================================================ - -//! 
Low-level hash table specialized for storing string keys and POD values. -//! -//! This hash table allows duplicates to be inserted (the API is so low -//! level that it's up to you if you allow it or not, as you should first -//! `get()` the node and then modify it or insert a new node by using `put()`, -//! depending on the intention). -template -class ZoneHash : public ZoneHashBase { -public: - explicit ASMJIT_INLINE ZoneHash(ZoneHeap* heap = nullptr) noexcept - : ZoneHashBase(heap) {} - ASMJIT_INLINE ~ZoneHash() noexcept {} - - template - ASMJIT_INLINE Node* get(const Key& key) const noexcept { - uint32_t hMod = key.hVal % _bucketsCount; - Node* node = static_cast(_data[hMod]); - - while (node && !key.matches(node)) - node = static_cast(node->_hashNext); - return node; - } - - ASMJIT_INLINE Node* put(Node* node) noexcept { return static_cast(_put(node)); } - ASMJIT_INLINE Node* del(Node* node) noexcept { return static_cast(_del(node)); } -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_ZONE_H diff --git a/src/asmjit/core.h b/src/asmjit/core.h new file mode 100644 index 0000000..4aaa579 --- /dev/null +++ b/src/asmjit/core.h @@ -0,0 +1,85 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_H +#define _ASMJIT_CORE_H + +//! \defgroup asmjit_core Core +//! \brief Core API. +//! +//! API that provides classes and functions not specific to any architecture. + +//! \defgroup asmjit_builder Builder +//! \brief Builder API. +//! +//! Both Builder and Compiler are emitters that emit everything to a representation +//! that allows further processing. The code stored in such representation is +//! completely safe to be patched, simplified, reordered, obfuscated, removed, +//! injected, analyzed, or processed some other way. Each instruction, label, +//! directive, or other building block is stored as \ref BaseNode (or derived +//! class like \ref InstNode or \ref LabelNode) and contains all the information +//! necessary to pass that node later to the Assembler. + +//! \defgroup asmjit_compiler Compiler +//! \brief Compiler API. +//! +//! Compiler tool is built on top of a \ref asmjit_builder API and adds register +//! allocation and support for defining and calling functions into it. At the +//! moment it's the easiest way to generate some code as most architecture and +//! OS specific stuff is properly abstracted, however, abstractions also mean +//! that not everything is possible with the Compiler. + +//! \defgroup asmjit_func Function +//! \brief Function API. + +//! \defgroup asmjit_jit JIT +//! \brief JIT API and Virtual Memory Management. + +//! \defgroup asmjit_zone Zone +//! \brief Zone allocator and zone allocated containers. + +//! \defgroup asmjit_support Support +//! \brief Support API. + +//! \cond INTERNAL +//! \defgroup asmjit_ra RA +//! \brief Register allocator internals. +//! 
\endcond + +#include "./core/globals.h" + +#include "./core/arch.h" +#include "./core/assembler.h" +#include "./core/builder.h" +#include "./core/callconv.h" +#include "./core/codeholder.h" +#include "./core/compiler.h" +#include "./core/constpool.h" +#include "./core/cpuinfo.h" +#include "./core/datatypes.h" +#include "./core/emitter.h" +#include "./core/features.h" +#include "./core/func.h" +#include "./core/inst.h" +#include "./core/jitallocator.h" +#include "./core/jitruntime.h" +#include "./core/logging.h" +#include "./core/operand.h" +#include "./core/osutils.h" +#include "./core/string.h" +#include "./core/support.h" +#include "./core/target.h" +#include "./core/type.h" +#include "./core/virtmem.h" +#include "./core/zone.h" +#include "./core/zonehash.h" +#include "./core/zonelist.h" +#include "./core/zonetree.h" +#include "./core/zonestack.h" +#include "./core/zonestring.h" +#include "./core/zonevector.h" + +#endif // _ASMJIT_CORE_H diff --git a/src/asmjit/core/arch.cpp b/src/asmjit/core/arch.cpp new file mode 100644 index 0000000..0f94a53 --- /dev/null +++ b/src/asmjit/core/arch.cpp @@ -0,0 +1,160 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#define ASMJIT_EXPORTS + +#include "../core/arch.h" +#include "../core/support.h" +#include "../core/type.h" + +#ifdef ASMJIT_BUILD_X86 + #include "../x86/x86operand.h" +#endif + +#ifdef ASMJIT_BUILD_ARM + #include "../arm/armoperand.h" +#endif + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::ArchInfo] +// ============================================================================ + +// NOTE: Keep `const constexpr` otherwise MSC would not compile this code correctly. +static const constexpr uint32_t archInfoTable[] = { + // <--------------------+---------------------+-------------------+-------+ + // | Type | SubType | GPInfo| + // <--------------------+---------------------+-------------------+-------+ + Support::bytepack32_4x8(ArchInfo::kIdNone , ArchInfo::kSubIdNone, 0, 0), + Support::bytepack32_4x8(ArchInfo::kIdX86 , ArchInfo::kSubIdNone, 4, 8), + Support::bytepack32_4x8(ArchInfo::kIdX64 , ArchInfo::kSubIdNone, 8, 16), + Support::bytepack32_4x8(ArchInfo::kIdA32 , ArchInfo::kSubIdNone, 4, 16), + Support::bytepack32_4x8(ArchInfo::kIdA64 , ArchInfo::kSubIdNone, 8, 32) +}; + +ASMJIT_FAVOR_SIZE void ArchInfo::init(uint32_t id, uint32_t subId) noexcept { + uint32_t index = id < ASMJIT_ARRAY_SIZE(archInfoTable) ? id : uint32_t(0); + + // Make sure the `archInfoTable` array is correctly indexed. + _signature = archInfoTable[index]; + ASMJIT_ASSERT(_id == index); + + // Even if the architecture is not known we setup its id and sub-id, + // however, such architecture is not really useful. + _id = uint8_t(id); + _subId = uint8_t(subId); +} + +// ============================================================================ +// [asmjit::ArchUtils] +// ============================================================================ + +ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t archId, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept { + uint32_t typeId = typeIdInOut; + + // Zero the signature so it's clear in case that typeId is not invalid. + regInfo._signature = 0; + + // TODO: Move to X86 backend. + #ifdef ASMJIT_BUILD_X86 + if (ArchInfo::isX86Family(archId)) { + // Passed RegType instead of TypeId? 
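Returning to the `archInfoTable` added above: each entry packs the architecture id, sub-id, default GP register size and GP register count into a single 32-bit signature that `ArchInfo` later reads back through its byte-wise union. A rough standalone sketch of that packing, assuming little-endian byte order for simplicity (the real `Support::bytepack32_4x8` is expected to also account for host byte order so the union fields line up):

~~~
#include <cstdint>
#include <cstdio>

// Hypothetical little-endian packing of (id, subId, gpSize, gpCount).
static uint32_t pack4x8(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
  return a | (b << 8) | (c << 16) | (d << 24);
}

int main() {
  // Mirrors the X64 row: id=2, subId=0, gpSize=8, gpCount=16.
  uint32_t sig = pack4x8(2, 0, 8, 16);

  std::printf("id=%u subId=%u gpSize=%u gpCount=%u\n",
              unsigned(sig & 0xFFu), unsigned((sig >> 8) & 0xFFu),
              unsigned((sig >> 16) & 0xFFu), unsigned((sig >> 24) & 0xFFu));
  return 0;
}
~~~

Storing the whole record as one integer is what lets `ArchInfo::init()` above do a single table load and then assert on the decoded id.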
+ if (typeId <= BaseReg::kTypeMax) + typeId = x86::opData.archRegs.regTypeToTypeId[typeId]; + + if (ASMJIT_UNLIKELY(!Type::isValid(typeId))) + return DebugUtils::errored(kErrorInvalidTypeId); + + // First normalize architecture dependent types. + if (Type::isAbstract(typeId)) { + if (typeId == Type::kIdIntPtr) + typeId = (archId == ArchInfo::kIdX86) ? Type::kIdI32 : Type::kIdI64; + else + typeId = (archId == ArchInfo::kIdX86) ? Type::kIdU32 : Type::kIdU64; + } + + // Type size helps to construct all groupss of registers. If the size is zero + // then the TypeId is invalid. + uint32_t size = Type::sizeOf(typeId); + if (ASMJIT_UNLIKELY(!size)) + return DebugUtils::errored(kErrorInvalidTypeId); + + if (ASMJIT_UNLIKELY(typeId == Type::kIdF80)) + return DebugUtils::errored(kErrorInvalidUseOfF80); + + uint32_t regType = 0; + + switch (typeId) { + case Type::kIdI8: + case Type::kIdU8: + regType = x86::Reg::kTypeGpbLo; + break; + + case Type::kIdI16: + case Type::kIdU16: + regType = x86::Reg::kTypeGpw; + break; + + case Type::kIdI32: + case Type::kIdU32: + regType = x86::Reg::kTypeGpd; + break; + + case Type::kIdI64: + case Type::kIdU64: + if (archId == ArchInfo::kIdX86) + return DebugUtils::errored(kErrorInvalidUseOfGpq); + + regType = x86::Reg::kTypeGpq; + break; + + // F32 and F64 are always promoted to use vector registers. + case Type::kIdF32: + typeId = Type::kIdF32x1; + regType = x86::Reg::kTypeXmm; + break; + + case Type::kIdF64: + typeId = Type::kIdF64x1; + regType = x86::Reg::kTypeXmm; + break; + + // Mask registers {k}. + case Type::kIdMask8: + case Type::kIdMask16: + case Type::kIdMask32: + case Type::kIdMask64: + regType = x86::Reg::kTypeKReg; + break; + + // MMX registers. + case Type::kIdMmx32: + case Type::kIdMmx64: + regType = x86::Reg::kTypeMm; + break; + + // XMM|YMM|ZMM registers. + default: + if (size <= 16) + regType = x86::Reg::kTypeXmm; + else if (size == 32) + regType = x86::Reg::kTypeYmm; + else + regType = x86::Reg::kTypeZmm; + break; + } + + typeIdInOut = typeId; + regInfo._signature = x86::opData.archRegs.regInfo[regType].signature(); + return kErrorOk; + } + #endif + + return DebugUtils::errored(kErrorInvalidArch); +} + +ASMJIT_END_NAMESPACE diff --git a/src/asmjit/core/arch.h b/src/asmjit/core/arch.h new file mode 100644 index 0000000..fc1bcaa --- /dev/null +++ b/src/asmjit/core/arch.h @@ -0,0 +1,187 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_ARCH_H +#define _ASMJIT_CORE_ARCH_H + +#include "../core/globals.h" +#include "../core/operand.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_core +//! \{ + +// ============================================================================ +// [asmjit::ArchInfo] +// ============================================================================ + +class ArchInfo { +public: + union { + struct { + //! Architecture id. + uint8_t _id; + //! Architecture sub-id. + uint8_t _subId; + //! Default size of a general purpose register. + uint8_t _gpSize; + //! Count of all general purpose registers. + uint8_t _gpCount; + }; + //! Architecture signature (32-bit int). + uint32_t _signature; + }; + + //! Architecture id. + enum Id : uint32_t { + kIdNone = 0, //!< No/Unknown architecture. + + // X86 architectures. + kIdX86 = 1, //!< X86 architecture (32-bit). + kIdX64 = 2, //!< X64 architecture (64-bit) (AMD64). + + // ARM architectures. + kIdA32 = 3, //!< ARM 32-bit architecture (AArch32/ARM/THUMB). 
+ kIdA64 = 4, //!< ARM 64-bit architecture (AArch64). + + //! Architecture detected at compile-time (architecture of the host). + kIdHost = ASMJIT_ARCH_X86 == 32 ? kIdX86 : + ASMJIT_ARCH_X86 == 64 ? kIdX64 : + ASMJIT_ARCH_ARM == 32 ? kIdA32 : + ASMJIT_ARCH_ARM == 64 ? kIdA64 : kIdNone + }; + + //! Architecture sub-type or execution mode. + enum SubType : uint32_t { + kSubIdNone = 0, //!< Default mode (or no specific mode). + + // X86 sub-types. + kSubIdX86_AVX = 1, //!< Code generation uses AVX by default (VEC instructions). + kSubIdX86_AVX2 = 2, //!< Code generation uses AVX2 by default (VEC instructions). + kSubIdX86_AVX512 = 3, //!< Code generation uses AVX-512F by default (+32 vector regs). + kSubIdX86_AVX512VL = 4, //!< Code generation uses AVX-512F-VL by default (+VL extensions). + + // ARM sub-types. + kSubIdA32_Thumb = 8, //!< THUMB|THUMBv2 sub-type (only ARM in 32-bit mode). + + #if (ASMJIT_ARCH_X86) && defined(__AVX512VL__) + kSubIdHost = kSubIdX86_AVX512VL + #elif (ASMJIT_ARCH_X86) && defined(__AVX512F__) + kSubIdHost = kSubIdX86_AVX512 + #elif (ASMJIT_ARCH_X86) && defined(__AVX2__) + kSubIdHost = kSubIdX86_AVX2 + #elif (ASMJIT_ARCH_X86) && defined(__AVX__) + kSubIdHost = kSubIdX86_AVX + #elif (ASMJIT_ARCH_ARM == 32) && (defined(_M_ARMT) || defined(__thumb__) || defined(__thumb2__)) + kSubIdHost = kSubIdA32_Thumb + #else + kSubIdHost = 0 + #endif + }; + + //! \name Construction & Destruction + //! \{ + + inline ArchInfo() noexcept : _signature(0) {} + inline ArchInfo(const ArchInfo& other) noexcept : _signature(other._signature) {} + inline explicit ArchInfo(uint32_t type, uint32_t subType = kSubIdNone) noexcept { init(type, subType); } + inline explicit ArchInfo(Globals::NoInit_) noexcept {} + + inline static ArchInfo host() noexcept { return ArchInfo(kIdHost, kSubIdHost); } + + inline bool isInitialized() const noexcept { return _id != kIdNone; } + + ASMJIT_API void init(uint32_t type, uint32_t subType = kSubIdNone) noexcept; + inline void reset() noexcept { _signature = 0; } + + //! \} + + //! \name Overloaded Operators + //! \{ + + inline ArchInfo& operator=(const ArchInfo& other) noexcept = default; + + inline bool operator==(const ArchInfo& other) const noexcept { return _signature == other._signature; } + inline bool operator!=(const ArchInfo& other) const noexcept { return _signature != other._signature; } + + //! \} + + //! \name Accessors + //! \{ + + //! Returns the architecture id, see `Id`. + inline uint32_t archId() const noexcept { return _id; } + + //! Returns the architecture sub-id, see `SubType`. + //! + //! X86 & X64 + //! --------- + //! + //! Architecture subtype describe the highest instruction-set level that can + //! be used. + //! + //! A32 & A64 + //! --------- + //! + //! Architecture mode means the instruction encoding to be used when generating + //! machine code, thus mode can be used to force generation of THUMB and THUMBv2 + //! encoding or regular ARM encoding. + inline uint32_t archSubId() const noexcept { return _subId; } + + //! Tests whether this architecture is 32-bit. + inline bool is32Bit() const noexcept { return _gpSize == 4; } + //! Tests whether this architecture is 64-bit. + inline bool is64Bit() const noexcept { return _gpSize == 8; } + + //! Tests whether this architecture is X86, X64. + inline bool isX86Family() const noexcept { return isX86Family(_id); } + //! Tests whether this architecture is ARM32 or ARM64. + inline bool isArmFamily() const noexcept { return isArmFamily(_id); } + + //! 
Returns the native size of a general-purpose register. + inline uint32_t gpSize() const noexcept { return _gpSize; } + //! Returns number of general-purpose registers. + inline uint32_t gpCount() const noexcept { return _gpCount; } + + //! \} + + //! \name Static Functions + //! \{ + + static inline bool isX86Family(uint32_t archId) noexcept { return archId >= kIdX86 && archId <= kIdX64; } + static inline bool isArmFamily(uint32_t archId) noexcept { return archId >= kIdA32 && archId <= kIdA64; } + + //! \} +}; + +// ============================================================================ +// [asmjit::ArchRegs] +// ============================================================================ + +//! Information about all architecture registers. +struct ArchRegs { + //! Register information and signatures indexed by `BaseReg::RegType`. + RegInfo regInfo[BaseReg::kTypeMax + 1]; + //! Count (maximum) of registers per `BaseReg::RegType`. + uint8_t regCount[BaseReg::kTypeMax + 1]; + //! Converts RegType to TypeId, see `Type::Id`. + uint8_t regTypeToTypeId[BaseReg::kTypeMax + 1]; +}; + +// ============================================================================ +// [asmjit::ArchUtils] +// ============================================================================ + +struct ArchUtils { + ASMJIT_API static Error typeIdToRegInfo(uint32_t archId, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept; +}; + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_ARCH_H diff --git a/src/asmjit/core/assembler.cpp b/src/asmjit/core/assembler.cpp new file mode 100644 index 0000000..9e7eaf2 --- /dev/null +++ b/src/asmjit/core/assembler.cpp @@ -0,0 +1,491 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#define ASMJIT_EXPORTS + +#include "../core/assembler.h" +#include "../core/codebufferwriter_p.h" +#include "../core/constpool.h" +#include "../core/logging.h" +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::BaseAssembler - Construction / Destruction] +// ============================================================================ + +BaseAssembler::BaseAssembler() noexcept + : BaseEmitter(kTypeAssembler), + _section(nullptr), + _bufferData(nullptr), + _bufferEnd(nullptr), + _bufferPtr(nullptr), + _op4(), + _op5() {} +BaseAssembler::~BaseAssembler() noexcept {} + +// ============================================================================ +// [asmjit::BaseAssembler - Buffer Management] +// ============================================================================ + +Error BaseAssembler::setOffset(size_t offset) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + size_t size = Support::max(_section->bufferSize(), this->offset()); + if (ASMJIT_UNLIKELY(offset > size)) + return reportError(DebugUtils::errored(kErrorInvalidArgument)); + + _bufferPtr = _bufferData + offset; + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseAssembler - Logging] +// ============================================================================ + +#ifndef ASMJIT_NO_LOGGING +static void BaseAssembler_logLabel(BaseAssembler* self, const Label& label) noexcept { + Logger* logger = self->_code->_logger; + + StringTmp<512> sb; + size_t binSize = logger->hasFlag(FormatOptions::kFlagMachineCode) ? 
size_t(0) : std::numeric_limits<size_t>::max(); + + sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationLabel)); + Logging::formatLabel(sb, logger->flags(), self, label.id()); + sb.appendChar(':'); + Logging::formatLine(sb, nullptr, binSize, 0, 0, self->_inlineComment); + logger->log(sb.data(), sb.size()); +} +#endif + +// ============================================================================ +// [asmjit::BaseAssembler - Section Management] +// ============================================================================ + +static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noexcept { + uint8_t* p = section->_buffer._data; + + self->_section = section; + self->_bufferData = p; + self->_bufferPtr = p + section->_buffer._size; + self->_bufferEnd = p + section->_buffer._capacity; +} + +Error BaseAssembler::section(Section* section) { + if (ASMJIT_UNLIKELY(!_code)) + return reportError(DebugUtils::errored(kErrorNotInitialized)); + + if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section) + return reportError(DebugUtils::errored(kErrorInvalidSection)); + + #ifndef ASMJIT_NO_LOGGING + if (hasEmitterOption(kOptionLoggingEnabled)) + _code->_logger->logf(".section %s {#%u}\n", section->name(), section->id()); + #endif + + BaseAssembler_initSection(this, section); + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseAssembler - Label Management] +// ============================================================================ + +Label BaseAssembler::newLabel() { + uint32_t labelId = Globals::kInvalidId; + if (ASMJIT_LIKELY(_code)) { + LabelEntry* le; + Error err = _code->newLabelEntry(&le); + if (ASMJIT_UNLIKELY(err)) + reportError(err); + labelId = le->id(); + } + return Label(labelId); +} + +Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) { + uint32_t labelId = Globals::kInvalidId; + if (ASMJIT_LIKELY(_code)) { + LabelEntry* le; + Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId); + if (ASMJIT_UNLIKELY(err)) + reportError(err); + labelId = le->id(); + } + return Label(labelId); +} + +Error BaseAssembler::bind(const Label& label) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + Error err = _code->bindLabel(label, _section->id(), offset()); + + #ifndef ASMJIT_NO_LOGGING + if (hasEmitterOption(kOptionLoggingEnabled)) + BaseAssembler_logLabel(this, label); + #endif + + resetInlineComment(); + if (err) + return reportError(err); + + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseAssembler - Emit (Low-Level)] +// ============================================================================ + +Error BaseAssembler::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) { + _op4 = o4; + _op5 = o5; + _instOptions |= BaseInst::kOptionOp4Op5Used; + return _emit(instId, o0, o1, o2, o3); +} + +Error BaseAssembler::_emitOpArray(uint32_t instId, const Operand_* operands, size_t count) { + const Operand_* o0 = &operands[0]; + const Operand_* o1 = &operands[1]; + const Operand_* o2 = &operands[2]; + const Operand_* o3 = &operands[3]; + + switch (count) { + case 0: o0 = &Globals::none; ASMJIT_FALLTHROUGH; + case 1: o1 = &Globals::none; ASMJIT_FALLTHROUGH; + case 2: o2 = &Globals::none; 
ASMJIT_FALLTHROUGH; + case 3: o3 = &Globals::none; ASMJIT_FALLTHROUGH; + case 4: + return _emit(instId, *o0, *o1, *o2, *o3); + + case 5: + _op4 = operands[4]; + _op5.reset(); + _instOptions |= BaseInst::kOptionOp4Op5Used; + return _emit(instId, *o0, *o1, *o2, *o3); + + case 6: + _op4 = operands[4]; + _op5 = operands[5]; + _instOptions |= BaseInst::kOptionOp4Op5Used; + return _emit(instId, *o0, *o1, *o2, *o3); + + default: + return DebugUtils::errored(kErrorInvalidArgument); + } +} + +#ifndef ASMJIT_NO_LOGGING +void BaseAssembler::_emitLog( + uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, + uint32_t relSize, uint32_t immSize, uint8_t* afterCursor) { + + Logger* logger = _code->logger(); + ASMJIT_ASSERT(logger != nullptr); + ASMJIT_ASSERT(options & BaseEmitter::kOptionLoggingEnabled); + + StringTmp<256> sb; + uint32_t flags = logger->flags(); + + uint8_t* beforeCursor = _bufferPtr; + intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor); + + Operand_ operands[Globals::kMaxOpCount]; + operands[0].copyFrom(o0); + operands[1].copyFrom(o1); + operands[2].copyFrom(o2); + operands[3].copyFrom(o3); + + if (options & BaseInst::kOptionOp4Op5Used) { + operands[4].copyFrom(_op4); + operands[5].copyFrom(_op5); + } + else { + operands[4].reset(); + operands[5].reset(); + } + + sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationCode)); + Logging::formatInstruction(sb, flags, this, archId(), BaseInst(instId, options, _extraReg), operands, Globals::kMaxOpCount); + + if ((flags & FormatOptions::kFlagMachineCode) != 0) + Logging::formatLine(sb, _bufferPtr, size_t(emittedSize), relSize, immSize, inlineComment()); + else + Logging::formatLine(sb, nullptr, std::numeric_limits<size_t>::max(), 0, 0, inlineComment()); + logger->log(sb); +} + +Error BaseAssembler::_emitFailed( + Error err, + uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) { + + StringTmp<256> sb; + sb.appendString(DebugUtils::errorAsString(err)); + sb.appendString(": "); + + Operand_ operands[Globals::kMaxOpCount]; + operands[0].copyFrom(o0); + operands[1].copyFrom(o1); + operands[2].copyFrom(o2); + operands[3].copyFrom(o3); + + if (options & BaseInst::kOptionOp4Op5Used) { + operands[4].copyFrom(_op4); + operands[5].copyFrom(_op5); + } + else { + operands[4].reset(); + operands[5].reset(); + } + + Logging::formatInstruction(sb, 0, this, archId(), BaseInst(instId, options, _extraReg), operands, Globals::kMaxOpCount); + resetInstOptions(); + resetExtraReg(); + resetInlineComment(); + return reportError(err, sb.data()); +} +#endif + +// ============================================================================ +// [asmjit::BaseAssembler - Embed] +// ============================================================================ + +struct DataSizeByPower { + char str[4]; +}; + +static const DataSizeByPower dataSizeByPowerTable[] = { + { "db" }, + { "dw" }, + { "dd" }, + { "dq" } +}; + +Error BaseAssembler::embed(const void* data, uint32_t dataSize) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + if (dataSize == 0) + return DebugUtils::errored(kErrorInvalidArgument); + + CodeBufferWriter writer(this); + ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize)); + + writer.emitData(data, dataSize); + + #ifndef ASMJIT_NO_LOGGING + if (ASMJIT_UNLIKELY(hasEmitterOption(kOptionLoggingEnabled))) + _code->_logger->logBinary(data, dataSize); + #endif + + writer.done(this); 
return kErrorOk; +} + +Error BaseAssembler::embedLabel(const Label& label) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + ASMJIT_ASSERT(_code != nullptr); + RelocEntry* re; + LabelEntry* le = _code->labelEntry(label); + + if (ASMJIT_UNLIKELY(!le)) + return reportError(DebugUtils::errored(kErrorInvalidLabel)); + + uint32_t dataSize = gpSize(); + ASMJIT_ASSERT(dataSize <= 8); + + CodeBufferWriter writer(this); + ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize)); + + #ifndef ASMJIT_NO_LOGGING + if (ASMJIT_UNLIKELY(hasEmitterOption(kOptionLoggingEnabled))) { + StringTmp<256> sb; + sb.appendFormat(".%s ", dataSizeByPowerTable[Support::ctz(dataSize)].str); + Logging::formatLabel(sb, 0, this, label.id()); + sb.appendChar('\n'); + _code->_logger->log(sb); + } + #endif + + // TODO: Does it make sense to calculate the address here if everything is known? + /* + if (_code->hasBaseAddress() && currentSection() == _code->textSection() && le->isBound()) { + uint64_t addr = _code->baseAddress() + _code->textSection()->offset() + le->offset(); + writer.emitValueLE(addr, dataSize); + } + */ + + Error err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs, dataSize); + if (ASMJIT_UNLIKELY(err)) + return reportError(err); + + re->_sourceSectionId = _section->id(); + re->_sourceOffset = offset(); + + if (le->isBound()) { + re->_targetSectionId = le->section()->id(); + re->_payload = le->offset(); + } + else { + LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0); + if (ASMJIT_UNLIKELY(!link)) + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + link->relocId = re->id(); + } + + // Emit dummy DWORD/QWORD depending on the data size. + writer.emitZeros(dataSize); + writer.done(this); + + return kErrorOk; +} + +Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, uint32_t dataSize) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + LabelEntry* labelEntry = _code->labelEntry(label); + LabelEntry* baseEntry = _code->labelEntry(base); + + if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry)) + return reportError(DebugUtils::errored(kErrorInvalidLabel)); + + if (dataSize == 0) + dataSize = gpSize(); + + if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) + return reportError(DebugUtils::errored(kErrorInvalidOperandSize)); + + CodeBufferWriter writer(this); + ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize)); + + #ifndef ASMJIT_NO_LOGGING + if (ASMJIT_UNLIKELY(hasEmitterOption(kOptionLoggingEnabled))) { + StringTmp<256> sb; + sb.appendFormat(".%s (", dataSizeByPowerTable[Support::ctz(dataSize)].str); + Logging::formatLabel(sb, 0, this, label.id()); + sb.appendString(" - "); + Logging::formatLabel(sb, 0, this, base.id()); + sb.appendString(")\n"); + _code->_logger->log(sb); + } + #endif + + // If both labels are bound within the same section it means the delta can be calculated now. 
+ if (labelEntry->isBound() && baseEntry->isBound() && labelEntry->section() == baseEntry->section()) { + uint64_t delta = labelEntry->offset() - baseEntry->offset(); + writer.emitValueLE(delta, dataSize); + } + else { + RelocEntry* re; + Error err = _code->newRelocEntry(&re, RelocEntry::kTypeExpression, dataSize); + if (ASMJIT_UNLIKELY(err)) + return reportError(err); + + Expression* exp = _code->_zone.newT<Expression>(); + if (ASMJIT_UNLIKELY(!exp)) + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + + exp->reset(); + exp->opType = Expression::kOpSub; + exp->setValueAsLabel(0, labelEntry); + exp->setValueAsLabel(1, baseEntry); + + re->_sourceSectionId = _section->id(); + re->_sourceOffset = offset(); + re->_payload = (uint64_t)(uintptr_t)exp; + + writer.emitZeros(dataSize); + } + + writer.done(this); + return kErrorOk; +} + +Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + if (ASMJIT_UNLIKELY(!isLabelValid(label))) + return DebugUtils::errored(kErrorInvalidLabel); + + ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment()))); + ASMJIT_PROPAGATE(bind(label)); + + size_t size = pool.size(); + CodeBufferWriter writer(this); + ASMJIT_PROPAGATE(writer.ensureSpace(this, size)); + + pool.fill(writer.cursor()); + + #ifndef ASMJIT_NO_LOGGING + if (ASMJIT_UNLIKELY(hasEmitterOption(kOptionLoggingEnabled))) + _code->_logger->logBinary(writer.cursor(), size); + #endif + + writer.advance(size); + writer.done(this); + + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseAssembler - Comment] +// ============================================================================ + +Error BaseAssembler::comment(const char* data, size_t size) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + #ifndef ASMJIT_NO_LOGGING + if (hasEmitterOption(kOptionLoggingEnabled)) { + Logger* logger = _code->logger(); + logger->log(data, size); + logger->log("\n", 1); + return kErrorOk; + } + #else + ASMJIT_UNUSED(data); + ASMJIT_UNUSED(size); + #endif + + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseAssembler - Events] +// ============================================================================ + +Error BaseAssembler::onAttach(CodeHolder* code) noexcept { + ASMJIT_PROPAGATE(Base::onAttach(code)); + + // Attach to the end of the .text section. + BaseAssembler_initSection(this, code->_sections[0]); + + // And reset everything that is used temporarily. + _op4.reset(); + _op5.reset(); + + return kErrorOk; +} + +Error BaseAssembler::onDetach(CodeHolder* code) noexcept { + _section = nullptr; + _bufferData = nullptr; + _bufferEnd = nullptr; + _bufferPtr = nullptr; + + _op4.reset(); + _op5.reset(); + + return Base::onDetach(code); +} + +ASMJIT_END_NAMESPACE diff --git a/src/asmjit/core/assembler.h b/src/asmjit/core/assembler.h new file mode 100644 index 0000000..c3bc6ca --- /dev/null +++ b/src/asmjit/core/assembler.h @@ -0,0 +1,165 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_ASSEMBLER_H +#define _ASMJIT_CORE_ASSEMBLER_H + +#include "../core/codeholder.h" +#include "../core/datatypes.h" +#include "../core/emitter.h" +#include "../core/operand.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_core +//! 
\{ + +// ============================================================================ +// [asmjit::BaseAssembler] +// ============================================================================ + +//! Base encoder (assembler). +class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter { +public: + ASMJIT_NONCOPYABLE(BaseAssembler) + typedef BaseEmitter Base; + + //! Current section where the assembling happens. + Section* _section; + //! Start of the CodeBuffer of the current section. + uint8_t* _bufferData; + //! End (first invalid byte) of the current section. + uint8_t* _bufferEnd; + //! Pointer in the CodeBuffer of the current section. + uint8_t* _bufferPtr; + //! 5th operand data, used only temporarily. + Operand_ _op4; + //! 6th operand data, used only temporarily. + Operand_ _op5; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `BaseAssembler` instance. + ASMJIT_API BaseAssembler() noexcept; + //! Destroys the `BaseAssembler` instance. + ASMJIT_API virtual ~BaseAssembler() noexcept; + + //! \} + + //! \name Code-Buffer Management + //! \{ + + //! Returns the capacity of the current CodeBuffer. + inline size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); } + //! Returns the number of remaining bytes in the current CodeBuffer. + inline size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); } + + //! Returns the current position in the CodeBuffer. + inline size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); } + //! Sets the current position in the CodeBuffer to `offset`. + //! + //! \note The `offset` cannot be outside of the buffer size (even if it's + //! within buffer's capacity). + ASMJIT_API Error setOffset(size_t offset); + + //! Returns the start of the CodeBuffer in the current section. + inline uint8_t* bufferData() const noexcept { return _bufferData; } + //! Returns the end (first invalid byte) in the current section. + inline uint8_t* bufferEnd() const noexcept { return _bufferEnd; } + //! Returns the current pointer in the CodeBuffer in the current section. + inline uint8_t* bufferPtr() const noexcept { return _bufferPtr; } + + //! \} + + //! \name Section Management + //! \{ + + inline Section* currentSection() const noexcept { return _section; } + + ASMJIT_API Error section(Section* section) override; + + //! \} + + //! \name Label Management + //! \{ + + ASMJIT_API Label newLabel() override; + ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override; + ASMJIT_API Error bind(const Label& label) override; + + //! \} + + //! \cond INTERNAL + //! \name Emit + //! 
\{ + + using BaseEmitter::_emit; + + ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) override; + ASMJIT_API Error _emitOpArray(uint32_t instId, const Operand_* operands, size_t count) override; + +protected: + #ifndef ASMJIT_NO_LOGGING + void _emitLog( + uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, + uint32_t relSize, uint32_t immSize, uint8_t* afterCursor); + + Error _emitFailed( + Error err, + uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3); + #else + inline Error _emitFailed( + uint32_t err, + uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) { + + ASMJIT_UNUSED(instId); + ASMJIT_UNUSED(options); + ASMJIT_UNUSED(o0); + ASMJIT_UNUSED(o1); + ASMJIT_UNUSED(o2); + ASMJIT_UNUSED(o3); + + resetInstOptions(); + resetInlineComment(); + return reportError(err); + } + #endif +public: + //! \} + //! \endcond + + //! \name Embed + //! \{ + + ASMJIT_API Error embed(const void* data, uint32_t dataSize) override; + ASMJIT_API Error embedLabel(const Label& label) override; + ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, uint32_t dataSize) override; + ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override; + + //! \} + + //! \name Comment + //! \{ + + ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override; + + //! \} + + //! \name Events + //! \{ + + ASMJIT_API Error onAttach(CodeHolder* code) noexcept override; + ASMJIT_API Error onDetach(CodeHolder* code) noexcept override; + + //! \} +}; + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_ASSEMBLER_H diff --git a/src/asmjit/core/build.h b/src/asmjit/core/build.h new file mode 100644 index 0000000..894fdef --- /dev/null +++ b/src/asmjit/core/build.h @@ -0,0 +1,573 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_BUILD_H +#define _ASMJIT_CORE_BUILD_H + +// ============================================================================ +// [asmjit::Version] +// ============================================================================ + +#define ASMJIT_LIBRARY_VERSION 0x010200 /* 1.2.0 */ + +// ============================================================================ +// [asmjit::Options] +// ============================================================================ + +// AsmJit Static Builds and Embedding +// ---------------------------------- +// +// These definitions can be used to enable static library build. Embed is used +// when AsmJit's source code is embedded directly in another project, implies +// static build as well. +// +// #define ASMJIT_EMBED // Asmjit is embedded (implies ASMJIT_BUILD_STATIC). +// #define ASMJIT_STATIC // Enable static-library build. + +// AsmJit Build Mode +// ----------------- +// +// These definitions control the build mode and tracing support. The build mode +// should be auto-detected at compile time, but it's possible to override it in +// case that the auto-detection fails. +// +// Tracing is a feature that is never compiled by default and it's only used to +// debug AsmJit itself. +// +// #define ASMJIT_BUILD_DEBUG // Always use debug-mode (ASMJIT_ASSERT enabled). 
+// #define ASMJIT_BUILD_RELEASE // Always use release-mode (ASMJIT_ASSERT disabled). + +// AsmJit Build Backends +// --------------------- +// +// These definitions control which backends to compile. If none of these is +// defined AsmJit will use host architecture by default (for JIT code generation). +// +// #define ASMJIT_BUILD_X86 // Enable X86 targets (X86 and X86_64). +// #define ASMJIT_BUILD_ARM // Enable ARM targets (ARM and AArch64). +// #define ASMJIT_BUILD_HOST // Enable targets based on target arch (default). + +// AsmJit Build Options +// -------------------- +// +// Flags can be defined to disable standard features. These are handy especially +// when building AsmJit statically and some features are not needed or unwanted +// (like BaseCompiler). +// +// AsmJit features are enabled by default. +// #define ASMJIT_NO_BUILDER // Disable Builder (completely). +// #define ASMJIT_NO_COMPILER // Disable Compiler (completely). +// #define ASMJIT_NO_JIT // Disable JIT memory manager and JitRuntime. +// #define ASMJIT_NO_LOGGING // Disable logging and formatting (completely). +// #define ASMJIT_NO_TEXT // Disable everything that contains text +// // representation (instructions, errors, ...). +// #define ASMJIT_NO_VALIDATION // Disable validation API and options. +// #define ASMJIT_NO_INTROSPECTION // Disable API related to instruction database +// // (validation, cpu features, rw-info, etc). + +// Prevent compile-time errors caused by misconfiguration. +#if defined(ASMJIT_NO_TEXT) && !defined(ASMJIT_NO_LOGGING) + #pragma message("ASMJIT_NO_TEXT can only be defined when ASMJIT_NO_LOGGING is defined.") + #undef ASMJIT_NO_TEXT +#endif + +#if defined(ASMJIT_NO_INTROSPECTION) && !defined(ASMJIT_NO_COMPILER) + #pragma message("ASMJIT_NO_INTROSPECTION can only be defined when ASMJIT_NO_COMPILER is defined") + #undef ASMJIT_NO_INTROSPECTION +#endif + +// ============================================================================ +// [asmjit::Dependencies] +// ============================================================================ + +// We really want std-types as globals. +#include <stdarg.h> +#include <stddef.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include <new> +#include <limits> +#include <type_traits> +#include <utility> + +#if defined(_WIN32) + #ifndef WIN32_LEAN_AND_MEAN + #define WIN32_LEAN_AND_MEAN + #define ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN + #endif + #ifndef NOMINMAX + #define NOMINMAX + #define ASMJIT_UNDEF_NOMINMAX + #endif + #include <windows.h> + #ifdef ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN + #undef WIN32_LEAN_AND_MEAN + #undef ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN + #endif + #ifdef ASMJIT_UNDEF_NOMINMAX + #undef NOMINMAX + #undef ASMJIT_UNDEF_NOMINMAX + #endif +#else + #include <pthread.h> +#endif + +// ============================================================================ +// [asmjit::Build - Globals - Deprecated] +// ============================================================================ + +// DEPRECATED: Will be removed in the future. 
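+//
+// Illustrative note (added for clarity, not part of the original patch): the
+// block below only maps the old macros onto the new one. A consumer-side
+// sketch of the intended usage, assuming the public header path
+// `asmjit/asmjit.h`, would be:
+//
+//   // my_project.cpp - links against a static build of AsmJit.
+//   #define ASMJIT_STATIC
+//   #include <asmjit/asmjit.h>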
+#if defined(ASMJIT_BUILD_EMBED) || defined(ASMJIT_BUILD_STATIC) + #if defined(ASMJIT_BUILD_EMBED) + #pragma message("'ASMJIT_BUILD_EMBED' is deprecated, use 'ASMJIT_STATIC'") + #endif + #if defined(ASMJIT_BUILD_STATIC) + #pragma message("'ASMJIT_BUILD_STATIC' is deprecated, use 'ASMJIT_STATIC'") + #endif + + #if !defined(ASMJIT_STATIC) + #define ASMJIT_STATIC + #endif +#endif + +// ============================================================================ +// [asmjit::Build - Globals - Build Mode] +// ============================================================================ + +// Detect ASMJIT_BUILD_DEBUG and ASMJIT_BUILD_RELEASE if not defined. +#if !defined(ASMJIT_BUILD_DEBUG) && !defined(ASMJIT_BUILD_RELEASE) + #if !defined(NDEBUG) + #define ASMJIT_BUILD_DEBUG + #else + #define ASMJIT_BUILD_RELEASE + #endif +#endif + +// ============================================================================ +// [asmjit::Build - Globals - Target Architecture] +// ============================================================================ + +#if defined(_M_X64) || defined(__x86_64__) + #define ASMJIT_ARCH_X86 64 +#elif defined(_M_IX86) || defined(__X86__) || defined(__i386__) + #define ASMJIT_ARCH_X86 32 +#else + #define ASMJIT_ARCH_X86 0 +#endif + +#if defined(__arm64__) || defined(__aarch64__) +# define ASMJIT_ARCH_ARM 64 +#elif defined(_M_ARM) || defined(_M_ARMT) || defined(__arm__) || defined(__thumb__) || defined(__thumb2__) + #define ASMJIT_ARCH_ARM 32 +#else + #define ASMJIT_ARCH_ARM 0 +#endif + +#if defined(_MIPS_ARCH_MIPS64) || defined(__mips64) + #define ASMJIT_ARCH_MIPS 64 +#elif defined(_MIPS_ARCH_MIPS32) || defined(_M_MRX000) || defined(__mips__) + #define ASMJIT_ARCH_MIPS 32 +#else + #define ASMJIT_ARCH_MIPS 0 +#endif + +#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS) +#if ASMJIT_ARCH_BITS == 0 + #undef ASMJIT_ARCH_BITS + #if defined (__LP64__) || defined(_LP64) + #define ASMJIT_ARCH_BITS 64 + #else + #define ASMJIT_ARCH_BITS 32 + #endif +#endif + +#if (defined(__ARMEB__)) || \ + (defined(__MIPSEB__)) || \ + (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)) + #define ASMJIT_ARCH_LE 0 + #define ASMJIT_ARCH_BE 1 +#else + #define ASMJIT_ARCH_LE 1 + #define ASMJIT_ARCH_BE 0 +#endif + +// Build host architecture if no architecture is selected. +#if !defined(ASMJIT_BUILD_HOST) && \ + !defined(ASMJIT_BUILD_X86) && \ + !defined(ASMJIT_BUILD_ARM) + #define ASMJIT_BUILD_HOST +#endif + +// Detect host architecture if building only for host. 
+#if ASMJIT_ARCH_X86 && defined(ASMJIT_BUILD_HOST) && !defined(ASMJIT_BUILD_X86) + #define ASMJIT_BUILD_X86 +#endif + +#if ASMJIT_ARCH_ARM && defined(ASMJIT_BUILD_HOST) && !defined(ASMJIT_BUILD_ARM) + #define ASMJIT_BUILD_ARM +#endif + +// ============================================================================ +// [asmjit::Build - Globals - C++ Compiler and Features Detection] +// ============================================================================ + +#define ASMJIT_CXX_CLANG 0 +#define ASMJIT_CXX_GNU 0 +#define ASMJIT_CXX_INTEL 0 +#define ASMJIT_CXX_MSC 0 +#define ASMJIT_CXX_MAKE_VER(MAJOR, MINOR, PATCH) ((MAJOR) * 10000000 + (MINOR) * 100000 + (PATCH)) + +// Intel Compiler [pretends to be GNU or MSC, so it must be checked first]: +// - https://software.intel.com/en-us/articles/c0x-features-supported-by-intel-c-compiler +// - https://software.intel.com/en-us/articles/c14-features-supported-by-intel-c-compiler +// - https://software.intel.com/en-us/articles/c17-features-supported-by-intel-c-compiler +#if defined(__INTEL_COMPILER) + + #undef ASMJIT_CXX_INTEL + #define ASMJIT_CXX_INTEL ASMJIT_CXX_MAKE_VER(__INTEL_COMPILER / 100, (__INTEL_COMPILER / 10) % 10, __INTEL_COMPILER % 10) + +// MSC Compiler: +// - https://msdn.microsoft.com/en-us/library/hh567368.aspx +// +// Version List: +// - 16.00.0 == VS2010 +// - 17.00.0 == VS2012 +// - 18.00.0 == VS2013 +// - 19.00.0 == VS2015 +// - 19.10.0 == VS2017 +#elif defined(_MSC_VER) && defined(_MSC_FULL_VER) + + #undef ASMJIT_CXX_MSC + #if _MSC_VER == _MSC_FULL_VER / 10000 + #define ASMJIT_CXX_MSC ASMJIT_CXX_MAKE_VER(_MSC_VER / 100, _MSC_VER % 100, _MSC_FULL_VER % 10000) + #else + #define ASMJIT_CXX_MSC ASMJIT_CXX_MAKE_VER(_MSC_VER / 100, (_MSC_FULL_VER / 100000) % 100, _MSC_FULL_VER % 100000) + #endif + + // SEVERE: VS2015 handles constexpr's incorrectly in case a struct contains a + // union. There is no workaround known other than rewriting the whole + // code. VS2017 has a similar bug, but it can be workarounded. + #if ASMJIT_CXX_MSC < ASMJIT_CXX_MAKE_VER(19, 10, 0) + #error "[asmjit] At least VS2017 is required due to a severe bug in VS2015's constexpr implementation" + #endif + +// Clang Compiler [Pretends to be GNU, so it must be checked before]: +// - https://clang.llvm.org/cxx_status.html +#elif defined(__clang_major__) && defined(__clang_minor__) && defined(__clang_patchlevel__) + + #undef ASMJIT_CXX_CLANG + #define ASMJIT_CXX_CLANG ASMJIT_CXX_MAKE_VER(__clang_major__, __clang_minor__, __clang_patchlevel__) + +// GNU Compiler: +// - https://gcc.gnu.org/projects/cxx-status.html +#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) + + #undef ASMJIT_CXX_GNU + #define ASMJIT_CXX_GNU ASMJIT_CXX_MAKE_VER(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__) + +#endif + +// Compiler features detection macros. 
+#if ASMJIT_CXX_CLANG && defined(__has_builtin) + #define ASMJIT_CXX_HAS_BUILTIN(NAME, CHECK) (__has_builtin(NAME)) +#else + #define ASMJIT_CXX_HAS_BUILTIN(NAME, CHECK) (!(!(CHECK))) +#endif + +#if ASMJIT_CXX_CLANG && defined(__has_extension) + #define ASMJIT_CXX_HAS_FEATURE(NAME, CHECK) (__has_extension(NAME)) +#elif ASMJIT_CXX_CLANG && defined(__has_feature) + #define ASMJIT_CXX_HAS_FEATURE(NAME, CHECK) (__has_feature(NAME)) +#else + #define ASMJIT_CXX_HAS_FEATURE(NAME, CHECK) (!(!(CHECK))) +#endif + +#if ASMJIT_CXX_CLANG && defined(__has_attribute) + #define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (__has_attribute(NAME)) +#else + #define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (!(!(CHECK))) +#endif + +#if ASMJIT_CXX_CLANG && defined(__has_cpp_attribute) + #define ASMJIT_CXX_HAS_CPP_ATTRIBUTE(NAME, CHECK) (__has_cpp_attribute(NAME)) +#else + #define ASMJIT_CXX_HAS_CPP_ATTRIBUTE(NAME, CHECK) (!(!(CHECK))) +#endif + +// Compiler features by vendor. +#if defined(_MSC_VER) && !defined(_NATIVE_WCHAR_T_DEFINED) + #define ASMJIT_CXX_HAS_NATIVE_WCHAR_T 0 +#else + #define ASMJIT_CXX_HAS_NATIVE_WCHAR_T 1 +#endif + +#if ASMJIT_CXX_HAS_FEATURE(cxx_unicode_literals, ( \ + (ASMJIT_CXX_INTEL >= ASMJIT_CXX_MAKE_VER(14, 0, 0)) || \ + (ASMJIT_CXX_MSC >= ASMJIT_CXX_MAKE_VER(19, 0, 0)) || \ + (ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4 , 5, 0) && __cplusplus >= 201103L) )) + #define ASMJIT_CXX_HAS_UNICODE_LITERALS 1 +#else + #define ASMJIT_CXX_HAS_UNICODE_LITERALS 0 +#endif + +// ============================================================================ +// [asmjit::Build - Globals - API Decorators & Language Extensions] +// ============================================================================ + +// API (Export / Import). +#if !defined(ASMJIT_STATIC) + #if defined(_WIN32) && (defined(_MSC_VER) || defined(__MINGW32__)) + #if defined(ASMJIT_EXPORTS) + #define ASMJIT_API __declspec(dllexport) + #else + #define ASMJIT_API __declspec(dllimport) + #endif + #elif defined(_WIN32) && defined(__GNUC__) + #if defined(ASMJIT_EXPORTS) + #define ASMJIT_API __attribute__((__dllexport__)) + #else + #define ASMJIT_API __attribute__((__dllimport__)) + #endif + #elif defined(__GNUC__) + #define ASMJIT_API __attribute__((__visibility__("default"))) + #endif +#endif + +#if !defined(ASMJIT_API) + #define ASMJIT_API +#endif + +#if !defined(ASMJIT_VARAPI) + #define ASMJIT_VARAPI extern ASMJIT_API +#endif + +// This is basically a workaround. When using MSVC and marking class as DLL +// export everything gets exported, which is unwanted in most projects. MSVC +// automatically exports typeinfo and vtable if at least one symbol of the +// class is exported. However, GCC has some strange behavior that even if +// one or more symbol is exported it doesn't export typeinfo unless the +// class itself is decorated with "visibility(default)" (i.e. ASMJIT_API). +#if !defined(_WIN32) && defined(__GNUC__) + #define ASMJIT_VIRTAPI ASMJIT_API +#else + #define ASMJIT_VIRTAPI +#endif + +// Function attributes. 
+#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) + #define ASMJIT_INLINE inline __attribute__((__always_inline__)) +#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER) + #define ASMJIT_INLINE __forceinline +#else + #define ASMJIT_INLINE inline +#endif + +#if defined(__GNUC__) + #define ASMJIT_NOINLINE __attribute__((__noinline__)) + #define ASMJIT_NORETURN __attribute__((__noreturn__)) +#elif defined(_MSC_VER) + #define ASMJIT_NOINLINE __declspec(noinline) + #define ASMJIT_NORETURN __declspec(noreturn) +#else + #define ASMJIT_NOINLINE + #define ASMJIT_NORETURN +#endif + +// Calling conventions. +#if ASMJIT_ARCH_X86 == 32 && defined(__GNUC__) + #define ASMJIT_CDECL __attribute__((__cdecl__)) + #define ASMJIT_STDCALL __attribute__((__stdcall__)) + #define ASMJIT_FASTCALL __attribute__((__fastcall__)) + #define ASMJIT_REGPARM(N) __attribute__((__regparm__(N))) +#elif ASMJIT_ARCH_X86 == 32 && defined(_MSC_VER) + #define ASMJIT_CDECL __cdecl + #define ASMJIT_STDCALL __stdcall + #define ASMJIT_FASTCALL __fastcall + #define ASMJIT_REGPARM(N) +#else + #define ASMJIT_CDECL + #define ASMJIT_STDCALL + #define ASMJIT_FASTCALL + #define ASMJIT_REGPARM(N) +#endif + +// Type alignment (not allowed by C++11 'alignas' keyword). +#if defined(__GNUC__) + #define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE +#elif defined(_MSC_VER) + #define ASMJIT_ALIGN_TYPE(TYPE, N) __declspec(align(N)) TYPE +#else + #define ASMJIT_ALIGN_TYPE(TYPE, N) TYPE +#endif + +// Annotations. +#if defined(__GNUC__) + #define ASMJIT_LIKELY(...) __builtin_expect(!!(__VA_ARGS__), 1) + #define ASMJIT_UNLIKELY(...) __builtin_expect(!!(__VA_ARGS__), 0) +#else + #define ASMJIT_LIKELY(...) (__VA_ARGS__) + #define ASMJIT_UNLIKELY(...) (__VA_ARGS__) +#endif + +#if defined(__clang__) && __cplusplus >= 201103L + #define ASMJIT_FALLTHROUGH [[clang::fallthrough]] +#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(7, 0, 0) + #define ASMJIT_FALLTHROUGH __attribute__((__fallthrough__)) +#else + #define ASMJIT_FALLTHROUGH ((void)0) /* fallthrough */ +#endif + +#define ASMJIT_UNUSED(X) (void)(X) + +// Utilities. 
+#define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100) +#define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0])) + +#if ASMJIT_CXX_HAS_ATTRIBUTE(attribute_deprecated_with_message, ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 5, 0)) + #define ASMJIT_DEPRECATED(DECL, MESSAGE) DECL __attribute__((__deprecated__(MESSAGE))) +#elif ASMJIT_MSC + #define ASMJIT_DEPRECATED(DECL, MESSAGE) __declspec(deprecated(MESSAGE)) DECL +#else + #define ASMJIT_DEPRECATED(DECL, MESSAGE) DECL +#endif + +#if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0) + #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined"))) +#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 9, 0) + #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__)) +#else + #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF +#endif + +// ============================================================================ +// [asmjit::Build - Globals - Begin-Namespace / End-Namespace] +// ============================================================================ + +#if defined(__clang__) + #define ASMJIT_BEGIN_NAMESPACE \ + namespace asmjit { \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wconstant-logical-operand\"") \ + _Pragma("clang diagnostic ignored \"-Wunnamed-type-template-args\"") + #define ASMJIT_END_NAMESPACE \ + _Pragma("clang diagnostic pop") \ + } +#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 0, 0) && \ + ASMJIT_CXX_GNU < ASMJIT_CXX_MAKE_VER(5, 0, 0) + #define ASMJIT_BEGIN_NAMESPACE \ + namespace asmjit { \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"") + #define ASMJIT_END_NAMESPACE \ + _Pragma("GCC diagnostic pop") \ + } +#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(8, 0, 0) + #define ASMJIT_BEGIN_NAMESPACE \ + namespace asmjit { \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wclass-memaccess\"") + #define ASMJIT_END_NAMESPACE \ + _Pragma("GCC diagnostic pop") \ + } +#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER) + #define ASMJIT_BEGIN_NAMESPACE \ + namespace asmjit { \ + __pragma(warning(push)) \ + __pragma(warning(disable: 4127)) /* conditional expression is constant*/\ + __pragma(warning(disable: 4201)) /* nameless struct/union */ + #define ASMJIT_END_NAMESPACE \ + __pragma(warning(pop)) \ + } +#endif + +#if !defined(ASMJIT_BEGIN_NAMESPACE) && !defined(ASMJIT_END_NAMESPACE) + #define ASMJIT_BEGIN_NAMESPACE namespace asmjit { + #define ASMJIT_END_NAMESPACE } +#endif + +#define ASMJIT_BEGIN_SUB_NAMESPACE(NAMESPACE) \ + ASMJIT_BEGIN_NAMESPACE \ + namespace NAMESPACE { + +#define ASMJIT_END_SUB_NAMESPACE \ + } \ + ASMJIT_END_NAMESPACE + +// ============================================================================ +// [asmjit::Build - Globals - Utilities] +// ============================================================================ + +#define ASMJIT_NONCOPYABLE(...) \ + private: \ + __VA_ARGS__(const __VA_ARGS__& other) = delete; \ + __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \ + public: + +#define ASMJIT_NONCONSTRUCTIBLE(...) 
\ + private: \ + __VA_ARGS__() = delete; \ + __VA_ARGS__(const __VA_ARGS__& other) = delete; \ + __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \ + public: + +// ============================================================================ +// [asmjit::Build - Globals - Build-Only] +// ============================================================================ + +// Internal macros that are only used when building AsmJit itself. +#ifdef ASMJIT_EXPORTS + #if !defined(ASMJIT_BUILD_DEBUG) && ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 4, 0) + #define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os"))) + #define ASMJIT_FAVOR_SPEED __attribute__((__optimize__("O3"))) + #elif ASMJIT_CXX_HAS_ATTRIBUTE(__minsize__, 0) + #define ASMJIT_FAVOR_SIZE __attribute__((__minsize__)) + #define ASMJIT_FAVOR_SPEED + #else + #define ASMJIT_FAVOR_SIZE + #define ASMJIT_FAVOR_SPEED + #endif + + // Only turn-off these warnings when building asmjit itself. + #ifdef _MSC_VER + #ifndef _CRT_SECURE_NO_DEPRECATE + #define _CRT_SECURE_NO_DEPRECATE + #endif + #ifndef _CRT_SECURE_NO_WARNINGS + #define _CRT_SECURE_NO_WARNINGS + #endif + #endif +#endif + +// ============================================================================ +// [asmjit::Build - Globals - Cleanup] +// ============================================================================ + +// Undefine everything that is not used by AsmJit outside of `build.h` and that +// is considered private. +#undef ASMJIT_CXX_CLANG +#undef ASMJIT_CXX_GNU +#undef ASMJIT_CXX_INTEL +#undef ASMJIT_CXX_MSC +#undef ASMJIT_CXX_MAKE_VER + +// ============================================================================ +// [asmjit::Build - Globals - Unit Testing Boilerplate] +// ============================================================================ + +// IDE: Make sure '#ifdef'ed unit tests are properly highlighted. +#if defined(__INTELLISENSE__) && !defined(ASMJIT_TEST) + #define ASMJIT_TEST +#endif + +// IDE: Make sure '#ifdef'ed unit tests are not disabled by IDE. +#if defined(ASMJIT_TEST) + #include "../../../test/broken.h" +#endif + +#endif // _ASMJIT_CORE_BUILD_H diff --git a/src/asmjit/core/builder.cpp b/src/asmjit/core/builder.cpp new file mode 100644 index 0000000..541d053 --- /dev/null +++ b/src/asmjit/core/builder.cpp @@ -0,0 +1,995 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#define ASMJIT_EXPORTS + +#include "../core/build.h" +#ifndef ASMJIT_NO_BUILDER + +#include "../core/builder.h" +#include "../core/logging.h" +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::PostponedErrorHandler (Internal)] +// ============================================================================ + +//! Postponed error handler that never throws. Used as a temporal error handler +//! to run passes. If error occurs, the caller is notified and will call the +//! real error handler, that can throw. 
+class PostponedErrorHandler : public ErrorHandler { +public: + void handleError(Error err, const char* message, BaseEmitter* origin) override { + ASMJIT_UNUSED(err); + ASMJIT_UNUSED(origin); + + _message.assignString(message); + } + + StringTmp<128> _message; +}; + +// ============================================================================ +// [asmjit::BaseBuilder - Construction / Destruction] +// ============================================================================ + +BaseBuilder::BaseBuilder() noexcept + : BaseEmitter(kTypeBuilder), + _codeZone(32768 - Zone::kBlockOverhead), + _dataZone(16384 - Zone::kBlockOverhead), + _passZone(65536 - Zone::kBlockOverhead), + _allocator(&_codeZone), + _passes(), + _labelNodes(), + _cursor(nullptr), + _firstNode(nullptr), + _lastNode(nullptr), + _nodeFlags(0) {} +BaseBuilder::~BaseBuilder() noexcept {} + +// ============================================================================ +// [asmjit::BaseBuilder - Node Management] +// ============================================================================ + +LabelNode* BaseBuilder::newLabelNode() noexcept { + LabelNode* node = newNodeT<LabelNode>(); + if (!node || registerLabelNode(node) != kErrorOk) + return nullptr; + return node; +} + +AlignNode* BaseBuilder::newAlignNode(uint32_t alignMode, uint32_t alignment) noexcept { + return newNodeT<AlignNode>(alignMode, alignment); +} + +EmbedDataNode* BaseBuilder::newEmbedDataNode(const void* data, uint32_t size) noexcept { + if (size > EmbedDataNode::kInlineBufferSize) { + void* cloned = _dataZone.alloc(size); + if (ASMJIT_UNLIKELY(!cloned)) + return nullptr; + + if (data) + memcpy(cloned, data, size); + data = cloned; + } + + return newNodeT<EmbedDataNode>(const_cast<void*>(data), size); +} + +ConstPoolNode* BaseBuilder::newConstPoolNode() noexcept { + ConstPoolNode* node = newNodeT<ConstPoolNode>(); + if (!node || registerLabelNode(node) != kErrorOk) + return nullptr; + return node; +} + +CommentNode* BaseBuilder::newCommentNode(const char* data, size_t size) noexcept { + if (data) { + if (size == SIZE_MAX) + size = strlen(data); + + if (size > 0) { + data = static_cast<char*>(_dataZone.dup(data, size, true)); + if (!data) return nullptr; + } + } + + return newNodeT<CommentNode>(data); +} + +InstNode* BaseBuilder::newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0) noexcept { + uint32_t opCount = 1; + uint32_t opCapacity = InstNode::capacityOfOpCount(opCount); + ASMJIT_ASSERT(opCapacity >= 4); + + InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity)); + if (ASMJIT_UNLIKELY(!node)) + return nullptr; + + node = new(node) InstNode(this, instId, instOptions, opCount, opCapacity); + node->setOp(0, o0); + for (uint32_t i = opCount; i < opCapacity; i++) node->resetOp(i); + return node; +} + +InstNode* BaseBuilder::newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1) noexcept { + uint32_t opCount = 2; + uint32_t opCapacity = InstNode::capacityOfOpCount(opCount); + ASMJIT_ASSERT(opCapacity >= 4); + + InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity)); + if (ASMJIT_UNLIKELY(!node)) + return nullptr; + + node = new(node) InstNode(this, instId, instOptions, opCount, opCapacity); + node->setOp(0, o0); + node->setOp(1, o1); + for (uint32_t i = opCount; i < opCapacity; i++) node->resetOp(i); + return node; +} + +InstNode* BaseBuilder::newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept { + uint32_t opCount = 3; + uint32_t opCapacity = 
InstNode::capacityOfOpCount(opCount); + ASMJIT_ASSERT(opCapacity >= 4); + + InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity)); + if (ASMJIT_UNLIKELY(!node)) + return nullptr; + + node = new(node) InstNode(this, instId, instOptions, opCount, opCapacity); + node->setOp(0, o0); + node->setOp(1, o1); + node->setOp(2, o2); + for (uint32_t i = opCount; i < opCapacity; i++) node->resetOp(i); + return node; +} + +InstNode* BaseBuilder::newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept { + uint32_t opCount = 4; + uint32_t opCapacity = InstNode::capacityOfOpCount(opCount); + ASMJIT_ASSERT(opCapacity >= 4); + + InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity)); + if (ASMJIT_UNLIKELY(!node)) + return nullptr; + + node = new(node) InstNode(this, instId, instOptions, opCount, opCapacity); + node->setOp(0, o0); + node->setOp(1, o1); + node->setOp(2, o2); + node->setOp(3, o3); + for (uint32_t i = opCount; i < opCapacity; i++) node->resetOp(i); + return node; +} + +InstNode* BaseBuilder::newInstNodeRaw(uint32_t instId, uint32_t instOptions, uint32_t opCount) noexcept { + uint32_t opCapacity = InstNode::capacityOfOpCount(opCount); + ASMJIT_ASSERT(opCapacity >= 4); + + InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity)); + if (ASMJIT_UNLIKELY(!node)) + return nullptr; + return new(node) InstNode(this, instId, instOptions, opCount, opCapacity); +} + +BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept { + ASMJIT_ASSERT(node); + ASMJIT_ASSERT(!node->_prev); + ASMJIT_ASSERT(!node->_next); + ASMJIT_ASSERT(!node->isActive()); + + if (!_cursor) { + if (!_firstNode) { + _firstNode = node; + _lastNode = node; + } + else { + node->_next = _firstNode; + _firstNode->_prev = node; + _firstNode = node; + } + } + else { + BaseNode* prev = _cursor; + BaseNode* next = _cursor->next(); + + node->_prev = prev; + node->_next = next; + + prev->_next = node; + if (next) + next->_prev = node; + else + _lastNode = node; + } + + node->addFlags(BaseNode::kFlagIsActive); + if (node->isSection()) + _dirtySectionLinks = true; + + _cursor = node; + return node; +} + +BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept { + ASMJIT_ASSERT(node); + ASMJIT_ASSERT(ref); + + ASMJIT_ASSERT(!node->_prev); + ASMJIT_ASSERT(!node->_next); + + BaseNode* prev = ref; + BaseNode* next = ref->next(); + + node->_prev = prev; + node->_next = next; + + node->addFlags(BaseNode::kFlagIsActive); + if (node->isSection()) + _dirtySectionLinks = true; + + prev->_next = node; + if (next) + next->_prev = node; + else + _lastNode = node; + + return node; +} + +BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept { + ASMJIT_ASSERT(node != nullptr); + ASMJIT_ASSERT(!node->_prev); + ASMJIT_ASSERT(!node->_next); + ASMJIT_ASSERT(!node->isActive()); + ASMJIT_ASSERT(ref != nullptr); + ASMJIT_ASSERT(ref->isActive()); + + BaseNode* prev = ref->prev(); + BaseNode* next = ref; + + node->_prev = prev; + node->_next = next; + + node->addFlags(BaseNode::kFlagIsActive); + if (node->isSection()) + _dirtySectionLinks = true; + + next->_prev = node; + if (prev) + prev->_next = node; + else + _firstNode = node; + + return node; +} + +BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept { + if (!node->isActive()) + return node; + + BaseNode* prev = node->prev(); + BaseNode* next = node->next(); + + if (_firstNode == node) + _firstNode = next; + else + prev->_next 
= next; + + if (_lastNode == node) + _lastNode = prev; + else + next->_prev = prev; + + node->_prev = nullptr; + node->_next = nullptr; + node->clearFlags(BaseNode::kFlagIsActive); + if (node->isSection()) + _dirtySectionLinks = true; + + if (_cursor == node) + _cursor = prev; + + return node; +} + +void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept { + if (first == last) { + removeNode(first); + return; + } + + if (!first->isActive()) + return; + + BaseNode* prev = first->prev(); + BaseNode* next = last->next(); + + if (_firstNode == first) + _firstNode = next; + else + prev->_next = next; + + if (_lastNode == last) + _lastNode = prev; + else + next->_prev = prev; + + BaseNode* node = first; + uint32_t didRemoveSection = false; + + for (;;) { + next = node->next(); + ASMJIT_ASSERT(next != nullptr); + + node->_prev = nullptr; + node->_next = nullptr; + node->clearFlags(BaseNode::kFlagIsActive); + didRemoveSection |= uint32_t(node->isSection()); + + if (_cursor == node) + _cursor = prev; + + if (node == last) + break; + node = next; + } + + if (didRemoveSection) + _dirtySectionLinks = true; +} + +BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept { + BaseNode* old = _cursor; + _cursor = node; + return old; +} + +// ============================================================================ +// [asmjit::BaseBuilder - Section] +// ============================================================================ + +Error BaseBuilder::sectionNodeOf(SectionNode** pOut, uint32_t sectionId) noexcept { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId))) + return DebugUtils::errored(kErrorInvalidSection); + + if (sectionId >= _sectionNodes.size()) + ASMJIT_PROPAGATE(_sectionNodes.resize(&_allocator, sectionId + 1)); + + SectionNode* node = _sectionNodes[sectionId]; + if (!node) { + node = newNodeT<SectionNode>(sectionId); + if (ASMJIT_UNLIKELY(!node)) + return DebugUtils::errored(kErrorOutOfMemory); + _sectionNodes[sectionId] = node; + } + + *pOut = node; + return kErrorOk; +} + +Error BaseBuilder::section(Section* section) { + SectionNode* node; + Error err = sectionNodeOf(&node, section->id()); + + if (ASMJIT_UNLIKELY(err)) + return reportError(err); + + if (!node->isActive()) { + // Insert the section at the end if it was not part of the code. + addAfter(node, lastNode()); + _cursor = node; + } + else { + // This is a bit tricky. We cache section links to make sure that + // switching sections doesn't involve traversal in linked-list unless + // the position of the section has changed. 
+ if (hasDirtySectionLinks()) + updateSectionLinks(); + + if (node->_nextSection) + _cursor = node->_nextSection->_prev; + else + _cursor = _lastNode; + } + + return kErrorOk; +} + +void BaseBuilder::updateSectionLinks() noexcept { + if (!_dirtySectionLinks) + return; + + BaseNode* node_ = _firstNode; + SectionNode* currentSection = nullptr; + + while (node_) { + if (node_->isSection()) { + if (currentSection) + currentSection->_nextSection = node_->as<SectionNode>(); + currentSection = node_->as<SectionNode>(); + } + node_ = node_->next(); + } + + if (currentSection) + currentSection->_nextSection = nullptr; + + _dirtySectionLinks = false; +} + +// ============================================================================ +// [asmjit::BaseBuilder - Labels] +// ============================================================================ + +Error BaseBuilder::labelNodeOf(LabelNode** pOut, uint32_t labelId) noexcept { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + uint32_t index = labelId; + if (ASMJIT_UNLIKELY(index >= _code->labelCount())) + return DebugUtils::errored(kErrorInvalidLabel); + + if (index >= _labelNodes.size()) + ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1)); + + LabelNode* node = _labelNodes[index]; + if (!node) { + node = newNodeT<LabelNode>(labelId); + if (ASMJIT_UNLIKELY(!node)) + return DebugUtils::errored(kErrorOutOfMemory); + _labelNodes[index] = node; + } + + *pOut = node; + return kErrorOk; +} + +Error BaseBuilder::registerLabelNode(LabelNode* node) noexcept { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + // Don't call `reportError()` from here, we are noexcept and we are called + // by `newLabelNode()` and `newFuncNode()`, which are noexcept as well. + LabelEntry* le; + ASMJIT_PROPAGATE(_code->newLabelEntry(&le)); + uint32_t labelId = le->id(); + + // We just added one label so it must be true. 
+ ASMJIT_ASSERT(_labelNodes.size() < labelId + 1); + ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, labelId + 1)); + + _labelNodes[labelId] = node; + node->_id = labelId; + + return kErrorOk; +} + +static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) noexcept { + ASMJIT_ASSERT(self->_labelNodes.size() < labelId + 1); + LabelNode* node = self->newNodeT<LabelNode>(labelId); + + if (ASMJIT_UNLIKELY(!node)) + return DebugUtils::errored(kErrorOutOfMemory); + + ASMJIT_PROPAGATE(self->_labelNodes.resize(&self->_allocator, labelId + 1)); + self->_labelNodes[labelId] = node; + node->_id = labelId; + return kErrorOk; +} + +Label BaseBuilder::newLabel() { + uint32_t labelId = Globals::kInvalidId; + if (_code) { + LabelEntry* le; + Error err = _code->newLabelEntry(&le); + if (ASMJIT_UNLIKELY(err)) { + reportError(err); + } + else { + err = BaseBuilder_newLabelInternal(this, le->id()); + if (ASMJIT_UNLIKELY(err)) + reportError(err); + else + labelId = le->id(); + } + } + return Label(labelId); +} + +Label BaseBuilder::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) { + uint32_t labelId = Globals::kInvalidId; + if (_code) { + LabelEntry* le; + Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId); + if (ASMJIT_UNLIKELY(err)) { + reportError(err); + } + else { + err = BaseBuilder_newLabelInternal(this, le->id()); + if (ASMJIT_UNLIKELY(err)) + reportError(err); + else + labelId = le->id(); + } + } + return Label(labelId); +} + +Error BaseBuilder::bind(const Label& label) { + LabelNode* node; + Error err = labelNodeOf(&node, label); + + if (ASMJIT_UNLIKELY(err)) + return reportError(err); + + addNode(node); + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseBuilder - Passes] +// ============================================================================ + +ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept { + for (Pass* pass : _passes) + if (strcmp(pass->name(), name) == 0) + return pass; + return nullptr; +} + +ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + if (ASMJIT_UNLIKELY(pass == nullptr)) { + // Since this is directly called by `addPassT()` we treat `null` argument + // as out-of-memory condition. Otherwise it would be API misuse. + return DebugUtils::errored(kErrorOutOfMemory); + } + else if (ASMJIT_UNLIKELY(pass->_cb)) { + // Kinda weird, but okay... 
+ if (pass->_cb == this) + return kErrorOk; + return DebugUtils::errored(kErrorInvalidState); + } + + ASMJIT_PROPAGATE(_passes.append(&_allocator, pass)); + pass->_cb = this; + return kErrorOk; +} + +ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + if (ASMJIT_UNLIKELY(pass == nullptr)) + return DebugUtils::errored(kErrorInvalidArgument); + + if (pass->_cb != nullptr) { + if (pass->_cb != this) + return DebugUtils::errored(kErrorInvalidState); + + uint32_t index = _passes.indexOf(pass); + ASMJIT_ASSERT(index != Globals::kNotFound); + + pass->_cb = nullptr; + _passes.removeAt(index); + } + + pass->~Pass(); + return kErrorOk; +} + +Error BaseBuilder::runPasses() { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + if (_passes.empty()) + return kErrorOk; + + Logger* logger = code()->logger(); + ErrorHandler* prev = errorHandler(); + PostponedErrorHandler postponed; + + Error err = kErrorOk; + setErrorHandler(&postponed); + + for (Pass* pass : _passes) { + _passZone.reset(); + err = pass->run(&_passZone, logger); + if (err) break; + } + _passZone.reset(); + setErrorHandler(prev); + + if (ASMJIT_UNLIKELY(err)) + return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr); + + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseBuilder - Emit] +// ============================================================================ + +Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) { + uint32_t opCount = 4; + + if (o3.isNone()) { + opCount = 3; + if (o2.isNone()) { + opCount = 2; + if (o1.isNone()) { + opCount = 1; + if (o0.isNone()) + opCount = 0; + } + } + } + + uint32_t options = instOptions() | globalInstOptions(); + if (options & BaseInst::kOptionReserved) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + // Strict validation. + #ifndef ASMJIT_NO_VALIDATION + if (hasEmitterOption(kOptionStrictValidation)) { + Operand_ opArray[4]; + opArray[0].copyFrom(o0); + opArray[1].copyFrom(o1); + opArray[2].copyFrom(o2); + opArray[3].copyFrom(o3); + + Error err = InstAPI::validate(archId(), BaseInst(instId, options, _extraReg), opArray, opCount); + if (ASMJIT_UNLIKELY(err)) { + resetInstOptions(); + resetExtraReg(); + resetInlineComment(); + return reportError(err); + } + } + #endif + + // Clear options that should never be part of `InstNode`. 
+ options &= ~BaseInst::kOptionReserved; + } + + uint32_t opCapacity = InstNode::capacityOfOpCount(opCount); + ASMJIT_ASSERT(opCapacity >= 4); + + InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity)); + if (ASMJIT_UNLIKELY(!node)) { + resetInstOptions(); + resetExtraReg(); + resetInlineComment(); + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } + + node = new(node) InstNode(this, instId, options, opCount, opCapacity); + node->setExtraReg(extraReg()); + node->setOp(0, o0); + node->setOp(1, o1); + node->setOp(2, o2); + node->setOp(3, o3); + + for (uint32_t i = 4; i < InstNode::kBaseOpCapacity; i++) + node->resetOp(i); + + const char* comment = inlineComment(); + if (comment) + node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true))); + + resetInstOptions(); + resetExtraReg(); + resetInlineComment(); + + addNode(node); + return kErrorOk; +} + +Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) { + uint32_t opCount = Globals::kMaxOpCount; + if (o5.isNone()) { + opCount = 5; + if (o4.isNone()) + return _emit(instId, o0, o1, o2, o3); + } + + uint32_t options = instOptions() | globalInstOptions(); + if (ASMJIT_UNLIKELY(options & BaseInst::kOptionReserved)) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + // Strict validation. + #ifndef ASMJIT_NO_VALIDATION + if (hasEmitterOption(kOptionStrictValidation)) { + Operand_ opArray[Globals::kMaxOpCount]; + opArray[0].copyFrom(o0); + opArray[1].copyFrom(o1); + opArray[2].copyFrom(o2); + opArray[3].copyFrom(o3); + opArray[4].copyFrom(o4); + opArray[5].copyFrom(o5); + + Error err = InstAPI::validate(archId(), BaseInst(instId, options, _extraReg), opArray, opCount); + if (ASMJIT_UNLIKELY(err)) { + resetInstOptions(); + resetExtraReg(); + resetInlineComment(); + return reportError(err); + } + } + #endif + + // Clear options that should never be part of `InstNode`. 
+ options &= ~BaseInst::kOptionReserved; + } + + uint32_t opCapacity = InstNode::capacityOfOpCount(opCount); + ASMJIT_ASSERT(opCapacity >= opCount); + + InstNode* node = _allocator.allocT(InstNode::nodeSizeOfOpCapacity(opCapacity)); + if (ASMJIT_UNLIKELY(!node)) { + resetInstOptions(); + resetExtraReg(); + resetInlineComment(); + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } + + node = new(node) InstNode(this, instId, options, opCount, opCapacity); + node->setExtraReg(extraReg()); + node->setOp(0, o0); + node->setOp(1, o1); + node->setOp(2, o2); + node->setOp(3, o3); + node->setOp(4, o4); + + if (opCapacity > 5) + node->setOp(5, o5); + + const char* comment = inlineComment(); + if (comment) + node->setInlineComment(static_cast(_dataZone.dup(comment, strlen(comment), true))); + + resetInstOptions(); + resetExtraReg(); + resetInlineComment(); + + addNode(node); + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseBuilder - Align] +// ============================================================================ + +Error BaseBuilder::align(uint32_t alignMode, uint32_t alignment) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + AlignNode* node = newAlignNode(alignMode, alignment); + if (ASMJIT_UNLIKELY(!node)) + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + + addNode(node); + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseBuilder - Embed] +// ============================================================================ + +Error BaseBuilder::embed(const void* data, uint32_t dataSize) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + EmbedDataNode* node = newEmbedDataNode(data, dataSize); + if (ASMJIT_UNLIKELY(!node)) + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + + addNode(node); + return kErrorOk; +} + +Error BaseBuilder::embedLabel(const Label& label) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + EmbedLabelNode* node = newNodeT(label.id()); + if (ASMJIT_UNLIKELY(!node)) + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + + addNode(node); + return kErrorOk; +} + +Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, uint32_t dataSize) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + EmbedLabelDeltaNode* node = newNodeT(label.id(), base.id(), dataSize); + if (ASMJIT_UNLIKELY(!node)) + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + + addNode(node); + return kErrorOk; +} + +Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + if (!isLabelValid(label)) + return reportError(DebugUtils::errored(kErrorInvalidLabel)); + + ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment()))); + ASMJIT_PROPAGATE(bind(label)); + + EmbedDataNode* node = newEmbedDataNode(nullptr, uint32_t(pool.size())); + if (ASMJIT_UNLIKELY(!node)) + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + + pool.fill(node->data()); + addNode(node); + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseBuilder - Comment] +// ============================================================================ + +Error BaseBuilder::comment(const char* data, size_t 
size) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + CommentNode* node = newCommentNode(data, size); + if (ASMJIT_UNLIKELY(!node)) + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + + addNode(node); + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseBuilder - Serialize] +// ============================================================================ + +Error BaseBuilder::serialize(BaseEmitter* dst) { + Error err = kErrorOk; + BaseNode* node_ = _firstNode; + + do { + dst->setInlineComment(node_->inlineComment()); + + if (node_->isInst()) { + InstNode* node = node_->as(); + err = dst->emitInst(node->baseInst(), node->operands(), node->opCount()); + } + else if (node_->isLabel()) { + if (node_->isConstPool()) { + ConstPoolNode* node = node_->as(); + err = dst->embedConstPool(node->label(), node->constPool()); + } + else { + LabelNode* node = node_->as(); + err = dst->bind(node->label()); + } + } + else if (node_->isAlign()) { + AlignNode* node = node_->as(); + err = dst->align(node->alignMode(), node->alignment()); + } + else if (node_->isEmbedData()) { + EmbedDataNode* node = node_->as(); + err = dst->embed(node->data(), node->size()); + } + else if (node_->isEmbedLabel()) { + EmbedLabelNode* node = node_->as(); + err = dst->embedLabel(node->label()); + } + else if (node_->isEmbedLabelDelta()) { + EmbedLabelDeltaNode* node = node_->as(); + err = dst->embedLabelDelta(node->label(), node->baseLabel(), node->dataSize()); + } + else if (node_->isSection()) { + SectionNode* node = node_->as(); + err = dst->section(_code->sectionById(node->id())); + } + else if (node_->isComment()) { + CommentNode* node = node_->as(); + err = dst->comment(node->inlineComment()); + } + + if (err) break; + node_ = node_->next(); + } while (node_); + + return err; +} + +// ============================================================================ +// [asmjit::BaseBuilder - Logging] +// ============================================================================ + +#ifndef ASMJIT_NO_LOGGING +Error BaseBuilder::dump(String& sb, uint32_t flags) const noexcept { + BaseNode* node = _firstNode; + while (node) { + ASMJIT_PROPAGATE(Logging::formatNode(sb, flags, this, node)); + sb.appendChar('\n'); + node = node->next(); + } + + return kErrorOk; +} +#endif + +// ============================================================================ +// [asmjit::BaseBuilder - Events] +// ============================================================================ + +Error BaseBuilder::onAttach(CodeHolder* code) noexcept { + ASMJIT_PROPAGATE(Base::onAttach(code)); + + SectionNode* initialSection; + Error err = sectionNodeOf(&initialSection, 0); + + if (!err) + err = _passes.willGrow(&_allocator, 8); + + if (ASMJIT_UNLIKELY(err)) { + onDetach(code); + return err; + } + + + _cursor = initialSection; + _firstNode = initialSection; + _lastNode = initialSection; + initialSection->setFlags(BaseNode::kFlagIsActive); + + return kErrorOk; +} + +Error BaseBuilder::onDetach(CodeHolder* code) noexcept { + _passes.reset(); + _sectionNodes.reset(); + _labelNodes.reset(); + + _allocator.reset(&_codeZone); + _codeZone.reset(); + _dataZone.reset(); + _passZone.reset(); + + _nodeFlags = 0; + + _cursor = nullptr; + _firstNode = nullptr; + _lastNode = nullptr; + + return Base::onDetach(code); +} + +// ============================================================================ +// [asmjit::Pass - Construction / Destruction] +// 
============================================================================ + +Pass::Pass(const char* name) noexcept + : _cb(nullptr), + _name(name) {} +Pass::~Pass() noexcept {} + +ASMJIT_END_NAMESPACE + +#endif // !ASMJIT_NO_BUILDER diff --git a/src/asmjit/core/builder.h b/src/asmjit/core/builder.h new file mode 100644 index 0000000..aaa4889 --- /dev/null +++ b/src/asmjit/core/builder.h @@ -0,0 +1,1278 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_BUILDER_H +#define _ASMJIT_CORE_BUILDER_H + +#include "../core/build.h" +#ifndef ASMJIT_NO_BUILDER + +#include "../core/assembler.h" +#include "../core/codeholder.h" +#include "../core/constpool.h" +#include "../core/inst.h" +#include "../core/operand.h" +#include "../core/string.h" +#include "../core/support.h" +#include "../core/zone.h" +#include "../core/zonevector.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_builder +//! \{ + +// ============================================================================ +// [Forward Declarations] +// ============================================================================ + +class BaseBuilder; +class Pass; + +class BaseNode; +class InstNode; +class SectionNode; +class LabelNode; +class AlignNode; +class EmbedDataNode; +class EmbedLabelNode; +class ConstPoolNode; +class CommentNode; +class SentinelNode; +class LabelDeltaNode; + +// ============================================================================ +// [asmjit::BaseBuilder] +// ============================================================================ + +class ASMJIT_VIRTAPI BaseBuilder : public BaseEmitter { +public: + ASMJIT_NONCOPYABLE(BaseBuilder) + typedef BaseEmitter Base; + + //! Base zone used to allocate nodes and passes. + Zone _codeZone; + //! Data zone used to allocate data and names. + Zone _dataZone; + //! Pass zone, passed to `Pass::run()`. + Zone _passZone; + //! Allocator that uses `_codeZone`. + ZoneAllocator _allocator; + + //! Array of `Pass` objects. + ZoneVector _passes; + //! Maps section indexes to `LabelNode` nodes. + ZoneVector _sectionNodes; + //! Maps label indexes to `LabelNode` nodes. + ZoneVector _labelNodes; + + //! Current node (cursor). + BaseNode* _cursor; + //! First node of the current section. + BaseNode* _firstNode; + //! Last node of the current section. + BaseNode* _lastNode; + + //! Flags assigned to each new node. + uint32_t _nodeFlags; + //! The sections links are dirty (used internally). + bool _dirtySectionLinks; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `BaseBuilder` instance. + ASMJIT_API BaseBuilder() noexcept; + //! Destroys the `BaseBuilder` instance. + ASMJIT_API virtual ~BaseBuilder() noexcept; + + //! \} + + //! \name Node Management + //! \{ + + //! Returns the first node. + inline BaseNode* firstNode() const noexcept { return _firstNode; } + //! Returns the last node. + inline BaseNode* lastNode() const noexcept { return _lastNode; } + + //! Allocates and instantiates a new node of type `T` and returns its instance. + //! If the allocation fails `nullptr` is returned. + //! + //! The template argument `T` must be a type that is extends \ref BaseNode. + //! + //! \remarks The pointer returned (if non-null) is owned by the Builder or + //! Compiler. When the Builder/Compiler is destroyed it destroys all nodes + //! it created so no manual memory management is required. 
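// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] The intended Builder workflow:
// record nodes, optionally embed data, then finalize() so the node list is
// serialize()d into an Assembler. JitRuntime, the x86 front-end and the exact
// init calls are assumptions from elsewhere in this patch; error checks are
// omitted for brevity.
// ---------------------------------------------------------------------------
#include <asmjit/asmjit.h>
#include <cstdio>

int main() {
  using namespace asmjit;

  JitRuntime rt;
  CodeHolder code;
  code.init(rt.codeInfo());

  x86::Builder cc(&code);            // onAttach() creates the initial '.text' SectionNode
  Label L_Data = cc.newLabel();

  cc.mov(x86::eax, x86::dword_ptr(L_Data));
  cc.ret();

  cc.align(kAlignData, 4);           // AlignNode
  cc.bind(L_Data);
  uint32_t value = 42;
  cc.embed(&value, sizeof(value));   // EmbedDataNode (raw copy)

  cc.finalize();                     // runs passes and serializes nodes

  int (*fn)(void);
  rt.add(&fn, &code);
  printf("%d\n", fn());
  return 0;
}
// ---------------------------------------------------------------------------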
+ template + inline T* newNodeT() noexcept { + return _allocator.newT(this); + } + + //! \overload + template + inline T* newNodeT(ArgsT&&... args) noexcept { + return _allocator.newT(this, std::forward(args)...); + } + + //! Creates a new `LabelNode`. + ASMJIT_API LabelNode* newLabelNode() noexcept; + //! Creates a new `AlignNode`. + ASMJIT_API AlignNode* newAlignNode(uint32_t alignMode, uint32_t alignment) noexcept; + //! Creates a new `EmbedDataNode`. + ASMJIT_API EmbedDataNode* newEmbedDataNode(const void* data, uint32_t size) noexcept; + //! Creates a new `ConstPoolNode`. + ASMJIT_API ConstPoolNode* newConstPoolNode() noexcept; + //! Creates a new `CommentNode`. + ASMJIT_API CommentNode* newCommentNode(const char* data, size_t size) noexcept; + + ASMJIT_API InstNode* newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0) noexcept; + ASMJIT_API InstNode* newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1) noexcept; + ASMJIT_API InstNode* newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept; + ASMJIT_API InstNode* newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept; + ASMJIT_API InstNode* newInstNodeRaw(uint32_t instId, uint32_t instOptions, uint32_t opCount) noexcept; + + //! Adds `node` after the current and sets the current node to the given `node`. + ASMJIT_API BaseNode* addNode(BaseNode* node) noexcept; + //! Inserts the given `node` after `ref`. + ASMJIT_API BaseNode* addAfter(BaseNode* node, BaseNode* ref) noexcept; + //! Inserts the given `node` before `ref`. + ASMJIT_API BaseNode* addBefore(BaseNode* node, BaseNode* ref) noexcept; + //! Removes the given `node`. + ASMJIT_API BaseNode* removeNode(BaseNode* node) noexcept; + //! Removes multiple nodes. + ASMJIT_API void removeNodes(BaseNode* first, BaseNode* last) noexcept; + + //! Returns the cursor. + //! + //! When the Builder/Compiler is created it automatically creates a '.text' + //! \ref SectionNode, which will be the initial one. When instructions are + //! added they are always added after the cursor and the cursor is changed + //! to be that newly added node. Use `setCursor()` to change where new nodes + //! are inserted. + inline BaseNode* cursor() const noexcept { return _cursor; } + + //! Sets the current node to `node` and return the previous one. + ASMJIT_API BaseNode* setCursor(BaseNode* node) noexcept; + + //! Sets the current node without returning the previous node. + //! + //! Only use this function if you are concerned about performance and want + //! this inlined (for example if you set the cursor in a loop, etc...). + inline void _setCursor(BaseNode* node) noexcept { _cursor = node; } + + //! \} + + //! \name Section Management + //! \{ + + //! Returns a vector of SectionNode objects. + //! + //! \note If a section of some id is not associated with the Builder/Compiler + //! it would be null, so always check for nulls if you iterate over the vector. + inline const ZoneVector& sectionNodes() const noexcept { return _sectionNodes; } + + //! Tests whether the `SectionNode` of the given `sectionId` was registered. + inline bool hasRegisteredSectionNode(uint32_t sectionId) const noexcept { + return sectionId < _sectionNodes.size() && _sectionNodes[sectionId] != nullptr; + } + + //! Returns or creates a `SectionNode` that matches the given `sectionId`. + //! + //! 
\remarks This function will either get the existing `SectionNode` or create + //! it in case it wasn't created before. You can check whether a section has a + //! registered `SectionNode` by using `BaseBuilder::hasRegisteredSectionNode()`. + ASMJIT_API Error sectionNodeOf(SectionNode** pOut, uint32_t sectionId) noexcept; + + ASMJIT_API Error section(Section* section) override; + + //! Returns whether the section links of active section nodes are dirty. You can + //! update these links by calling `updateSectionLinks()` in such case. + inline bool hasDirtySectionLinks() const noexcept { return _dirtySectionLinks; } + + //! Updates links of all active section nodes. + ASMJIT_API void updateSectionLinks() noexcept; + + //! \} + + //! \name Label Management + //! \{ + + //! Returns a vector of LabelNode nodes. + //! + //! \note If a label of some id is not associated with the Builder/Compiler + //! it would be null, so always check for nulls if you iterate over the vector. + inline const ZoneVector& labelNodes() const noexcept { return _labelNodes; } + + //! Tests whether the `LabelNode` of the given `labelId` was registered. + inline bool hasRegisteredLabelNode(uint32_t labelId) const noexcept { + return labelId < _labelNodes.size() && _labelNodes[labelId] != nullptr; + } + + //! \overload + inline bool hasRegisteredLabelNode(const Label& label) const noexcept { + return hasRegisteredLabelNode(label.id()); + } + + //! Gets or creates a `LabelNode` that matches the given `labelId`. + //! + //! \remarks This function will either get the existing `LabelNode` or create + //! it in case it wasn't created before. You can check whether a label has a + //! registered `LabelNode` by using `BaseBuilder::hasRegisteredLabelNode()`. + ASMJIT_API Error labelNodeOf(LabelNode** pOut, uint32_t labelId) noexcept; + + //! \overload + inline Error labelNodeOf(LabelNode** pOut, const Label& label) noexcept { + return labelNodeOf(pOut, label.id()); + } + + //! Registers this label node [Internal]. + //! + //! This function is used internally to register a newly created `LabelNode` + //! with this instance of Builder/Compiler. Use `labelNodeOf()` functions to + //! get back `LabelNode` from a label or its identifier. + ASMJIT_API Error registerLabelNode(LabelNode* node) noexcept; + + ASMJIT_API Label newLabel() override; + ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override; + ASMJIT_API Error bind(const Label& label) override; + + //! \} + + //! \name Passes + //! \{ + + //! Returns a vector of `Pass` instances that will be executed by `runPasses()`. + inline const ZoneVector& passes() const noexcept { return _passes; } + + //! Allocates and instantiates a new pass of type `T` and returns its instance. + //! If the allocation fails `nullptr` is returned. + //! + //! The template argument `T` must be a type that is extends \ref Pass. + //! + //! \remarks The pointer returned (if non-null) is owned by the Builder or + //! Compiler. When the Builder/Compiler is destroyed it destroys all passes + //! it created so no manual memory management is required. + template + inline T* newPassT() noexcept { return _codeZone.newT(); } + + //! \overload + template + inline T* newPassT(ArgsT&&... args) noexcept { return _codeZone.newT(std::forward(args)...); } + + template + inline Error addPassT() noexcept { return addPass(newPassT()); } + + template + inline Error addPassT(ArgsT&&... 
args) noexcept { return addPass(newPassT(std::forward(args)...)); } + + //! Returns `Pass` by name. + ASMJIT_API Pass* passByName(const char* name) const noexcept; + //! Adds `pass` to the list of passes. + ASMJIT_API Error addPass(Pass* pass) noexcept; + //! Removes `pass` from the list of passes and delete it. + ASMJIT_API Error deletePass(Pass* pass) noexcept; + + //! Runs all passes in order. + ASMJIT_API Error runPasses(); + + //! \} + + //! \name Emit + //! \{ + + ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) override; + ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) override; + + //! \} + + //! \name Align + //! \{ + + ASMJIT_API Error align(uint32_t alignMode, uint32_t alignment) override; + + //! \} + + //! \name Embed + //! \{ + + ASMJIT_API Error embed(const void* data, uint32_t dataSize) override; + ASMJIT_API Error embedLabel(const Label& label) override; + ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, uint32_t dataSize) override; + ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override; + + //! \} + + //! \name Comment + //! \{ + + ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override; + + //! \} + + //! \name Serialization + //! \{ + + //! Serializes everything the given emitter `dst`. + //! + //! Although not explicitly required the emitter will most probably be of + //! Assembler type. The reason is that there is no known use of serializing + //! nodes held by Builder/Compiler into another Builder-like emitter. + ASMJIT_API Error serialize(BaseEmitter* dst); + + //! \} + + //! \name Logging + //! \{ + + #ifndef ASMJIT_NO_LOGGING + ASMJIT_API Error dump(String& sb, uint32_t flags = 0) const noexcept; + #endif + + //! \} + + //! \name Events + //! \{ + + ASMJIT_API Error onAttach(CodeHolder* code) noexcept override; + ASMJIT_API Error onDetach(CodeHolder* code) noexcept override; + + //! \} +}; + +// ============================================================================ +// [asmjit::BaseNode] +// ============================================================================ + +//! Base node. +//! +//! Every node represents a building-block used by `BaseBuilder`. It can be +//! instruction, data, label, comment, directive, or any other high-level +//! representation that can be transformed to the building blocks mentioned. +//! Every class that inherits `BaseBuilder` can define its own nodes that it +//! can lower to basic nodes. +class BaseNode { +public: + ASMJIT_NONCOPYABLE(BaseNode) + + union { + struct { + //! Previous node. + BaseNode* _prev; + //! Next node. + BaseNode* _next; + }; + //! Links (previous and next nodes). + BaseNode* _links[2]; + }; + + //! Data shared between all types of nodes. + struct AnyData { + //! Node type, see \ref NodeType. + uint8_t _nodeType; + //! Node flags, see \ref Flags. + uint8_t _nodeFlags; + //! Not used by BaseNode. + uint8_t _reserved0; + //! Not used by BaseNode. + uint8_t _reserved1; + }; + + struct InstData { + //! Node type, see \ref NodeType. + uint8_t _nodeType; + //! Node flags, see \ref Flags. + uint8_t _nodeFlags; + //! Instruction operands count (used). + uint8_t _opCount; + //! Instruction operands capacity (allocated). + uint8_t _opCapacity; + }; + + struct SentinelData { + //! Node type, see \ref NodeType. + uint8_t _nodeType; + //! 
Node flags, see \ref Flags. + uint8_t _nodeFlags; + //! Sentinel type. + uint8_t _sentinelType; + //! Not used by BaseNode. + uint8_t _reserved1; + }; + + union { + AnyData _any; + InstData _inst; + SentinelData _sentinel; + }; + + //! Node position in code (should be unique). + uint32_t _position; + + //! Value reserved for AsmJit users never touched by AsmJit itself. + union { + uint64_t _userDataU64; + void* _userDataPtr; + }; + + //! Data used exclusively by the current `Pass`. + void* _passData; + + //! Inline comment/annotation or nullptr if not used. + const char* _inlineComment; + + //! Type of `BaseNode`. + enum NodeType : uint32_t { + //! Invalid node (internal, don't use). + kNodeNone = 0, + + // [BaseBuilder] + + //! Node is `InstNode` or `InstExNode`. + kNodeInst = 1, + //! Node is `SectionNode`. + kNodeSection = 2, + //! Node is `LabelNode`. + kNodeLabel = 3, + //! Node is `AlignNode`. + kNodeAlign = 4, + //! Node is `EmbedDataNode`. + kNodeEmbedData = 5, + //! Node is `EmbedLabelNode`. + kNodeEmbedLabel = 6, + //! Node is `EmbedLabelDeltaNode`. + kNodeEmbedLabelDelta = 7, + //! Node is `ConstPoolNode`. + kNodeConstPool = 8, + //! Node is `CommentNode`. + kNodeComment = 9, + //! Node is `SentinelNode`. + kNodeSentinel = 10, + + // [BaseCompiler] + + //! Node is `FuncNode` (acts as LabelNode). + kNodeFunc = 16, + //! Node is `FuncRetNode` (acts as InstNode). + kNodeFuncRet = 17, + //! Node is `FuncCallNode` (acts as InstNode). + kNodeFuncCall = 18, + + // [UserDefined] + + //! First id of a user-defined node. + kNodeUser = 32 + }; + + //! Node flags, specify what the node is and/or does. + enum Flags : uint32_t { + kFlagIsCode = 0x01u, //!< Node is code that can be executed (instruction, label, align, etc...). + kFlagIsData = 0x02u, //!< Node is data that cannot be executed (data, const-pool, etc...). + kFlagIsInformative = 0x04u, //!< Node is informative, can be removed and ignored. + kFlagIsRemovable = 0x08u, //!< Node can be safely removed if unreachable. + kFlagHasNoEffect = 0x10u, //!< Node does nothing when executed (label, align, explicit nop). + kFlagActsAsInst = 0x20u, //!< Node is an instruction or acts as it. + kFlagActsAsLabel = 0x40u, //!< Node is a label or acts as it. + kFlagIsActive = 0x80u //!< Node is active (part of the code). + }; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `BaseNode` - always use `BaseBuilder` to allocate nodes. + ASMJIT_INLINE BaseNode(BaseBuilder* cb, uint32_t type, uint32_t flags = 0) noexcept { + _prev = nullptr; + _next = nullptr; + _any._nodeType = uint8_t(type); + _any._nodeFlags = uint8_t(flags | cb->_nodeFlags); + _any._reserved0 = 0; + _any._reserved1 = 0; + _position = 0; + _userDataU64 = 0; + _passData = nullptr; + _inlineComment = nullptr; + } + + //! \} + + //! \name Accessors + //! \{ + + //! Casts this node to `T*`. + template + inline T* as() noexcept { return static_cast(this); } + //! Casts this node to `const T*`. + template + inline const T* as() const noexcept { return static_cast(this); } + + //! Returns previous node or `nullptr` if this node is either first or not + //! part of Builder/Compiler node-list. + inline BaseNode* prev() const noexcept { return _prev; } + //! Returns next node or `nullptr` if this node is either last or not part + //! of Builder/Compiler node-list. + inline BaseNode* next() const noexcept { return _next; } + + //! Returns the type of the node, see `NodeType`. + inline uint32_t type() const noexcept { return _any._nodeType; } + + //! 
Sets the type of the node, see `NodeType` (internal). + //! + //! \remarks You should never set a type of a node to anything else than the + //! initial value. This function is only provided for users that use custom + //! nodes and need to change the type either during construction or later. + inline void setType(uint32_t type) noexcept { _any._nodeType = uint8_t(type); } + + //! Tests whether this node is either `InstNode` or extends it. + inline bool isInst() const noexcept { return hasFlag(kFlagActsAsInst); } + //! Tests whether this node is `SectionNode`. + inline bool isSection() const noexcept { return type() == kNodeSection; } + //! Tests whether this node is either `LabelNode` or extends it. + inline bool isLabel() const noexcept { return hasFlag(kFlagActsAsLabel); } + //! Tests whether this node is `AlignNode`. + inline bool isAlign() const noexcept { return type() == kNodeAlign; } + //! Tests whether this node is `EmbedDataNode`. + inline bool isEmbedData() const noexcept { return type() == kNodeEmbedData; } + //! Tests whether this node is `EmbedLabelNode`. + inline bool isEmbedLabel() const noexcept { return type() == kNodeEmbedLabel; } + //! Tests whether this node is `EmbedLabelDeltaNode`. + inline bool isEmbedLabelDelta() const noexcept { return type() == kNodeEmbedLabelDelta; } + //! Tests whether this node is `ConstPoolNode`. + inline bool isConstPool() const noexcept { return type() == kNodeConstPool; } + //! Tests whether this node is `CommentNode`. + inline bool isComment() const noexcept { return type() == kNodeComment; } + //! Tests whether this node is `SentinelNode`. + inline bool isSentinel() const noexcept { return type() == kNodeSentinel; } + + //! Tests whether this node is `FuncNode`. + inline bool isFunc() const noexcept { return type() == kNodeFunc; } + //! Tests whether this node is `FuncRetNode`. + inline bool isFuncRet() const noexcept { return type() == kNodeFuncRet; } + //! Tests whether this node is `FuncCallNode`. + inline bool isFuncCall() const noexcept { return type() == kNodeFuncCall; } + + //! Returns the node flags, see \ref Flags. + inline uint32_t flags() const noexcept { return _any._nodeFlags; } + //! Tests whether the node has the given `flag` set. + inline bool hasFlag(uint32_t flag) const noexcept { return (uint32_t(_any._nodeFlags) & flag) != 0; } + //! Replaces node flags with `flags`. + inline void setFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(flags); } + //! Adds the given `flags` to node flags. + inline void addFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(_any._nodeFlags | flags); } + //! Clears the given `flags` from node flags. + inline void clearFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(_any._nodeFlags & (flags ^ 0xFF)); } + + //! Tests whether the node is code that can be executed. + inline bool isCode() const noexcept { return hasFlag(kFlagIsCode); } + //! Tests whether the node is data that cannot be executed. + inline bool isData() const noexcept { return hasFlag(kFlagIsData); } + //! Tests whether the node is informative only (is never encoded like comment, etc...). + inline bool isInformative() const noexcept { return hasFlag(kFlagIsInformative); } + //! Tests whether the node is removable if it's in an unreachable code block. + inline bool isRemovable() const noexcept { return hasFlag(kFlagIsRemovable); } + //! Tests whether the node has no effect when executed (label, .align, nop, ...). + inline bool hasNoEffect() const noexcept { return hasFlag(kFlagHasNoEffect); } + //! 
Tests whether the node is part of the code. + inline bool isActive() const noexcept { return hasFlag(kFlagIsActive); } + + //! Tests whether the node has a position assigned. + //! + //! \remarks Returns `true` if node position is non-zero. + inline bool hasPosition() const noexcept { return _position != 0; } + //! Returns node position. + inline uint32_t position() const noexcept { return _position; } + //! Sets node position. + //! + //! Node position is a 32-bit unsigned integer that is used by Compiler to + //! track where the node is relatively to the start of the function. It doesn't + //! describe a byte position in a binary, instead it's just a pseudo position + //! used by liveness analysis and other tools around Compiler. + //! + //! If you don't use Compiler then you may use `position()` and `setPosition()` + //! freely for your own purposes if the 32-bit value limit is okay for you. + inline void setPosition(uint32_t position) noexcept { _position = position; } + + //! Returns user data casted to `T*`. + //! + //! User data is decicated to be used only by AsmJit users and not touched + //! by the library. The data has a pointer size so you can either store a + //! pointer or `intptr_t` value through `setUserDataAsIntPtr()`. + template + inline T* userDataAsPtr() const noexcept { return static_cast(_userDataPtr); } + //! Returns user data casted to `int64_t`. + inline int64_t userDataAsInt64() const noexcept { return int64_t(_userDataU64); } + //! Returns user data casted to `uint64_t`. + inline uint64_t userDataAsUInt64() const noexcept { return _userDataU64; } + + //! Sets user data to `data`. + template + inline void setUserDataAsPtr(T* data) noexcept { _userDataPtr = static_cast(data); } + //! Sets used data to the given 64-bit signed `value`. + inline void setUserDataAsInt64(int64_t value) noexcept { _userDataU64 = uint64_t(value); } + //! Sets used data to the given 64-bit unsigned `value`. + inline void setUserDataAsUInt64(uint64_t value) noexcept { _userDataU64 = value; } + + //! Resets user data to zero / nullptr. + inline void resetUserData() noexcept { _userDataU64 = 0; } + + //! Tests whether the node has an associated pass data. + inline bool hasPassData() const noexcept { return _passData != nullptr; } + //! Returns the node pass data - data used during processing & transformations. + template + inline T* passData() const noexcept { return (T*)_passData; } + //! Sets the node pass data to `data`. + template + inline void setPassData(T* data) noexcept { _passData = (void*)data; } + //! Resets the node pass data to nullptr. + inline void resetPassData() noexcept { _passData = nullptr; } + + //! Tests whether the node has an inline comment/annotation. + inline bool hasInlineComment() const noexcept { return _inlineComment != nullptr; } + //! Returns an inline comment/annotation string. + inline const char* inlineComment() const noexcept { return _inlineComment; } + //! Sets an inline comment/annotation string to `s`. + inline void setInlineComment(const char* s) noexcept { _inlineComment = s; } + //! Resets an inline comment/annotation string to nullptr. + inline void resetInlineComment() noexcept { _inlineComment = nullptr; } + + //! \} +}; + +// ============================================================================ +// [asmjit::InstNode] +// ============================================================================ + +//! Instruction node. +//! +//! Wraps an instruction with its options and operands. 
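// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] The user-data and pass-data slots
// of BaseNode are never touched by AsmJit itself, so tools built on top of
// Builder/Compiler can tag nodes freely; the struct below is hypothetical.
// ---------------------------------------------------------------------------
struct MyNodeInfo { uint32_t visitCount; };

static void tagNode(asmjit::BaseNode* node) noexcept {
  static MyNodeInfo info = { 0 };

  node->setUserDataAsUInt64(0xC0FFEEu);   // kept for the node's lifetime
  node->setPassData(&info);               // meant to be used only by the current Pass

  if (node->userDataAsUInt64() == 0xC0FFEEu && node->hasPassData())
    node->passData<MyNodeInfo>()->visitCount++;
}
// ---------------------------------------------------------------------------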
+class InstNode : public BaseNode { +public: + ASMJIT_NONCOPYABLE(InstNode) + + enum : uint32_t { + //! Count of embedded operands per `InstNode` that are always allocated as + //! a part of the instruction. Minimum embedded operands is 4, but in 32-bit + //! more pointers are smaller and we can embed 5. The rest (up to 6 operands) + //! is always stored in `InstExNode`. + kBaseOpCapacity = uint32_t((128 - sizeof(BaseNode) - sizeof(BaseInst)) / sizeof(Operand_)) + }; + + //! Base instruction data. + BaseInst _baseInst; + //! First 4 or 5 operands (indexed from 0). + Operand_ _opArray[kBaseOpCapacity]; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `InstNode` instance. + ASMJIT_INLINE InstNode(BaseBuilder* cb, uint32_t instId, uint32_t options, uint32_t opCount, uint32_t opCapacity = kBaseOpCapacity) noexcept + : BaseNode(cb, kNodeInst, kFlagIsCode | kFlagIsRemovable | kFlagActsAsInst), + _baseInst(instId, options) { + _inst._opCapacity = uint8_t(opCapacity); + _inst._opCount = uint8_t(opCount); + } + + //! Reset all built-in operands, including `extraReg`. + inline void _resetOps() noexcept { + _baseInst.resetExtraReg(); + for (uint32_t i = 0, count = opCapacity(); i < count; i++) + _opArray[i].reset(); + } + + //! \} + + //! \name Accessors + //! \{ + + inline BaseInst& baseInst() noexcept { return _baseInst; } + inline const BaseInst& baseInst() const noexcept { return _baseInst; } + + //! Returns the instruction id, see `BaseInst::Id`. + inline uint32_t id() const noexcept { return _baseInst.id(); } + //! Sets the instruction id to `id`, see `BaseInst::Id`. + inline void setId(uint32_t id) noexcept { _baseInst.setId(id); } + + //! Returns instruction options. + inline uint32_t instOptions() const noexcept { return _baseInst.options(); } + //! Sets instruction options. + inline void setInstOptions(uint32_t options) noexcept { _baseInst.setOptions(options); } + //! Adds instruction options. + inline void addInstOptions(uint32_t options) noexcept { _baseInst.addOptions(options); } + //! Clears instruction options. + inline void clearInstOptions(uint32_t options) noexcept { _baseInst.clearOptions(options); } + + //! Tests whether the node has an extra register operand. + inline bool hasExtraReg() const noexcept { return _baseInst.hasExtraReg(); } + //! Returns extra register operand. + inline RegOnly& extraReg() noexcept { return _baseInst.extraReg(); } + //! \overload + inline const RegOnly& extraReg() const noexcept { return _baseInst.extraReg(); } + //! Sets extra register operand to `reg`. + inline void setExtraReg(const BaseReg& reg) noexcept { _baseInst.setExtraReg(reg); } + //! Sets extra register operand to `reg`. + inline void setExtraReg(const RegOnly& reg) noexcept { _baseInst.setExtraReg(reg); } + //! Resets extra register operand. + inline void resetExtraReg() noexcept { _baseInst.resetExtraReg(); } + + //! Returns operands count. + inline uint32_t opCount() const noexcept { return _inst._opCount; } + //! Returns operands capacity. + inline uint32_t opCapacity() const noexcept { return _inst._opCapacity; } + + //! Sets operands count. + inline void setOpCount(uint32_t opCount) noexcept { _inst._opCount = uint8_t(opCount); } + + //! Returns operands array. + inline Operand* operands() noexcept { return (Operand*)_opArray; } + //! Returns operands array (const). 
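// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] Inspecting the operands of an
// InstNode, for example from inside a Pass, using only opCount() and
// operands() declared in this class.
// ---------------------------------------------------------------------------
static uint32_t countRegOperands(asmjit::BaseNode* node) noexcept {
  if (!node->isInst())
    return 0;

  asmjit::InstNode* inst = node->as<asmjit::InstNode>();
  uint32_t regs = 0;

  for (uint32_t i = 0, n = inst->opCount(); i < n; i++)
    if (inst->operands()[i].isReg())
      regs++;

  return regs;
}
// ---------------------------------------------------------------------------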
+ inline const Operand* operands() const noexcept { return (const Operand*)_opArray; } + + inline Operand& opType(uint32_t index) noexcept { + ASMJIT_ASSERT(index < opCapacity()); + return _opArray[index].as(); + } + + inline const Operand& opType(uint32_t index) const noexcept { + ASMJIT_ASSERT(index < opCapacity()); + return _opArray[index].as(); + } + + inline void setOp(uint32_t index, const Operand_& op) noexcept { + ASMJIT_ASSERT(index < opCapacity()); + _opArray[index].copyFrom(op); + } + + inline void resetOp(uint32_t index) noexcept { + ASMJIT_ASSERT(index < opCapacity()); + _opArray[index].reset(); + } + + //! \} + + //! \name Utilities + //! \{ + + inline bool hasOpType(uint32_t opType) const noexcept { + for (uint32_t i = 0, count = opCount(); i < count; i++) + if (_opArray[i].opType() == opType) + return true; + return false; + } + + inline bool hasRegOp() const noexcept { return hasOpType(Operand::kOpReg); } + inline bool hasMemOp() const noexcept { return hasOpType(Operand::kOpMem); } + inline bool hasImmOp() const noexcept { return hasOpType(Operand::kOpImm); } + inline bool hasLabelOp() const noexcept { return hasOpType(Operand::kOpLabel); } + + inline uint32_t indexOfOpType(uint32_t opType) const noexcept { + uint32_t i = 0; + uint32_t count = opCount(); + + while (i < count) { + if (_opArray[i].opType() == opType) + break; + i++; + } + + return i; + } + + inline uint32_t indexOfMemOp() const noexcept { return indexOfOpType(Operand::kOpMem); } + inline uint32_t indexOfImmOp() const noexcept { return indexOfOpType(Operand::kOpImm); } + inline uint32_t indexOfLabelOp() const noexcept { return indexOfOpType(Operand::kOpLabel); } + + //! \} + + //! \name Rewriting + //! \{ + + inline uint32_t* _getRewriteArray() noexcept { return &_baseInst._extraReg._id; } + inline const uint32_t* _getRewriteArray() const noexcept { return &_baseInst._extraReg._id; } + + ASMJIT_INLINE uint32_t getRewriteIndex(const uint32_t* id) const noexcept { + const uint32_t* array = _getRewriteArray(); + ASMJIT_ASSERT(array <= id); + + size_t index = (size_t)(id - array); + ASMJIT_ASSERT(index < 32); + + return uint32_t(index); + } + + ASMJIT_INLINE void rewriteIdAtIndex(uint32_t index, uint32_t id) noexcept { + uint32_t* array = _getRewriteArray(); + array[index] = id; + } + + //! \} + + //! \name Static Functions + //! \{ + + static inline uint32_t capacityOfOpCount(uint32_t opCount) noexcept { + return opCount <= kBaseOpCapacity ? kBaseOpCapacity : Globals::kMaxOpCount; + } + + static inline size_t nodeSizeOfOpCapacity(uint32_t opCapacity) noexcept { + size_t base = sizeof(InstNode) - kBaseOpCapacity * sizeof(Operand); + return base + opCapacity * sizeof(Operand); + } + + //! \} +}; + +// ============================================================================ +// [asmjit::InstExNode] +// ============================================================================ + +//! Instruction node with maximum number of operands.. +//! +//! This node is created automatically by Builder/Compiler in case that the +//! required number of operands exceeds the default capacity of `InstNode`. +class InstExNode : public InstNode { +public: + ASMJIT_NONCOPYABLE(InstExNode) + + //! Continued `_opArray[]` to hold up to `kMaxOpCount` operands. + Operand_ _opArrayEx[Globals::kMaxOpCount - kBaseOpCapacity]; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `InstExNode` instance. 
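// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] The allocation math used by the
// emitter in builder.cpp: operand counts up to kBaseOpCapacity reuse the
// embedded array, anything larger is rounded up to Globals::kMaxOpCount.
// ---------------------------------------------------------------------------
static size_t instNodeSizeFor(uint32_t opCount) noexcept {
  uint32_t opCapacity = asmjit::InstNode::capacityOfOpCount(opCount);
  // <= kBaseOpCapacity stays at kBaseOpCapacity, anything larger becomes
  // Globals::kMaxOpCount and therefore needs an InstExNode-sized block.
  return asmjit::InstNode::nodeSizeOfOpCapacity(opCapacity);
}
// For example, instNodeSizeFor(6) should equal sizeof(asmjit::InstExNode).
// ---------------------------------------------------------------------------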
+ inline InstExNode(BaseBuilder* cb, uint32_t instId, uint32_t options, uint32_t opCapacity = Globals::kMaxOpCount) noexcept + : InstNode(cb, instId, options, opCapacity) {} + + //! \} +}; + +// ============================================================================ +// [asmjit::SectionNode] +// ============================================================================ + +//! Section node. +class SectionNode : public BaseNode { +public: + ASMJIT_NONCOPYABLE(SectionNode) + + //! Section id. + uint32_t _id; + + //! Next section node that follows this section. + //! + //! This link is only valid when the section is active (is part of the code) + //! and when `Builder::hasDirtySectionLinks()` returns `false`. If you intend + //! to use this field you should always call `Builder::updateSectionLinks()` + //! before you do so. + SectionNode* _nextSection; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `SectionNode` instance. + inline SectionNode(BaseBuilder* cb, uint32_t id = 0) noexcept + : BaseNode(cb, kNodeSection, kFlagHasNoEffect), + _id(id), + _nextSection(nullptr) {} + + //! \} + + //! \name Accessors + //! \{ + + //! Returns the section id. + inline uint32_t id() const noexcept { return _id; } + + //! \} +}; + +// ============================================================================ +// [asmjit::LabelNode] +// ============================================================================ + +//! Label node. +class LabelNode : public BaseNode { +public: + ASMJIT_NONCOPYABLE(LabelNode) + + uint32_t _id; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `LabelNode` instance. + inline LabelNode(BaseBuilder* cb, uint32_t id = 0) noexcept + : BaseNode(cb, kNodeLabel, kFlagHasNoEffect | kFlagActsAsLabel), + _id(id) {} + + //! \} + + //! \name Accessors + //! \{ + + //! Returns the id of the label. + inline uint32_t id() const noexcept { return _id; } + //! Returns the label as `Label` operand. + inline Label label() const noexcept { return Label(_id); } + + //! \} +}; + +// ============================================================================ +// [asmjit::AlignNode] +// ============================================================================ + +//! Align directive (BaseBuilder). +//! +//! Wraps `.align` directive. +class AlignNode : public BaseNode { +public: + ASMJIT_NONCOPYABLE(AlignNode) + + //! Align mode, see `AlignMode`. + uint32_t _alignMode; + //! Alignment (in bytes). + uint32_t _alignment; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `AlignNode` instance. + inline AlignNode(BaseBuilder* cb, uint32_t alignMode, uint32_t alignment) noexcept + : BaseNode(cb, kNodeAlign, kFlagIsCode | kFlagHasNoEffect), + _alignMode(alignMode), + _alignment(alignment) {} + + //! \} + + //! \name Accessors + //! \{ + + //! Returns align mode. + inline uint32_t alignMode() const noexcept { return _alignMode; } + //! Sets align mode to `alignMode`. + inline void setAlignMode(uint32_t alignMode) noexcept { _alignMode = alignMode; } + + //! Returns align offset in bytes. + inline uint32_t alignment() const noexcept { return _alignment; } + //! Sets align offset in bytes to `offset`. + inline void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; } + + //! \} +}; + +// ============================================================================ +// [asmjit::EmbedDataNode] +// ============================================================================ + +//! Embed data node. +//! +//! 
Wraps `.data` directive. The node contains data that will be placed at the +//! node's position in the assembler stream. The data is considered to be RAW; +//! no analysis nor byte-order conversion is performed on RAW data. +class EmbedDataNode : public BaseNode { +public: + ASMJIT_NONCOPYABLE(EmbedDataNode) + + enum : uint32_t { + kInlineBufferSize = uint32_t(64 - sizeof(BaseNode) - 4) + }; + + union { + struct { + //! Embedded data buffer. + uint8_t _buf[kInlineBufferSize]; + //! Size of the data. + uint32_t _size; + }; + struct { + //! Pointer to external data. + uint8_t* _externalPtr; + }; + }; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `EmbedDataNode` instance. + inline EmbedDataNode(BaseBuilder* cb, void* data, uint32_t size) noexcept + : BaseNode(cb, kNodeEmbedData, kFlagIsData) { + + if (size <= kInlineBufferSize) { + if (data) + memcpy(_buf, data, size); + } + else { + _externalPtr = static_cast(data); + } + _size = size; + } + + //! \} + + //! \name Accessors + //! \{ + + //! Returns pointer to the data. + inline uint8_t* data() const noexcept { return _size <= kInlineBufferSize ? const_cast(_buf) : _externalPtr; } + //! Returns size of the data. + inline uint32_t size() const noexcept { return _size; } + + //! \} +}; + +// ============================================================================ +// [asmjit::EmbedLabelNode] +// ============================================================================ + +//! Label data node. +class EmbedLabelNode : public BaseNode { +public: + ASMJIT_NONCOPYABLE(EmbedLabelNode) + + uint32_t _id; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `EmbedLabelNode` instance. + inline EmbedLabelNode(BaseBuilder* cb, uint32_t id = 0) noexcept + : BaseNode(cb, kNodeEmbedLabel, kFlagIsData), + _id(id) {} + + //! \} + + //! \name Accessors + //! \{ + + //! Returns the id of the label. + inline uint32_t id() const noexcept { return _id; } + //! Sets the label id (use with caution, improper use can break a lot of things). + inline void setId(uint32_t id) noexcept { _id = id; } + + //! Returns the label as `Label` operand. + inline Label label() const noexcept { return Label(_id); } + //! Sets the label id from `label` operand. + inline void setLabel(const Label& label) noexcept { setId(label.id()); } + + //! \} +}; + +// ============================================================================ +// [asmjit::EmbedLabelDeltaNode] +// ============================================================================ + +//! Label data node. +class EmbedLabelDeltaNode : public BaseNode { +public: + ASMJIT_NONCOPYABLE(EmbedLabelDeltaNode) + + uint32_t _id; + uint32_t _baseId; + uint32_t _dataSize; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `EmbedLabelDeltaNode` instance. + inline EmbedLabelDeltaNode(BaseBuilder* cb, uint32_t id = 0, uint32_t baseId = 0, uint32_t dataSize = 0) noexcept + : BaseNode(cb, kNodeEmbedLabelDelta, kFlagIsData), + _id(id), + _baseId(baseId), + _dataSize(dataSize) {} + + //! \} + + //! \name Accessors + //! \{ + + //! Returns the id of the label. + inline uint32_t id() const noexcept { return _id; } + //! Sets the label id. + inline void setId(uint32_t id) noexcept { _id = id; } + //! Returns the label as `Label` operand. + inline Label label() const noexcept { return Label(_id); } + //! Sets the label id from `label` operand. + inline void setLabel(const Label& label) noexcept { setId(label.id()); } + + //! Returns the id of the base label. 
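// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] Reading back embedded data from
// the node list. data() hides whether the payload sits in the inline buffer
// (size <= kInlineBufferSize) or behind the external pointer.
// ---------------------------------------------------------------------------
static void inspectEmbeddedData(asmjit::BaseNode* node) noexcept {
  if (!node->isEmbedData())
    return;

  asmjit::EmbedDataNode* d = node->as<asmjit::EmbedDataNode>();
  const uint8_t* bytes = d->data();
  uint32_t size = d->size();
  // ... analyze bytes[0..size) ...
  (void)bytes;
  (void)size;
}
// ---------------------------------------------------------------------------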
+ inline uint32_t baseId() const noexcept { return _baseId; } + //! Sets the base label id. + inline void setBaseId(uint32_t baseId) noexcept { _baseId = baseId; } + //! Returns the base label as `Label` operand. + inline Label baseLabel() const noexcept { return Label(_baseId); } + //! Sets the base label id from `label` operand. + inline void setBaseLabel(const Label& baseLabel) noexcept { setBaseId(baseLabel.id()); } + + inline uint32_t dataSize() const noexcept { return _dataSize; } + inline void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; } + + //! \} +}; + +// ============================================================================ +// [asmjit::ConstPoolNode] +// ============================================================================ + +//! A node that wraps `ConstPool`. +class ConstPoolNode : public LabelNode { +public: + ASMJIT_NONCOPYABLE(ConstPoolNode) + + ConstPool _constPool; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `ConstPoolNode` instance. + inline ConstPoolNode(BaseBuilder* cb, uint32_t id = 0) noexcept + : LabelNode(cb, id), + _constPool(&cb->_codeZone) { + + setType(kNodeConstPool); + addFlags(kFlagIsData); + clearFlags(kFlagIsCode | kFlagHasNoEffect); + } + + //! \} + + //! \name Accessors + //! \{ + + //! Tests whether the constant-pool is empty. + inline bool empty() const noexcept { return _constPool.empty(); } + //! Returns the size of the constant-pool in bytes. + inline size_t size() const noexcept { return _constPool.size(); } + //! Returns minimum alignment. + inline size_t alignment() const noexcept { return _constPool.alignment(); } + + //! Returns the wrapped `ConstPool` instance. + inline ConstPool& constPool() noexcept { return _constPool; } + //! Returns the wrapped `ConstPool` instance (const). + inline const ConstPool& constPool() const noexcept { return _constPool; } + + //! \} + + //! \name Utilities + //! \{ + + //! See `ConstPool::add()`. + inline Error add(const void* data, size_t size, size_t& dstOffset) noexcept { + return _constPool.add(data, size, dstOffset); + } + + //! \} +}; + +// ============================================================================ +// [asmjit::CommentNode] +// ============================================================================ + +//! Comment node. +class CommentNode : public BaseNode { +public: + ASMJIT_NONCOPYABLE(CommentNode) + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `CommentNode` instance. + inline CommentNode(BaseBuilder* cb, const char* comment) noexcept + : BaseNode(cb, kNodeComment, kFlagIsInformative | kFlagHasNoEffect | kFlagIsRemovable) { + _inlineComment = comment; + } + + //! \} +}; + +// ============================================================================ +// [asmjit::SentinelNode] +// ============================================================================ + +//! Sentinel node. +//! +//! Sentinel is a marker that is completely ignored by the code builder. It's +//! used to remember a position in a code as it never gets removed by any pass. +class SentinelNode : public BaseNode { +public: + ASMJIT_NONCOPYABLE(SentinelNode) + + //! Type of the sentinel (purery informative purpose). + enum SentinelType : uint32_t { + kSentinelUnknown = 0u, + kSentinelFuncEnd = 1u + }; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `SentinelNode` instance. 
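// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] One way a ConstPoolNode can be
// used: constants are deduplicated into the pool and addressed through the
// node's label, and addNode() decides where the pool is emitted (serialize()
// lowers it via embedConstPool()). The x86 addressing/instruction helpers are
// assumptions from the x86 front-end; error handling is omitted.
// ---------------------------------------------------------------------------
static void useConstPool(asmjit::x86::Builder& cc) {
  asmjit::ConstPoolNode* pool = cc.newConstPoolNode();

  size_t offset;
  double k = 3.141592653589793;
  pool->add(&k, sizeof(k), offset);   // see ConstPool::add()

  cc.movsd(asmjit::x86::xmm0, asmjit::x86::ptr(pool->label(), int32_t(offset)));

  cc.addNode(pool);                   // place the pool at the current cursor
}
// ---------------------------------------------------------------------------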
+ inline SentinelNode(BaseBuilder* cb, uint32_t sentinelType = kSentinelUnknown) noexcept + : BaseNode(cb, kNodeSentinel, kFlagIsInformative | kFlagHasNoEffect) { + + _sentinel._sentinelType = uint8_t(sentinelType); + } + + //! \} + + //! \name Accessors + //! \{ + + inline uint32_t sentinelType() const noexcept { return _sentinel._sentinelType; } + inline void setSentinelType(uint32_t type) noexcept { _sentinel._sentinelType = uint8_t(type); } + + //! \} +}; + +// ============================================================================ +// [asmjit::Pass] +// ============================================================================ + +//! Pass can be used to implement code transformations, analysis, and lowering. +class ASMJIT_VIRTAPI Pass { +public: + ASMJIT_BASE_CLASS(Pass) + ASMJIT_NONCOPYABLE(Pass) + + //! BaseBuilder this pass is assigned to. + BaseBuilder* _cb; + //! Name of the pass. + const char* _name; + + //! \name Construction & Destruction + //! \{ + + ASMJIT_API Pass(const char* name) noexcept; + ASMJIT_API virtual ~Pass() noexcept; + + //! \} + + //! \name Accessors + //! \{ + + inline const BaseBuilder* cb() const noexcept { return _cb; } + inline const char* name() const noexcept { return _name; } + + //! \} + + //! \name Pass Interface + //! \{ + + //! Processes the code stored in Builder or Compiler. + //! + //! This is the only function that is called by the `BaseBuilder` to process + //! the code. It passes `zone`, which will be reset after the `run()` finishes. + virtual Error run(Zone* zone, Logger* logger) noexcept = 0; + + //! \} +}; + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // !ASMJIT_NO_BUILDER +#endif // _ASMJIT_CORE_BUILDER_H diff --git a/src/asmjit/core/callconv.cpp b/src/asmjit/core/callconv.cpp new file mode 100644 index 0000000..b9d6d65 --- /dev/null +++ b/src/asmjit/core/callconv.cpp @@ -0,0 +1,43 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#define ASMJIT_EXPORTS + +#include "../core/arch.h" +#include "../core/func.h" +#include "../core/type.h" + +#ifdef ASMJIT_BUILD_X86 + #include "../x86/x86callconv_p.h" +#endif + +#ifdef ASMJIT_BUILD_ARM + #include "../arm/armcallconv_p.h" +#endif + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::CallConv - Init / Reset] +// ============================================================================ + +ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId) noexcept { + reset(); + + #ifdef ASMJIT_BUILD_X86 + if (CallConv::isX86Family(ccId)) + return x86::CallConvInternal::init(*this, ccId); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (CallConv::isArmFamily(ccId)) + return arm::CallConvInternal::init(*this, ccId); + #endif + + return DebugUtils::errored(kErrorInvalidArgument); +} + +ASMJIT_END_NAMESPACE diff --git a/src/asmjit/core/callconv.h b/src/asmjit/core/callconv.h new file mode 100644 index 0000000..723f812 --- /dev/null +++ b/src/asmjit/core/callconv.h @@ -0,0 +1,394 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_CALLCONV_H +#define _ASMJIT_CORE_CALLCONV_H + +#include "../core/arch.h" +#include "../core/operand.h" +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_func +//! 
\{ + +// ============================================================================ +// [asmjit::CallConv] +// ============================================================================ + +//! Function calling convention. +//! +//! Function calling convention is a scheme that defines how function parameters +//! are passed and how function returns its result. AsmJit defines a variety of +//! architecture and OS specific calling conventions and also provides a compile +//! time detection to make the code-generation easier. +struct CallConv { + //! Calling convention id, see `Id`. + uint8_t _id; + //! Architecture id (see `ArchInfo::Id`). + uint8_t _archId; + //! Register assignment strategy. + uint8_t _strategy; + //! Flags. + uint8_t _flags; + + //! Red zone size (AMD64 == 128 bytes). + uint8_t _redZoneSize; + //! Spill zone size (WIN64 == 32 bytes). + uint8_t _spillZoneSize; + //! Natural stack alignment as defined by OS/ABI. + uint8_t _naturalStackAlignment; + uint8_t _reserved[1]; + + //! Mask of all passed registers, per group. + uint32_t _passedRegs[BaseReg::kGroupVirt]; + //! Mask of all preserved registers, per group. + uint32_t _preservedRegs[BaseReg::kGroupVirt]; + + //! Internal limits of AsmJit's CallConv. + enum Limits : uint32_t { + kMaxRegArgsPerGroup = 16 + }; + + //! Passed registers' order. + union RegOrder { + //! Passed registers, ordered. + uint8_t id[kMaxRegArgsPerGroup]; + uint32_t packed[(kMaxRegArgsPerGroup + 3) / 4]; + }; + + //! Passed registers' order, per group. + RegOrder _passedOrder[BaseReg::kGroupVirt]; + + //! Calling convention id. + enum Id : uint32_t { + //! None or invalid (can't be used). + kIdNone = 0, + + // ------------------------------------------------------------------------ + // [Universal] + // ------------------------------------------------------------------------ + + // TODO: To make this possible we need to know target ARCH and ABI. + + /* + + // Universal calling conventions are applicable to any target and are + // converted to target dependent conventions at runtime. The purpose of + // these conventions is to make using functions less target dependent. + + kIdCDecl = 1, + kIdStdCall = 2, + kIdFastCall = 3, + + //! AsmJit specific calling convention designed for calling functions + //! inside a multimedia code that don't use many registers internally, + //! but are long enough to be called and not inlined. These functions are + //! usually used to calculate trigonometric functions, logarithms, etc... + kIdLightCall2 = 10, + kIdLightCall3 = 11, + kIdLightCall4 = 12, + */ + + // ------------------------------------------------------------------------ + // [X86] + // ------------------------------------------------------------------------ + + //! X86 `__cdecl` calling convention (used by C runtime and libraries). + kIdX86CDecl = 16, + //! X86 `__stdcall` calling convention (used mostly by WinAPI). + kIdX86StdCall = 17, + //! X86 `__thiscall` calling convention (MSVC/Intel). + kIdX86MsThisCall = 18, + //! X86 `__fastcall` convention (MSVC/Intel). + kIdX86MsFastCall = 19, + //! X86 `__fastcall` convention (GCC and Clang). + kIdX86GccFastCall = 20, + //! X86 `regparm(1)` convention (GCC and Clang). + kIdX86GccRegParm1 = 21, + //! X86 `regparm(2)` convention (GCC and Clang). + kIdX86GccRegParm2 = 22, + //! X86 `regparm(3)` convention (GCC and Clang). + kIdX86GccRegParm3 = 23, + + kIdX86LightCall2 = 29, + kIdX86LightCall3 = 30, + kIdX86LightCall4 = 31, + + //! X64 calling convention - WIN64-ABI. + kIdX86Win64 = 32, + //! 
X64 calling convention - SystemV / AMD64-ABI. + kIdX86SysV64 = 33, + + kIdX64LightCall2 = 45, + kIdX64LightCall3 = 46, + kIdX64LightCall4 = 47, + + // ------------------------------------------------------------------------ + // [ARM] + // ------------------------------------------------------------------------ + + //! Legacy calling convention, floating point arguments are passed via GP registers. + kIdArm32SoftFP = 48, + //! Modern calling convention, uses VFP registers to pass floating point arguments. + kIdArm32HardFP = 49, + + // ------------------------------------------------------------------------ + // [Internal] + // ------------------------------------------------------------------------ + + //! \cond INTERNAL + + _kIdX86Start = 16, + _kIdX86End = 31, + + _kIdX64Start = 32, + _kIdX64End = 47, + + _kIdArmStart = 48, + _kIdArmEnd = 49, + + //! \endcond + + // ------------------------------------------------------------------------ + // [Host] + // ------------------------------------------------------------------------ + + #if defined(ASMJIT_DOCGEN) + + //! Default calling convention based on the current C++ compiler's settings. + //! + //! \note This should be always the same as `kIdHostCDecl`, but some + //! compilers allow to override the default calling convention. Overriding + //! is not detected at the moment. + kIdHost = DETECTED_AT_COMPILE_TIME, + + //! Default CDECL calling convention based on the current C++ compiler's settings. + kIdHostCDecl = DETECTED_AT_COMPILE_TIME, + + //! Default STDCALL calling convention based on the current C++ compiler's settings. + //! + //! \note If not defined by the host then it's the same as `kIdHostCDecl`. + kIdHostStdCall = DETECTED_AT_COMPILE_TIME, + + //! Compatibility for `__fastcall` calling convention. + //! + //! \note If not defined by the host then it's the same as `kIdHostCDecl`. + kIdHostFastCall = DETECTED_AT_COMPILE_TIME + + #elif ASMJIT_ARCH_X86 == 32 + + kIdHost = kIdX86CDecl, + kIdHostCDecl = kIdX86CDecl, + kIdHostStdCall = kIdX86StdCall, + + #if defined(_MSC_VER) + kIdHostFastCall = kIdX86MsFastCall, + #elif defined(__GNUC__) + kIdHostFastCall = kIdX86GccFastCall, + #else + kIdHostFastCall = kIdHost, + #endif + + kIdHostLightCall2 = kIdX86LightCall2, + kIdHostLightCall3 = kIdX86LightCall3, + kIdHostLightCall4 = kIdX86LightCall4 + + #elif ASMJIT_ARCH_X86 == 64 + + #if defined(_WIN32) + kIdHost = kIdX86Win64, + #else + kIdHost = kIdX86SysV64, + #endif + + kIdHostCDecl = kIdHost, // Doesn't exist, redirected to host. + kIdHostStdCall = kIdHost, // Doesn't exist, redirected to host. + kIdHostFastCall = kIdHost, // Doesn't exist, redirected to host. + + kIdHostLightCall2 = kIdX64LightCall2, + kIdHostLightCall3 = kIdX64LightCall3, + kIdHostLightCall4 = kIdX64LightCall4 + + #elif ASMJIT_ARCH_ARM == 32 + + #if defined(__SOFTFP__) + kIdHost = kIdArm32SoftFP, + #else + kIdHost = kIdArm32HardFP, + #endif + // These don't exist on ARM. + kIdHostCDecl = kIdHost, // Doesn't exist, redirected to host. + kIdHostStdCall = kIdHost, // Doesn't exist, redirected to host. + kIdHostFastCall = kIdHost // Doesn't exist, redirected to host. + + #else + + kIdHost = kIdNone, + kIdHostCDecl = kIdHost, + kIdHostStdCall = kIdHost, + kIdHostFastCall = kIdHost + + #endif + }; + + //! Strategy used to assign registers to function arguments. + //! + //! This is AsmJit specific. It basically describes how AsmJit should convert + //! the function arguments defined by `FuncSignature` into register IDs and + //! stack offsets. 
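// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] Initializing a CallConv by id and
// querying it; init() resets the structure itself. BaseReg::kGroupGp is
// assumed from the operand API, the names are hypothetical.
// ---------------------------------------------------------------------------
static void queryHostCallConv() noexcept {
  asmjit::CallConv conv;
  if (conv.init(asmjit::CallConv::kIdHost) != asmjit::kErrorOk)
    return;

  uint32_t passedGp    = conv.passedRegs(asmjit::BaseReg::kGroupGp);    // bit-mask of argument registers
  uint32_t preservedGp = conv.preservedRegs(asmjit::BaseReg::kGroupGp); // bit-mask of callee-saved registers
  bool redZone         = conv.hasRedZone();                             // e.g. 128 bytes on SysV AMD64

  (void)passedGp; (void)preservedGp; (void)redZone;
}
// ---------------------------------------------------------------------------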
The default strategy `kStrategyDefault` assigns registers first
+  //! and then stack slots, whereas the `kStrategyWin64` strategy does register shadowing
+  //! as defined by the WIN64 calling convention - it applies to 64-bit calling
+  //! conventions only.
+  enum Strategy : uint32_t {
+    kStrategyDefault = 0,        //!< Default register assignment strategy.
+    kStrategyWin64 = 1           //!< WIN64 specific register assignment strategy.
+  };
+
+  //! Calling convention flags.
+  enum Flags : uint32_t {
+    kFlagCalleePopsStack = 0x01, //!< Callee is responsible for cleaning up the stack.
+    kFlagPassFloatsByVec = 0x02, //!< Pass F32 and F64 arguments by VEC128 register.
+    kFlagVectorCall = 0x04,      //!< This is a '__vectorcall' calling convention.
+    kFlagIndirectVecArgs = 0x08  //!< Pass vector arguments indirectly (as a pointer).
+  };
+
+  //! \name Construction & Destruction
+  //! \{
+
+  ASMJIT_API Error init(uint32_t ccId) noexcept;
+
+  inline void reset() noexcept {
+    memset(this, 0, sizeof(*this));
+    memset(_passedOrder, 0xFF, sizeof(_passedOrder));
+  }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the calling convention id, see `Id`.
+  inline uint32_t id() const noexcept { return _id; }
+  //! Sets the calling convention id, see `Id`.
+  inline void setId(uint32_t id) noexcept { _id = uint8_t(id); }
+
+  //! Returns the calling function architecture id.
+  inline uint32_t archId() const noexcept { return _archId; }
+  //! Sets the calling function architecture id.
+  inline void setArchType(uint32_t archId) noexcept { _archId = uint8_t(archId); }
+
+  //! Returns the strategy used to assign registers to arguments, see `Strategy`.
+  inline uint32_t strategy() const noexcept { return _strategy; }
+  //! Sets the strategy used to assign registers to arguments, see `Strategy`.
+  inline void setStrategy(uint32_t strategy) noexcept { _strategy = uint8_t(strategy); }
+
+  //! Tests whether the calling convention has the given `flag` set.
+  inline bool hasFlag(uint32_t flag) const noexcept { return (uint32_t(_flags) & flag) != 0; }
+  //! Returns the calling convention flags, see `Flags`.
+  inline uint32_t flags() const noexcept { return _flags; }
+  //! Sets the calling convention flags, see `Flags`.
+  inline void setFlags(uint32_t flag) noexcept { _flags = uint8_t(flag); }
+  //! Adds the given calling convention `flags`, see `Flags`.
+  inline void addFlags(uint32_t flags) noexcept { _flags = uint8_t(_flags | flags); }
+
+  //! Tests whether this calling convention specifies 'RedZone'.
+  inline bool hasRedZone() const noexcept { return _redZoneSize != 0; }
+  //! Tests whether this calling convention specifies 'SpillZone'.
+  inline bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
+
+  //! Returns the size of 'RedZone'.
+  inline uint32_t redZoneSize() const noexcept { return _redZoneSize; }
+  //! Returns the size of 'SpillZone'.
+  inline uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
+
+  //! Sets the size of 'RedZone'.
+  inline void setRedZoneSize(uint32_t size) noexcept { _redZoneSize = uint8_t(size); }
+  //! Sets the size of 'SpillZone'.
+  inline void setSpillZoneSize(uint32_t size) noexcept { _spillZoneSize = uint8_t(size); }
+
+  //! Returns the natural stack alignment.
+  inline uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; }
+  //! Sets the natural stack alignment.
+  //!
+  //! This function can be used to override the default stack alignment in case
+  //! you know that its alignment is different. For example, it allows one to
+  //!
implement custom calling conventions that guarantee higher stack alignment. + inline void setNaturalStackAlignment(uint32_t value) noexcept { _naturalStackAlignment = uint8_t(value); } + + inline const uint8_t* passedOrder(uint32_t group) const noexcept { + ASMJIT_ASSERT(group < BaseReg::kGroupVirt); + return _passedOrder[group].id; + } + + inline uint32_t passedRegs(uint32_t group) const noexcept { + ASMJIT_ASSERT(group < BaseReg::kGroupVirt); + return _passedRegs[group]; + } + + inline void _setPassedPacked(uint32_t group, uint32_t p0, uint32_t p1, uint32_t p2, uint32_t p3) noexcept { + ASMJIT_ASSERT(group < BaseReg::kGroupVirt); + + _passedOrder[group].packed[0] = p0; + _passedOrder[group].packed[1] = p1; + _passedOrder[group].packed[2] = p2; + _passedOrder[group].packed[3] = p3; + } + + inline void setPassedToNone(uint32_t group) noexcept { + ASMJIT_ASSERT(group < BaseReg::kGroupVirt); + + _setPassedPacked(group, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu); + _passedRegs[group] = 0u; + } + + inline void setPassedOrder(uint32_t group, uint32_t a0, uint32_t a1 = 0xFF, uint32_t a2 = 0xFF, uint32_t a3 = 0xFF, uint32_t a4 = 0xFF, uint32_t a5 = 0xFF, uint32_t a6 = 0xFF, uint32_t a7 = 0xFF) noexcept { + ASMJIT_ASSERT(group < BaseReg::kGroupVirt); + + // NOTE: This should always be called with all arguments known at compile time, + // so even if it looks scary it should be translated into few instructions. + _setPassedPacked(group, Support::bytepack32_4x8(a0, a1, a2, a3), + Support::bytepack32_4x8(a4, a5, a6, a7), + 0xFFFFFFFFu, + 0xFFFFFFFFu); + + _passedRegs[group] = (a0 != 0xFF ? 1u << a0 : 0u) | + (a1 != 0xFF ? 1u << a1 : 0u) | + (a2 != 0xFF ? 1u << a2 : 0u) | + (a3 != 0xFF ? 1u << a3 : 0u) | + (a4 != 0xFF ? 1u << a4 : 0u) | + (a5 != 0xFF ? 1u << a5 : 0u) | + (a6 != 0xFF ? 1u << a6 : 0u) | + (a7 != 0xFF ? 1u << a7 : 0u) ; + } + + inline uint32_t preservedRegs(uint32_t group) const noexcept { + ASMJIT_ASSERT(group < BaseReg::kGroupVirt); + return _preservedRegs[group]; + } + + inline void setPreservedRegs(uint32_t group, uint32_t regs) noexcept { + ASMJIT_ASSERT(group < BaseReg::kGroupVirt); + _preservedRegs[group] = regs; + } + + //! \} + + //! \name Static Functions + //! \{ + + static inline bool isX86Family(uint32_t ccId) noexcept { return ccId >= _kIdX86Start && ccId <= _kIdX64End; } + static inline bool isArmFamily(uint32_t ccId) noexcept { return ccId >= _kIdArmStart && ccId <= _kIdArmEnd; } + + //! \} +}; + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_CALLCONV_H diff --git a/src/asmjit/core/codebufferwriter_p.h b/src/asmjit/core/codebufferwriter_p.h new file mode 100644 index 0000000..d6f44fc --- /dev/null +++ b/src/asmjit/core/codebufferwriter_p.h @@ -0,0 +1,171 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_CODEBUFFERWRITER_P_H +#define _ASMJIT_CORE_CODEBUFFERWRITER_P_H + +#include "../core/assembler.h" +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \cond INTERNAL +//! \addtogroup asmjit_core +//! \{ + +// ============================================================================ +// [asmjit::CodeBufferWriter] +// ============================================================================ + +//! Helper that is used to write into a `CodeBuffer` held by `BaseAssembler`. 
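A short sketch of the intended `ensureSpace()` / `emit*()` / `done()` flow (the wrapper function is hypothetical; the member functions are those declared in the class below):

```cpp
// Hypothetical helper showing how an assembler is expected to drive the writer.
static Error emitTwoNops(BaseAssembler* a) noexcept {
  CodeBufferWriter writer(a);                  // cursor starts at the assembler's current buffer pointer
  ASMJIT_PROPAGATE(writer.ensureSpace(a, 2));  // grow the CodeBuffer if fewer than 2 bytes remain
  writer.emit8(0x90u);                         // 0x90 == x86 NOP
  writer.emit8(0x90u);
  writer.done(a);                              // publish the cursor and update the buffer size
  return kErrorOk;
}
```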
+class CodeBufferWriter {
+public:
+  uint8_t* _cursor;
+
+  ASMJIT_INLINE explicit CodeBufferWriter(BaseAssembler* a) noexcept
+    : _cursor(a->_bufferPtr) {}
+
+  ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
+    size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
+    if (ASMJIT_UNLIKELY(remainingSpace < n)) {
+      CodeBuffer& buffer = a->_section->_buffer;
+      Error err = a->_code->growBuffer(&buffer, n);
+      if (ASMJIT_UNLIKELY(err))
+        return a->reportError(err);
+      _cursor = a->_bufferPtr;
+    }
+    return kErrorOk;
+  }
+
+  ASMJIT_INLINE uint8_t* cursor() const noexcept { return _cursor; }
+  ASMJIT_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
+  ASMJIT_INLINE void advance(size_t n) noexcept { _cursor += n; }
+
+  ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
+    ASMJIT_ASSERT(_cursor >= from);
+    return (size_t)(_cursor - from);
+  }
+
+  template<typename T>
+  ASMJIT_INLINE void emit8(T val) noexcept {
+    typedef typename std::make_unsigned<T>::type U;
+    _cursor[0] = uint8_t(U(val) & U(0xFF));
+    _cursor++;
+  }
+
+  template<typename T, typename Y>
+  ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
+    typedef typename std::make_unsigned<T>::type U;
+    ASMJIT_ASSERT(size_t(cond) <= 1u);
+
+    _cursor[0] = uint8_t(U(val) & U(0xFF));
+    _cursor += size_t(cond);
+  }
+
+  template<typename T>
+  ASMJIT_INLINE void emit16uLE(T val) noexcept {
+    typedef typename std::make_unsigned<T>::type U;
+    Support::writeU16uLE(_cursor, uint32_t(U(val) & 0xFFFFu));
+    _cursor += 2;
+  }
+
+  template<typename T>
+  ASMJIT_INLINE void emit16uBE(T val) noexcept {
+    typedef typename std::make_unsigned<T>::type U;
+    Support::writeU16uBE(_cursor, uint32_t(U(val) & 0xFFFFu));
+    _cursor += 2;
+  }
+
+  template<typename T>
+  ASMJIT_INLINE void emit32uLE(T val) noexcept {
+    typedef typename std::make_unsigned<T>::type U;
+    Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
+    _cursor += 4;
+  }
+
+  template<typename T>
+  ASMJIT_INLINE void emit32uBE(T val) noexcept {
+    typedef typename std::make_unsigned<T>::type U;
+    Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
+    _cursor += 4;
+  }
+
+  ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
+    ASMJIT_ASSERT(size != 0);
+    memcpy(_cursor, data, size);
+    _cursor += size;
+  }
+
+  template<typename T>
+  ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
+    typedef typename std::make_unsigned<T>::type U;
+    ASMJIT_ASSERT(size <= sizeof(T));
+
+    U v = U(value);
+    for (uint32_t i = 0; i < size; i++) {
+      _cursor[i] = uint8_t(v & 0xFFu);
+      v >>= 8;
+    }
+    _cursor += size;
+  }
+
+  template<typename T>
+  ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
+    typedef typename std::make_unsigned<T>::type U;
+    ASMJIT_ASSERT(size <= sizeof(T));
+
+    // Emits the low `size` bytes of `value` in big-endian (most significant byte first) order.
+    U v = U(value);
+    for (uint32_t i = 0; i < size; i++) {
+      _cursor[i] = uint8_t(v >> ((size - 1) * 8u));
+      v <<= 8;
+    }
+    _cursor += size;
+  }
+
+  ASMJIT_INLINE void emitZeros(size_t size) noexcept {
+    ASMJIT_ASSERT(size != 0);
+    memset(_cursor, 0, size);
+    _cursor += size;
+  }
+
+  ASMJIT_INLINE void remove8(uint8_t* where) noexcept {
+    ASMJIT_ASSERT(where < _cursor);
+
+    uint8_t* p = where;
+    while (++p != _cursor)
+      p[-1] = p[0];
+    _cursor--;
+  }
+
+  template<typename T>
+  ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept {
+    uint8_t* p = _cursor;
+
+    while (p != where) {
+      p[0] = p[-1];
+      p--;
+    }
+
+    *p = uint8_t(val & 0xFF);
+    _cursor++;
+  }
+
+  ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
+    CodeBuffer& buffer = a->_section->_buffer;
+    size_t newSize = (size_t)(_cursor - a->_bufferData);
+    ASMJIT_ASSERT(newSize <= buffer.capacity());
+
a->_bufferPtr = _cursor; + buffer._size = Support::max(buffer._size, newSize); + } +}; + +//! \} +//! \endcond + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_CODEBUFFERWRITER_P_H diff --git a/src/asmjit/core/codeholder.cpp b/src/asmjit/core/codeholder.cpp new file mode 100644 index 0000000..4f98d88 --- /dev/null +++ b/src/asmjit/core/codeholder.cpp @@ -0,0 +1,1109 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#define ASMJIT_EXPORTS + +#include "../core/assembler.h" +#include "../core/logging.h" +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [Globals] +// ============================================================================ + +static const char CodeHolder_addrTabName[] = ".addrtab"; + +//! Encode MOD byte. +static inline uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept { + return (m << 6) | (o << 3) | rm; +} + +// ============================================================================ +// [asmjit::LabelLinkIterator] +// ============================================================================ + +class LabelLinkIterator { +public: + ASMJIT_INLINE LabelLinkIterator(LabelEntry* le) noexcept { reset(le); } + + ASMJIT_INLINE explicit operator bool() const noexcept { return isValid(); } + ASMJIT_INLINE bool isValid() const noexcept { return _link != nullptr; } + + ASMJIT_INLINE LabelLink* link() const noexcept { return _link; } + ASMJIT_INLINE LabelLink* operator->() const noexcept { return _link; } + + ASMJIT_INLINE void reset(LabelEntry* le) noexcept { + _pPrev = &le->_links; + _link = *_pPrev; + } + + ASMJIT_INLINE void next() noexcept { + _pPrev = &_link->next; + _link = *_pPrev; + } + + ASMJIT_INLINE void resolveAndNext(CodeHolder* code) noexcept { + LabelLink* linkToDelete = _link; + + _link = _link->next; + *_pPrev = _link; + + code->_unresolvedLinkCount--; + code->_allocator.release(linkToDelete, sizeof(LabelLink)); + } + + LabelLink** _pPrev; + LabelLink* _link; +}; + +// ============================================================================ +// [asmjit::ErrorHandler] +// ============================================================================ + +ErrorHandler::ErrorHandler() noexcept {} +ErrorHandler::~ErrorHandler() noexcept {} + +// ============================================================================ +// [asmjit::CodeHolder - Utilities] +// ============================================================================ + +static void CodeHolder_resetInternal(CodeHolder* self, uint32_t resetPolicy) noexcept { + uint32_t i; + const ZoneVector& emitters = self->emitters(); + + i = emitters.size(); + while (i) + self->detach(emitters[--i]); + + // Reset everything into its construction state. + self->_codeInfo.reset(); + self->_emitterOptions = 0; + self->_logger = nullptr; + self->_errorHandler = nullptr; + + // Reset all sections. + uint32_t numSections = self->_sections.size(); + for (i = 0; i < numSections; i++) { + Section* section = self->_sections[i]; + if (section->_buffer.data() && !section->_buffer.isExternal()) + ::free(section->_buffer._data); + section->_buffer._data = nullptr; + section->_buffer._capacity = 0; + } + + // Reset zone allocator and all containers using it. 
+ ZoneAllocator* allocator = self->allocator(); + + self->_emitters.reset(); + self->_namedLabels.reset(); + self->_relocations.reset(); + self->_labelEntries.reset(); + self->_sections.reset(); + + self->_unresolvedLinkCount = 0; + self->_addressTableSection = nullptr; + self->_addressTableEntries.reset(); + + allocator->reset(&self->_zone); + self->_zone.reset(resetPolicy); +} + +static void CodeHolder_modifyEmitterOptions(CodeHolder* self, uint32_t clear, uint32_t add) noexcept { + uint32_t oldOpt = self->_emitterOptions; + uint32_t newOpt = (oldOpt & ~clear) | add; + + if (oldOpt == newOpt) + return; + + // Modify emitter options of `CodeHolder` itself. + self->_emitterOptions = newOpt; + + // Modify emitter options of all attached emitters. + for (BaseEmitter* emitter : self->emitters()) { + emitter->_emitterOptions = (emitter->_emitterOptions & ~clear) | add; + emitter->onUpdateGlobalInstOptions(); + } +} + +// ============================================================================ +// [asmjit::CodeHolder - Construction / Destruction] +// ============================================================================ + +CodeHolder::CodeHolder() noexcept + : _codeInfo(), + _emitterOptions(0), + _logger(nullptr), + _errorHandler(nullptr), + _zone(16384 - Zone::kBlockOverhead), + _allocator(&_zone), + _unresolvedLinkCount(0), + _addressTableSection(nullptr) {} + +CodeHolder::~CodeHolder() noexcept { + CodeHolder_resetInternal(this, Globals::kResetHard); +} + +// ============================================================================ +// [asmjit::CodeHolder - Init / Reset] +// ============================================================================ + +inline void CodeHolder_setSectionDefaultName( + Section* section, + char c0 = 0, char c1 = 0, char c2 = 0, char c3 = 0, + char c4 = 0, char c5 = 0, char c6 = 0, char c7 = 0) noexcept { + + section->_name.u32[0] = Support::bytepack32_4x8(uint8_t(c0), uint8_t(c1), uint8_t(c2), uint8_t(c3)); + section->_name.u32[1] = Support::bytepack32_4x8(uint8_t(c4), uint8_t(c5), uint8_t(c6), uint8_t(c7)); +} + +Error CodeHolder::init(const CodeInfo& info) noexcept { + // Cannot reinitialize if it's locked or there is one or more emitter attached. + if (isInitialized()) + return DebugUtils::errored(kErrorAlreadyInitialized); + + // If we are just initializing there should be no emitters attached. + ASMJIT_ASSERT(_emitters.empty()); + + // Create the default section and insert it to the `_sections` array. + Error err = _sections.willGrow(&_allocator); + if (err == kErrorOk) { + Section* section = _allocator.allocZeroedT
(); + if (ASMJIT_LIKELY(section)) { + section->_flags = Section::kFlagExec | Section::kFlagConst; + CodeHolder_setSectionDefaultName(section, '.', 't', 'e', 'x', 't'); + _sections.appendUnsafe(section); + } + else { + err = DebugUtils::errored(kErrorOutOfMemory); + } + } + + if (ASMJIT_UNLIKELY(err)) { + _zone.reset(); + return err; + } + else { + _codeInfo = info; + return kErrorOk; + } +} + +void CodeHolder::reset(uint32_t resetPolicy) noexcept { + CodeHolder_resetInternal(this, resetPolicy); +} + +// ============================================================================ +// [asmjit::CodeHolder - Attach / Detach] +// ============================================================================ + +Error CodeHolder::attach(BaseEmitter* emitter) noexcept { + // Catch a possible misuse of the API. + if (ASMJIT_UNLIKELY(!emitter)) + return DebugUtils::errored(kErrorInvalidArgument); + + // Invalid emitter, this should not be possible. + uint32_t type = emitter->emitterType(); + if (ASMJIT_UNLIKELY(type == BaseEmitter::kTypeNone || type >= BaseEmitter::kTypeCount)) + return DebugUtils::errored(kErrorInvalidState); + + // This is suspicious, but don't fail if `emitter` is already attached + // to this code holder. This is not error, but it's not recommended. + if (emitter->_code != nullptr) { + if (emitter->_code == this) + return kErrorOk; + return DebugUtils::errored(kErrorInvalidState); + } + + // Reserve the space now as we cannot fail after `onAttach()` succeeded. + ASMJIT_PROPAGATE(_emitters.willGrow(&_allocator, 1)); + ASMJIT_PROPAGATE(emitter->onAttach(this)); + + // Connect CodeHolder <-> BaseEmitter. + ASMJIT_ASSERT(emitter->_code == this); + _emitters.appendUnsafe(emitter); + + return kErrorOk; +} + +Error CodeHolder::detach(BaseEmitter* emitter) noexcept { + if (ASMJIT_UNLIKELY(!emitter)) + return DebugUtils::errored(kErrorInvalidArgument); + + if (ASMJIT_UNLIKELY(emitter->_code != this)) + return DebugUtils::errored(kErrorInvalidState); + + // NOTE: We always detach if we were asked to, if error happens during + // `emitter->onDetach()` we just propagate it, but the BaseEmitter will + // be detached. + Error err = kErrorOk; + if (!emitter->isDestroyed()) + err = emitter->onDetach(this); + + // Disconnect CodeHolder <-> BaseEmitter. + uint32_t index = _emitters.indexOf(emitter); + ASMJIT_ASSERT(index != Globals::kNotFound); + + _emitters.removeAt(index); + emitter->_code = nullptr; + + return err; +} + +// ============================================================================ +// [asmjit::CodeHolder - Emitter Options] +// ============================================================================ + +static constexpr uint32_t kEmitterOptionsFilter = ~uint32_t(BaseEmitter::kOptionLoggingEnabled); + +void CodeHolder::addEmitterOptions(uint32_t options) noexcept { + CodeHolder_modifyEmitterOptions(this, 0, options & kEmitterOptionsFilter); +} + +void CodeHolder::clearEmitterOptions(uint32_t options) noexcept { + CodeHolder_modifyEmitterOptions(this, options & kEmitterOptionsFilter, 0); +} + +// ============================================================================ +// [asmjit::CodeHolder - Logging & Error Handling] +// ============================================================================ + +void CodeHolder::setLogger(Logger* logger) noexcept { + #ifndef ASMJIT_NO_LOGGING + _logger = logger; + uint32_t option = !logger ? 
uint32_t(0) : uint32_t(BaseEmitter::kOptionLoggingEnabled); + CodeHolder_modifyEmitterOptions(this, BaseEmitter::kOptionLoggingEnabled, option); + #else + ASMJIT_UNUSED(logger); + #endif +} + +// ============================================================================ +// [asmjit::CodeHolder - Code Buffer] +// ============================================================================ + +static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t n) noexcept { + uint8_t* oldData = cb->_data; + uint8_t* newData; + + if (oldData && !cb->isExternal()) + newData = static_cast(::realloc(oldData, n)); + else + newData = static_cast(::malloc(n)); + + if (ASMJIT_UNLIKELY(!newData)) + return DebugUtils::errored(kErrorOutOfMemory); + + cb->_data = newData; + cb->_capacity = n; + + // Update pointers used by assemblers, if attached. + for (BaseEmitter* emitter : self->emitters()) { + if (emitter->isAssembler()) { + BaseAssembler* a = static_cast(emitter); + if (&a->_section->_buffer == cb) { + size_t offset = a->offset(); + + a->_bufferData = newData; + a->_bufferEnd = newData + n; + a->_bufferPtr = newData + offset; + } + } + } + + return kErrorOk; +} + +Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept { + // The size of the section must be valid. + size_t size = cb->size(); + if (ASMJIT_UNLIKELY(n > std::numeric_limits::max() - size)) + return DebugUtils::errored(kErrorOutOfMemory); + + // We can now check if growing the buffer is really necessary. It's unlikely + // that this function is called while there is still room for `n` bytes. + size_t capacity = cb->capacity(); + size_t required = cb->size() + n; + if (ASMJIT_UNLIKELY(required <= capacity)) + return kErrorOk; + + if (cb->isFixed()) + return DebugUtils::errored(kErrorTooLarge); + + size_t kInitialCapacity = 8096; + if (capacity < kInitialCapacity) + capacity = kInitialCapacity; + else + capacity += Globals::kAllocOverhead; + + do { + size_t old = capacity; + if (capacity < Globals::kGrowThreshold) + capacity *= 2; + else + capacity += Globals::kGrowThreshold; + + // Overflow. + if (ASMJIT_UNLIKELY(old > capacity)) + return DebugUtils::errored(kErrorOutOfMemory); + } while (capacity - Globals::kAllocOverhead < required); + + return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead); +} + +Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept { + size_t capacity = cb->capacity(); + if (n <= capacity) return kErrorOk; + + if (cb->isFixed()) + return DebugUtils::errored(kErrorTooLarge); + + return CodeHolder_reserveInternal(this, cb, n); +} + +// ============================================================================ +// [asmjit::CodeHolder - Sections] +// ============================================================================ + +Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t nameSize, uint32_t flags, uint32_t alignment) noexcept { + *sectionOut = nullptr; + + if (nameSize == SIZE_MAX) + nameSize = strlen(name); + + if (alignment == 0) + alignment = 1; + + if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment))) + return DebugUtils::errored(kErrorInvalidArgument); + + if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxSectionNameSize)) + return DebugUtils::errored(kErrorInvalidSectionName); + + uint32_t sectionId = _sections.size(); + if (ASMJIT_UNLIKELY(sectionId == Globals::kInvalidId)) + return DebugUtils::errored(kErrorTooManySections); + + ASMJIT_PROPAGATE(_sections.willGrow(&_allocator)); + Section* section = _allocator.allocZeroedT
(); + + if (ASMJIT_UNLIKELY(!section)) + return DebugUtils::errored(kErrorOutOfMemory); + + section->_id = sectionId; + section->_flags = flags; + section->_alignment = alignment; + memcpy(section->_name.str, name, nameSize); + _sections.appendUnsafe(section); + + *sectionOut = section; + return kErrorOk; +} + +Section* CodeHolder::sectionByName(const char* name, size_t nameSize) const noexcept { + if (nameSize == SIZE_MAX) + nameSize = strlen(name); + + // This could be also put in a hash-table similarly like we do with labels, + // however it's questionable as the number of sections should be pretty low + // in general. Create an issue if this becomes a problem. + if (ASMJIT_UNLIKELY(nameSize <= Globals::kMaxSectionNameSize)) { + for (Section* section : _sections) + if (memcmp(section->_name.str, name, nameSize) == 0 && section->_name.str[nameSize] == '\0') + return section; + } + + return nullptr; +} + +Section* CodeHolder::ensureAddressTableSection() noexcept { + if (_addressTableSection) + return _addressTableSection; + + newSection(&_addressTableSection, CodeHolder_addrTabName, sizeof(CodeHolder_addrTabName) - 1, 0, _codeInfo.gpSize()); + return _addressTableSection; +} + +Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept { + AddressTableEntry* entry = _addressTableEntries.get(address); + if (entry) + return kErrorOk; + + Section* section = ensureAddressTableSection(); + if (ASMJIT_UNLIKELY(!section)) + return DebugUtils::errored(kErrorOutOfMemory); + + entry = _zone.newT(address); + if (ASMJIT_UNLIKELY(!entry)) + return DebugUtils::errored(kErrorOutOfMemory); + + _addressTableEntries.insert(entry); + section->_virtualSize += _codeInfo.gpSize(); + + return kErrorOk; +} + +// ============================================================================ +// [asmjit::CodeHolder - Labels / Symbols] +// ============================================================================ + +//! Only used to lookup a label from `_namedLabels`. +class LabelByName { +public: + inline LabelByName(const char* key, size_t keySize, uint32_t hashCode) noexcept + : _key(key), + _keySize(uint32_t(keySize)), + _hashCode(hashCode) {} + + inline uint32_t hashCode() const noexcept { return _hashCode; } + + inline bool matches(const LabelEntry* entry) const noexcept { + return entry->nameSize() == _keySize && ::memcmp(entry->name(), _key, _keySize) == 0; + } + + const char* _key; + uint32_t _keySize; + uint32_t _hashCode; +}; + +// Returns a hash of `name` and fixes `nameSize` if it's `SIZE_MAX`. 
+static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize) noexcept { + uint32_t hashCode = 0; + if (nameSize == SIZE_MAX) { + size_t i = 0; + for (;;) { + uint8_t c = uint8_t(name[i]); + if (!c) break; + hashCode = Support::hashRound(hashCode, c); + i++; + } + nameSize = i; + } + else { + for (size_t i = 0; i < nameSize; i++) { + uint8_t c = uint8_t(name[i]); + if (ASMJIT_UNLIKELY(!c)) return DebugUtils::errored(kErrorInvalidLabelName); + hashCode = Support::hashRound(hashCode, c); + } + } + return hashCode; +} + +static bool CodeHolder_writeDisplacement(void* dst, int64_t displacement, uint32_t displacementSize) { + if (displacementSize == 4 && Support::isInt32(displacement)) { + Support::writeI32uLE(dst, int32_t(displacement)); + return true; + } + else if (displacementSize == 1 && Support::isInt8(displacement)) { + Support::writeI8(dst, int8_t(displacement)); + return true; + } + + return false; +} + +LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept { + LabelLink* link = _allocator.allocT(); + if (ASMJIT_UNLIKELY(!link)) return nullptr; + + link->next = le->_links; + le->_links = link; + + link->sectionId = sectionId; + link->relocId = Globals::kInvalidId; + link->offset = offset; + link->rel = rel; + + _unresolvedLinkCount++; + return link; +} + +Error CodeHolder::newLabelEntry(LabelEntry** entryOut) noexcept { + *entryOut = 0; + + uint32_t labelId = _labelEntries.size(); + if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) + return DebugUtils::errored(kErrorTooManyLabels); + + ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator)); + LabelEntry* le = _allocator.allocZeroedT(); + + if (ASMJIT_UNLIKELY(!le)) + return DebugUtils::errored(kErrorOutOfMemory); + + le->_setId(labelId); + le->_parentId = Globals::kInvalidId; + le->_offset = 0; + _labelEntries.appendUnsafe(le); + + *entryOut = le; + return kErrorOk; +} + +Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, size_t nameSize, uint32_t type, uint32_t parentId) noexcept { + *entryOut = 0; + uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize); + + if (ASMJIT_UNLIKELY(nameSize == 0)) + return DebugUtils::errored(kErrorInvalidLabelName); + + if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxLabelNameSize)) + return DebugUtils::errored(kErrorLabelNameTooLong); + + switch (type) { + case Label::kTypeLocal: + if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size())) + return DebugUtils::errored(kErrorInvalidParentLabel); + + hashCode ^= parentId; + break; + + case Label::kTypeGlobal: + if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) + return DebugUtils::errored(kErrorNonLocalLabelCantHaveParent); + + break; + + default: + return DebugUtils::errored(kErrorInvalidArgument); + } + + // Don't allow to insert duplicates. Local labels allow duplicates that have + // different id, this is already accomplished by having a different hashes + // between the same label names having different parent labels. 
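+  // For example, two local labels that are both named "loop" but have parent
+  // ids 1 and 2 are stored under hashes `hash("loop") ^ 1` and `hash("loop") ^ 2`,
+  // so both can live in `_namedLabels` at the same time.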
+ LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode)); + if (ASMJIT_UNLIKELY(le)) + return DebugUtils::errored(kErrorLabelAlreadyDefined); + + Error err = kErrorOk; + uint32_t labelId = _labelEntries.size(); + + if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) + return DebugUtils::errored(kErrorTooManyLabels); + + ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator)); + le = _allocator.allocZeroedT(); + + if (ASMJIT_UNLIKELY(!le)) + return DebugUtils::errored(kErrorOutOfMemory); + + le->_hashCode = hashCode; + le->_setId(labelId); + le->_type = uint8_t(type); + le->_parentId = Globals::kInvalidId; + le->_offset = 0; + ASMJIT_PROPAGATE(le->_name.setData(&_zone, name, nameSize)); + + _labelEntries.appendUnsafe(le); + _namedLabels.insert(allocator(), le); + + *entryOut = le; + return err; +} + +uint32_t CodeHolder::labelIdByName(const char* name, size_t nameSize, uint32_t parentId) noexcept { + // TODO: Finalize - parent id is not used here? + ASMJIT_UNUSED(parentId); + + uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize); + if (ASMJIT_UNLIKELY(!nameSize)) return 0; + + LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode)); + return le ? le->id() : uint32_t(Globals::kInvalidId); +} + +ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept { + if (!hasUnresolvedLinks()) + return kErrorOk; + + Error err = kErrorOk; + for (LabelEntry* le : labelEntries()) { + if (!le->isBound()) + continue; + + LabelLinkIterator link(le); + if (link) { + Support::FastUInt8 of = 0; + Section* toSection = le->section(); + uint64_t toOffset = Support::addOverflow(toSection->offset(), le->offset(), &of); + + do { + uint32_t linkSectionId = link->sectionId; + if (link->relocId == Globals::kInvalidId) { + Section* fromSection = sectionById(linkSectionId); + size_t linkOffset = link->offset; + + CodeBuffer& buf = _sections[linkSectionId]->buffer(); + ASMJIT_ASSERT(linkOffset < buf.size()); + + // Calculate the offset relative to the start of the virtual base. + uint64_t fromOffset = Support::addOverflow(fromSection->offset(), linkOffset, &of); + int64_t displacement = int64_t(toOffset - fromOffset + uint64_t(int64_t(link->rel))); + + if (!of) { + ASMJIT_ASSERT(size_t(linkOffset) < buf.size()); + + // Size of the value we are going to patch. Only BYTE/DWORD is allowed. + uint32_t displacementSize = buf._data[linkOffset]; + ASMJIT_ASSERT(buf.size() - size_t(linkOffset) >= displacementSize); + + // Overwrite a real displacement in the CodeBuffer. + if (CodeHolder_writeDisplacement(buf._data + linkOffset, displacement, displacementSize)) { + link.resolveAndNext(this); + continue; + } + } + + err = DebugUtils::errored(kErrorInvalidDisplacement); + // Falls through to `link.next()`. + } + + link.next(); + } while (link); + } + } + + return err; +} + +ASMJIT_API Error CodeHolder::bindLabel(const Label& label, uint32_t toSectionId, uint64_t toOffset) noexcept { + LabelEntry* le = labelEntry(label); + if (ASMJIT_UNLIKELY(!le)) + return DebugUtils::errored(kErrorInvalidLabel); + + if (ASMJIT_UNLIKELY(toSectionId > _sections.size())) + return DebugUtils::errored(kErrorInvalidSection); + + // Label can be bound only once. + if (ASMJIT_UNLIKELY(le->isBound())) + return DebugUtils::errored(kErrorLabelAlreadyBound); + + // Bind the label. 
+ Section* section = _sections[toSectionId]; + le->_section = section; + le->_offset = toOffset; + + Error err = kErrorOk; + CodeBuffer& buf = section->buffer(); + + // Fix all links to this label we have collected so far if they are within + // the same section. We ignore any inter-section links as these have to be + // fixed later. + LabelLinkIterator link(le); + while (link) { + uint32_t linkSectionId = link->sectionId; + size_t linkOffset = link->offset; + + uint32_t relocId = link->relocId; + if (relocId != Globals::kInvalidId) { + // Adjust relocation data only. + RelocEntry* re = _relocations[relocId]; + re->_payload += toOffset; + re->_targetSectionId = toSectionId; + } + else { + if (linkSectionId != toSectionId) { + link.next(); + continue; + } + + ASMJIT_ASSERT(linkOffset < buf.size()); + int64_t displacement = int64_t(toOffset - uint64_t(linkOffset) + uint64_t(int64_t(link->rel))); + + // Size of the value we are going to patch. Only BYTE/DWORD is allowed. + uint32_t displacementSize = buf._data[linkOffset]; + ASMJIT_ASSERT(buf.size() - size_t(linkOffset) >= displacementSize); + + // Overwrite a real displacement in the CodeBuffer. + if (!CodeHolder_writeDisplacement(buf._data + linkOffset, displacement, displacementSize)) { + err = DebugUtils::errored(kErrorInvalidDisplacement); + link.next(); + continue; + } + } + + link.resolveAndNext(this); + } + + return err; +} + +// ============================================================================ +// [asmjit::BaseEmitter - Relocations] +// ============================================================================ + +Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t relocType, uint32_t valueSize) noexcept { + ASMJIT_PROPAGATE(_relocations.willGrow(&_allocator)); + + uint32_t relocId = _relocations.size(); + if (ASMJIT_UNLIKELY(relocId == Globals::kInvalidId)) + return DebugUtils::errored(kErrorTooManyRelocations); + + RelocEntry* re = _allocator.allocZeroedT(); + if (ASMJIT_UNLIKELY(!re)) + return DebugUtils::errored(kErrorOutOfMemory); + + re->_id = relocId; + re->_relocType = uint8_t(relocType); + re->_valueSize = uint8_t(valueSize); + re->_sourceSectionId = Globals::kInvalidId; + re->_targetSectionId = Globals::kInvalidId; + _relocations.appendUnsafe(re); + + *dst = re; + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseEmitter - Expression Evaluation] +// ============================================================================ + +static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, uint64_t* out) noexcept { + uint64_t value[2]; + for (size_t i = 0; i < 2; i++) { + uint64_t v; + switch (exp->valueType[i]) { + case Expression::kValueNone: { + v = 0; + break; + } + + case Expression::kValueConstant: { + v = exp->value[i].constant; + break; + } + + case Expression::kValueLabel: { + LabelEntry* le = exp->value[i].label; + if (!le->isBound()) + return DebugUtils::errored(kErrorExpressionLabelNotBound); + v = le->section()->offset() + le->offset(); + break; + } + + case Expression::kValueExpression: { + Expression* nested = exp->value[i].expression; + ASMJIT_PROPAGATE(CodeHolder_evaluateExpression(self, nested, &v)); + break; + } + + default: + return DebugUtils::errored(kErrorInvalidState); + } + + value[i] = v; + } + + uint64_t result; + uint64_t& a = value[0]; + uint64_t& b = value[1]; + + switch (exp->opType) { + case Expression::kOpAdd: + result = a + b; + break; + + case Expression::kOpSub: + result = a - b; + 
break; + + case Expression::kOpMul: + result = a * b; + break; + + case Expression::kOpSll: + result = (b > 63) ? uint64_t(0) : uint64_t(a << b); + break; + + case Expression::kOpSrl: + result = (b > 63) ? uint64_t(0) : uint64_t(a >> b); + break; + + case Expression::kOpSra: + result = Support::sar(a, Support::min(b, 63)); + break; + + default: + return DebugUtils::errored(kErrorInvalidState); + } + + *out = result; + return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseEmitter - Utilities] +// ============================================================================ + +Error CodeHolder::flatten() noexcept { + uint64_t offset = 0; + for (Section* section : _sections) { + uint64_t realSize = section->realSize(); + if (realSize) { + uint64_t alignedOffset = Support::alignUp(offset, section->alignment()); + if (ASMJIT_UNLIKELY(alignedOffset < offset)) + return DebugUtils::errored(kErrorTooLarge); + + Support::FastUInt8 of = 0; + offset = Support::addOverflow(alignedOffset, realSize, &of); + + if (ASMJIT_UNLIKELY(of)) + return DebugUtils::errored(kErrorTooLarge); + } + } + + // Now we know that we can assign offsets of all sections properly. + Section* prev = nullptr; + offset = 0; + for (Section* section : _sections) { + uint64_t realSize = section->realSize(); + if (realSize) + offset = Support::alignUp(offset, section->alignment()); + section->_offset = offset; + + // Make sure the previous section extends a bit to cover the alignment. + if (prev) + prev->_virtualSize = offset - prev->_offset; + + prev = section; + offset += realSize; + } + + return kErrorOk; +} + +size_t CodeHolder::codeSize() const noexcept { + Support::FastUInt8 of = 0; + uint64_t offset = 0; + + for (Section* section : _sections) { + uint64_t realSize = section->realSize(); + + if (realSize) { + uint64_t alignedOffset = Support::alignUp(offset, section->alignment()); + ASMJIT_ASSERT(alignedOffset >= offset); + offset = Support::addOverflow(alignedOffset, realSize, &of); + } + } + + // TODO: Not nice, maybe changing `codeSize()` to return `uint64_t` instead? + if ((sizeof(uint64_t) > sizeof(size_t) && offset > SIZE_MAX) || of) + return SIZE_MAX; + + return size_t(offset); +} + +Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept { + // Base address must be provided. + if (ASMJIT_UNLIKELY(baseAddress == Globals::kNoBaseAddress)) + return DebugUtils::errored(kErrorInvalidArgument); + + _codeInfo.setBaseAddress(baseAddress); + uint32_t gpSize = _codeInfo.gpSize(); + + Section* addressTableSection = _addressTableSection; + uint32_t addressTableEntryCount = 0; + uint8_t* addressTableEntryData = nullptr; + + if (addressTableSection) { + ASMJIT_PROPAGATE( + reserveBuffer(&addressTableSection->_buffer, size_t(addressTableSection->virtualSize()))); + addressTableEntryData = addressTableSection->_buffer.data(); + } + + // Relocate all recorded locations. + for (const RelocEntry* re : _relocations) { + // Possibly deleted or optimized-out entry. + if (re->relocType() == RelocEntry::kTypeNone) + continue; + + Section* sourceSection = sectionById(re->sourceSectionId()); + Section* targetSection = nullptr; + + if (re->targetSectionId() != Globals::kInvalidId) + targetSection = sectionById(re->targetSectionId()); + + uint64_t value = re->payload(); + uint64_t sectionOffset = sourceSection->offset(); + uint64_t sourceOffset = re->sourceOffset(); + + // Make sure that the `RelocEntry` doesn't go out of bounds. 
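+    // The region to patch spans leadingSize() bytes in front of the value,
+    // the value itself (valueSize() bytes), and trailingSize() bytes after it;
+    // the whole region must fit into the source section's buffer starting at
+    // sourceOffset().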
+ size_t regionSize = re->leadingSize() + re->valueSize() + re->trailingSize(); + if (ASMJIT_UNLIKELY(re->sourceOffset() >= sourceSection->bufferSize() || + sourceSection->bufferSize() - size_t(re->sourceOffset()) < regionSize)) + return DebugUtils::errored(kErrorInvalidRelocEntry); + + uint8_t* buffer = sourceSection->data(); + size_t valueOffset = size_t(re->sourceOffset()) + re->leadingSize(); + + switch (re->relocType()) { + case RelocEntry::kTypeExpression: { + Expression* expression = (Expression*)(uintptr_t(value)); + ASMJIT_PROPAGATE(CodeHolder_evaluateExpression(this, expression, &value)); + break; + } + + case RelocEntry::kTypeAbsToAbs: { + break; + } + + case RelocEntry::kTypeRelToAbs: { + // Value is currently a relative offset from the start of its section. + // We have to convert it to an absolute offset (including base address). + if (ASMJIT_UNLIKELY(!targetSection)) + return DebugUtils::errored(kErrorInvalidRelocEntry); + + //value += baseAddress + sectionOffset + sourceOffset + regionSize; + value += baseAddress + targetSection->offset(); + break; + } + + case RelocEntry::kTypeAbsToRel: { + value -= baseAddress + sectionOffset + sourceOffset + regionSize; + if (gpSize > 4 && !Support::isInt32(int64_t(value))) + return DebugUtils::errored(kErrorRelocOffsetOutOfRange); + break; + } + + case RelocEntry::kTypeX64AddressEntry: { + if (re->valueSize() != 4 || re->leadingSize() < 2) + return DebugUtils::errored(kErrorInvalidRelocEntry); + + // First try whether a relative 32-bit displacement would work. + value -= baseAddress + sectionOffset + sourceOffset + regionSize; + if (!Support::isInt32(int64_t(value))) { + // Relative 32-bit displacement is not possible, use '.addrtab' section. + AddressTableEntry* atEntry = _addressTableEntries.get(re->payload()); + if (ASMJIT_UNLIKELY(!atEntry)) + return DebugUtils::errored(kErrorInvalidRelocEntry); + + // Cannot be null as we have just matched the `AddressTableEntry`. + ASMJIT_ASSERT(addressTableSection != nullptr); + + if (!atEntry->hasAssignedSlot()) + atEntry->_slot = addressTableEntryCount++; + + size_t atEntryIndex = size_t(atEntry->slot()) * gpSize; + uint64_t addrSrc = sectionOffset + sourceOffset + regionSize; + uint64_t addrDst = addressTableSection->offset() + uint64_t(atEntryIndex); + + value = addrDst - addrSrc; + if (!Support::isInt32(int64_t(value))) + return DebugUtils::errored(kErrorRelocOffsetOutOfRange); + + // Bytes that replace [REX, OPCODE] bytes. + uint32_t byte0 = 0xFF; + uint32_t byte1 = buffer[valueOffset - 1]; + + if (byte1 == 0xE8) { + // Patch CALL/MOD byte to FF /2 (-> 0x15). + byte1 = x86EncodeMod(0, 2, 5); + } + else if (byte1 == 0xE9) { + // Patch JMP/MOD byte to FF /4 (-> 0x25). + byte1 = x86EncodeMod(0, 4, 5); + } + else { + return DebugUtils::errored(kErrorInvalidRelocEntry); + } + + // Patch `jmp/call` instruction. 
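+        // E8 (CALL rel32) is rewritten to FF /2 and E9 (JMP rel32) to FF /4;
+        // with mod=00 and rm=101 the ModRM byte (0x15 / 0x25) selects a
+        // RIP-relative memory operand, so the patched instruction loads its
+        // target from the 64-bit '.addrtab' slot written below.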
+ buffer[valueOffset - 2] = uint8_t(byte0); + buffer[valueOffset - 1] = uint8_t(byte1); + + Support::writeU64uLE(addressTableEntryData + atEntryIndex, re->payload()); + } + break; + } + + default: + return DebugUtils::errored(kErrorInvalidRelocEntry); + } + + switch (re->valueSize()) { + case 1: + Support::writeU8(buffer + valueOffset, uint32_t(value & 0xFFu)); + break; + + case 2: + Support::writeU16uLE(buffer + valueOffset, uint32_t(value & 0xFFFFu)); + break; + + case 4: + Support::writeU32uLE(buffer + valueOffset, uint32_t(value & 0xFFFFFFFFu)); + break; + + case 8: + Support::writeU64uLE(buffer + valueOffset, value); + break; + + default: + return DebugUtils::errored(kErrorInvalidRelocEntry); + } + } + + // Fixup the virtual size of the address table if it's the last section. + if (_sections.last() == addressTableSection) { + size_t addressTableSize = addressTableEntryCount * gpSize; + addressTableSection->_buffer._size = addressTableSize; + addressTableSection->_virtualSize = addressTableSize; + } + + return kErrorOk; +} + +Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, uint32_t options) noexcept { + if (ASMJIT_UNLIKELY(!isSectionValid(sectionId))) + return DebugUtils::errored(kErrorInvalidSection); + + Section* section = sectionById(sectionId); + size_t bufferSize = section->bufferSize(); + + if (ASMJIT_UNLIKELY(dstSize < bufferSize)) + return DebugUtils::errored(kErrorInvalidArgument); + + memcpy(dst, section->data(), bufferSize); + + if (bufferSize < dstSize && (options & kCopyWithPadding)) { + size_t paddingSize = dstSize - bufferSize; + memset(static_cast(dst) + bufferSize, 0, paddingSize); + } + + return kErrorOk; +} + +Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, uint32_t options) noexcept { + size_t end = 0; + for (Section* section : _sections) { + if (section->offset() > dstSize) + return DebugUtils::errored(kErrorInvalidArgument); + + size_t bufferSize = section->bufferSize(); + size_t offset = size_t(section->offset()); + + if (ASMJIT_UNLIKELY(dstSize - offset < bufferSize)) + return DebugUtils::errored(kErrorInvalidArgument); + + uint8_t* dstTarget = static_cast(dst) + offset; + size_t paddingSize = 0; + memcpy(dstTarget, section->data(), bufferSize); + + if ((options & kCopyWithPadding) && bufferSize < section->virtualSize()) { + paddingSize = Support::min(dstSize - offset, size_t(section->virtualSize())) - bufferSize; + memset(dstTarget + bufferSize, 0, paddingSize); + } + + end = Support::max(end, offset + bufferSize + paddingSize); + } + + // TODO: `end` is not used atm, we need an option to also pad anything beyond + // the code in case that the destination was much larger (for example page-size). + + return kErrorOk; +} + +ASMJIT_END_NAMESPACE diff --git a/src/asmjit/core/codeholder.h b/src/asmjit/core/codeholder.h new file mode 100644 index 0000000..3275fdb --- /dev/null +++ b/src/asmjit/core/codeholder.h @@ -0,0 +1,887 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_CODEHOLDER_H +#define _ASMJIT_CORE_CODEHOLDER_H + +#include "../core/arch.h" +#include "../core/datatypes.h" +#include "../core/operand.h" +#include "../core/string.h" +#include "../core/support.h" +#include "../core/target.h" +#include "../core/zone.h" +#include "../core/zonehash.h" +#include "../core/zonestring.h" +#include "../core/zonetree.h" +#include "../core/zonevector.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_core +//! 
\{ + +// ============================================================================ +// [Forward Declarations] +// ============================================================================ + +class BaseEmitter; +class CodeHolder; +class LabelEntry; +class Logger; + +// ============================================================================ +// [asmjit::AlignMode] +// ============================================================================ + +//! Align mode. +enum AlignMode : uint32_t { + kAlignCode = 0, //!< Align executable code. + kAlignData = 1, //!< Align non-executable code. + kAlignZero = 2, //!< Align by a sequence of zeros. + kAlignCount = 3 //!< Count of alignment modes. +}; + +// ============================================================================ +// [asmjit::ErrorHandler] +// ============================================================================ + +//! Error handler can be used to override the default behavior of error handling +//! available to all classes that inherit `BaseEmitter`. +//! +//! Override `ErrorHandler::handleError()` to implement your own error handler. +class ASMJIT_VIRTAPI ErrorHandler { +public: + ASMJIT_BASE_CLASS(ErrorHandler) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Creates a new `ErrorHandler` instance. + ASMJIT_API ErrorHandler() noexcept; + //! Destroys the `ErrorHandler` instance. + ASMJIT_API virtual ~ErrorHandler() noexcept; + + // -------------------------------------------------------------------------- + // [Handle Error] + // -------------------------------------------------------------------------- + + //! Error handler (must be reimplemented). + //! + //! Error handler is called after an error happened and before it's propagated + //! to the caller. There are multiple ways how the error handler can be used: + //! + //! 1. User-based error handling without throwing exception or using C's + //! `longjmp()`. This is for users that don't use exceptions and want + //! customized error handling. + //! + //! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely + //! exception-safe, but you can throw exception from your error handler if + //! this way is the preferred way of handling errors in your project. + //! + //! 3. Using plain old C's `setjmp()` and `longjmp()`. Asmjit always puts + //! `BaseEmitter` to a consistent state before calling `handleError()` + //! so `longjmp()` can be used without any issues to cancel the code + //! generation if an error occurred. There is no difference between + //! exceptions and `longjmp()` from AsmJit's perspective, however, + //! never jump outside of `CodeHolder` and `BaseEmitter` scope as you + //! would leak memory. + virtual void handleError(Error err, const char* message, BaseEmitter* origin) = 0; +}; + +// ============================================================================ +// [asmjit::CodeBuffer] +// ============================================================================ + +//! Code or data buffer. +struct CodeBuffer { + //! The content of the buffer (data). + uint8_t* _data; + //! Number of bytes of `data` used. + size_t _size; + //! Buffer capacity (in bytes). + size_t _capacity; + //! Buffer flags. + uint32_t _flags; + + enum Flags : uint32_t { + //! Buffer is external (not allocated by asmjit). + kFlagIsExternal = 0x00000001u, + //! Buffer is fixed (cannot be reallocated). 
+ kFlagIsFixed = 0x00000002u + }; + + //! \name Accessors + //! \{ + + inline uint32_t flags() const noexcept { return _flags; } + inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; } + + inline bool isAllocated() const noexcept { return _data != nullptr; } + inline bool isFixed() const noexcept { return hasFlag(kFlagIsFixed); } + inline bool isExternal() const noexcept { return hasFlag(kFlagIsExternal); } + + inline uint8_t* data() noexcept { return _data; } + inline const uint8_t* data() const noexcept { return _data; } + + inline bool empty() const noexcept { return !_size; } + inline size_t size() const noexcept { return _size; } + inline size_t capacity() const noexcept { return _capacity; } + + //! \} +}; + +// ============================================================================ +// [asmjit::Section] +// ============================================================================ + +//! Section entry. +class Section { +public: + //! Section id. + uint32_t _id; + //! Section flags. + uint32_t _flags; + //! Section alignment requirements (0 if no requirements). + uint32_t _alignment; + //! Reserved for future use (padding). + uint32_t _reserved; + //! Offset of this section from base-address. + uint64_t _offset; + //! Virtual size of the section (zero initialized sections). + uint64_t _virtualSize; + //! Section name (max 35 characters, PE allows max 8). + FixedString _name; + //! Code or data buffer. + CodeBuffer _buffer; + + //! Section flags. + enum Flags : uint32_t { + kFlagExec = 0x00000001u, //!< Executable (.text sections). + kFlagConst = 0x00000002u, //!< Read-only (.text and .data sections). + kFlagZero = 0x00000004u, //!< Zero initialized by the loader (BSS). + kFlagInfo = 0x00000008u, //!< Info / comment flag. + kFlagImplicit = 0x80000000u //!< Section created implicitly and can be deleted by `Target`. + }; + + //! \name Accessors + //! \{ + + inline uint32_t id() const noexcept { return _id; } + inline const char* name() const noexcept { return _name.str; } + + inline uint8_t* data() noexcept { return _buffer.data(); } + inline const uint8_t* data() const noexcept { return _buffer.data(); } + + inline uint32_t flags() const noexcept { return _flags; } + inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; } + inline void addFlags(uint32_t flags) noexcept { _flags |= flags; } + inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; } + + inline uint32_t alignment() const noexcept { return _alignment; } + inline void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; } + + inline uint64_t offset() const noexcept { return _offset; } + inline void setOffset(uint64_t offset) noexcept { _offset = offset; } + + //! Returns the virtual size of the section. + //! + //! Virtual size is initially zero and is never changed by AsmJit. It's normal + //! if virtual size is smaller than size returned by `bufferSize()` as the buffer + //! stores real data emitted by assemblers or appended by users. + //! + //! Use `realSize()` to get the real and final size of this section. + inline uint64_t virtualSize() const noexcept { return _virtualSize; } + //! Sets the virtual size of the section. + inline void setVirtualSize(uint64_t virtualSize) noexcept { _virtualSize = virtualSize; } + + //! Returns the buffer size of the section. + inline size_t bufferSize() const noexcept { return _buffer.size(); } + //! Returns the real size of the section calculated from virtual and buffer sizes. 
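+  //!
+  //! For example, the '.addrtab' section only increases its `_virtualSize` when
+  //! an address is added to it (see `CodeHolder::addAddressToAddressTable()`),
+  //! so it can have `bufferSize() == 0` while `realSize()` already accounts for
+  //! the space the address table will occupy.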
+  inline uint64_t realSize() const noexcept { return Support::max<uint64_t>(virtualSize(), bufferSize()); }
+
+  //! Returns the `CodeBuffer` used by this section.
+  inline CodeBuffer& buffer() noexcept { return _buffer; }
+  //! Returns the `CodeBuffer` used by this section (const).
+  inline const CodeBuffer& buffer() const noexcept { return _buffer; }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::LabelLink]
+// ============================================================================
+
+//! Data structure used to link either unbound labels or cross-section links.
+struct LabelLink {
+  //! Next link (single-linked list).
+  LabelLink* next;
+  //! Section id where the label is bound.
+  uint32_t sectionId;
+  //! Relocation id or Globals::kInvalidId.
+  uint32_t relocId;
+  //! Label offset relative to the start of the section.
+  size_t offset;
+  //! Inlined rel8/rel32.
+  intptr_t rel;
+};
+
+// ============================================================================
+// [asmjit::Expression]
+// ============================================================================
+
+struct Expression {
+  enum OpType : uint8_t {
+    kOpAdd = 0,
+    kOpSub = 1,
+    kOpMul = 2,
+    kOpSll = 3,
+    kOpSrl = 4,
+    kOpSra = 5
+  };
+
+  enum ValueType : uint8_t {
+    kValueNone = 0,
+    kValueConstant = 1,
+    kValueLabel = 2,
+    kValueExpression = 3
+  };
+
+  union Value {
+    uint64_t constant;
+    Expression* expression;
+    LabelEntry* label;
+  };
+
+  uint8_t opType;
+  uint8_t valueType[2];
+  uint8_t reserved[5];
+  Value value[2];
+
+  inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+  inline void setValueAsConstant(size_t index, uint64_t constant) noexcept {
+    valueType[index] = kValueConstant;
+    value[index].constant = constant;
+  }
+
+  inline void setValueAsLabel(size_t index, LabelEntry* label) noexcept {
+    valueType[index] = kValueLabel;
+    value[index].label = label;
+  }
+
+  inline void setValueAsExpression(size_t index, Expression* expression) noexcept {
+    valueType[index] = kValueExpression;
+    value[index].expression = expression;
+  }
+};
+
+// ============================================================================
+// [asmjit::LabelEntry]
+// ============================================================================
+
+//! Label entry.
+//!
+//! Contains the following properties:
+//!   * Label id - This is the only thing that is set to the `Label` operand.
+//!   * Label name - Optional, used mostly to create executables and libraries.
+//!   * Label type - Type of the label, default `Label::kTypeAnonymous`.
+//!   * Label parent id - Derived from many assemblers that allow defining a
+//!     local label that falls under a global label. This allows defining many
+//!     labels of the same name that have a different parent (global) label.
+//!   * Offset - offset of the label bound by `Assembler`.
+//!   * Links - single-linked list that contains locations of code that have
+//!     to be patched when the label gets bound. Every use of an unbound label
+//!     adds one link to the `_links` list.
+//!   * HVal - Hash value of label's name and optionally parentId.
+//!   * HashNext - Hash-table implementation detail.
+class LabelEntry : public ZoneHashNode {
+public:
+  // Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has
+  // granularity of 32 bytes anyway). This gives `_name` the remaining space,
+  // which should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
+ static constexpr uint32_t kStaticNameSize = + 64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*)); + + //! Label type, see `Label::LabelType`. + uint8_t _type; + //! Must be zero. + uint8_t _flags; + //! Reserved. + uint16_t _reserved16; + //! Label parent id or zero. + uint32_t _parentId; + //! Label offset relative to the start of the `_section`. + uint64_t _offset; + //! Section where the label was bound. + Section* _section; + //! Label links. + LabelLink* _links; + //! Label name. + ZoneString _name; + + //! \name Accessors + //! \{ + + // NOTE: Label id is stored in `_customData`, which is provided by ZoneHashNode + // to fill a padding that a C++ compiler targeting 64-bit CPU will add to align + // the structure to 64-bits. + + //! Returns label id. + inline uint32_t id() const noexcept { return _customData; } + //! Sets label id (internal, used only by `CodeHolder`). + inline void _setId(uint32_t id) noexcept { _customData = id; } + + //! Returns label type, see `Label::LabelType`. + inline uint32_t type() const noexcept { return _type; } + //! Returns label flags, returns 0 at the moment. + inline uint32_t flags() const noexcept { return _flags; } + + //! Tests whether the label has a parent label. + inline bool hasParent() const noexcept { return _parentId != Globals::kInvalidId; } + //! Returns label's parent id. + inline uint32_t parentId() const noexcept { return _parentId; } + + //! Returns the section where the label was bound. + //! + //! If the label was not yet bound the return value is `nullptr`. + inline Section* section() const noexcept { return _section; } + + //! Tests whether the label has name. + inline bool hasName() const noexcept { return !_name.empty(); } + + //! Returns the label's name. + //! + //! \note Local labels will return their local name without their parent + //! part, for example ".L1". + inline const char* name() const noexcept { return _name.data(); } + + //! Returns size of label's name. + //! + //! \note Label name is always null terminated, so you can use `strlen()` to + //! get it, however, it's also cached in `LabelEntry` itself, so if you want + //! to know the size the fastest way is to call `LabelEntry::nameSize()`. + inline uint32_t nameSize() const noexcept { return _name.size(); } + + //! Returns links associated with this label. + inline LabelLink* links() const noexcept { return _links; } + + //! Tests whether the label is bound. + inline bool isBound() const noexcept { return _section != nullptr; } + //! Tests whether the label is bound to a the given `sectionId`. + inline bool isBoundTo(Section* section) const noexcept { return _section == section; } + + //! Returns the label offset (only useful if the label is bound). + inline uint64_t offset() const noexcept { return _offset; } + + //! Returns the hash-value of label's name and its parent label (if any). + //! + //! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function + //! is implemented in `Support::hashString()` and `Support::hashRound()`. + inline uint32_t hashCode() const noexcept { return _hashCode; } + + //! \} +}; + +// ============================================================================ +// [asmjit::RelocEntry] +// ============================================================================ + +//! Relocation entry. +//! +//! We describe relocation data in the following way: +//! +//! ``` +//! +- Start of the buffer +- End of the data +//! | |*PATCHED*| | or instruction +//! 
|xxxxxxxxxxxxxxxxxxxxxx|LeadSize|ValueSize|TrailSize|xxxxxxxxxxxxxxxxxxxx-> +//! | +//! +- Source offset +//! ``` +struct RelocEntry { + //! Relocation id. + uint32_t _id; + //! Type of the relocation. + uint8_t _relocType; + //! Size of the relocation data/value (1, 2, 4 or 8 bytes). + uint8_t _valueSize; + //! Number of bytes after `_sourceOffset` to reach the value to be patched. + uint8_t _leadingSize; + //! Number of bytes after `_sourceOffset + _valueSize` to reach end of the + //! instruction. + uint8_t _trailingSize; + //! Source section id. + uint32_t _sourceSectionId; + //! Target section id. + uint32_t _targetSectionId; + //! Source offset (relative to start of the section). + uint64_t _sourceOffset; + //! Payload (target offset, target address, expression, etc). + uint64_t _payload; + + //! Relocation type. + enum RelocType : uint32_t { + //! None/deleted (no relocation). + kTypeNone = 0, + //! Expression evaluation, `_payload` is pointer to `Expression`. + kTypeExpression = 1, + //! Relocate absolute to absolute. + kTypeAbsToAbs = 2, + //! Relocate relative to absolute. + kTypeRelToAbs = 3, + //! Relocate absolute to relative. + kTypeAbsToRel = 4, + //! Relocate absolute to relative or use trampoline. + kTypeX64AddressEntry = 5 + }; + + //! \name Accessors + //! \{ + + inline uint32_t id() const noexcept { return _id; } + + inline uint32_t relocType() const noexcept { return _relocType; } + inline uint32_t valueSize() const noexcept { return _valueSize; } + + inline uint32_t leadingSize() const noexcept { return _leadingSize; } + inline uint32_t trailingSize() const noexcept { return _trailingSize; } + + inline uint32_t sourceSectionId() const noexcept { return _sourceSectionId; } + inline uint32_t targetSectionId() const noexcept { return _targetSectionId; } + + inline uint64_t sourceOffset() const noexcept { return _sourceOffset; } + inline uint64_t payload() const noexcept { return _payload; } + + Expression* payloadAsExpression() const noexcept { + return reinterpret_cast(uintptr_t(_payload)); + } + + //! \} +}; + +// ============================================================================ +// [asmjit::AddressTableEntry] +// ============================================================================ + +class AddressTableEntry : public ZoneTreeNodeT { +public: + ASMJIT_NONCOPYABLE(AddressTableEntry) + + uint64_t _address; + uint32_t _slot; + + //! \name Construction & Destruction + //! \{ + + inline explicit AddressTableEntry(uint64_t address) noexcept + : _address(address), + _slot(0xFFFFFFFFu) {} + + //! \} + + //! \name Accessors + //! \{ + + inline uint64_t address() const noexcept { return _address; } + inline uint32_t slot() const noexcept { return _slot; } + + inline bool hasAssignedSlot() const noexcept { return _slot != 0xFFFFFFFFu; } + + inline bool operator<(const AddressTableEntry& other) const noexcept { return _address < other._address; } + inline bool operator>(const AddressTableEntry& other) const noexcept { return _address > other._address; } + + inline bool operator<(uint64_t queryAddress) const noexcept { return _address < queryAddress; } + inline bool operator>(uint64_t queryAddress) const noexcept { return _address > queryAddress; } + + //! \} +}; + +// ============================================================================ +// [asmjit::CodeHolder] +// ============================================================================ + +//! Contains basic information about the target architecture plus its settings, +//! 
and holds code & data (including sections, labels, and relocation information). +//! CodeHolder can store both binary and intermediate representation of assembly, +//! which can be generated by `BaseAssembler` and/or `BaseBuilder`. +//! +//! \note `CodeHolder` has ability to attach an `ErrorHandler`, however, the +//! error handler is not triggered by `CodeHolder` itself, it's only used by +//! emitters attached to `CodeHolder`. +class CodeHolder { +public: + ASMJIT_NONCOPYABLE(CodeHolder) + + //! Basic information about the code (architecture and other info). + CodeInfo _codeInfo; + //! Emitter options, propagated to all emitters when changed. + uint32_t _emitterOptions; + + //! Attached `Logger`, used by all consumers. + Logger* _logger; + //! Attached `ErrorHandler`. + ErrorHandler* _errorHandler; + + //! Code zone (used to allocate core structures). + Zone _zone; + //! Zone allocator, used to manage internal containers. + ZoneAllocator _allocator; + + //! Attached code emitters. + ZoneVector _emitters; + //! Section entries. + ZoneVector _sections; + //! Label entries. + ZoneVector _labelEntries; + //! Relocation entries. + ZoneVector _relocations; + //! Label name -> LabelEntry (only named labels). + ZoneHash _namedLabels; + + //! Count of label links, which are not resolved. + size_t _unresolvedLinkCount; + //! Pointer to an address table section (or null if this section doesn't exist). + Section* _addressTableSection; + //! Address table entries. + ZoneTree _addressTableEntries; + + //! \name Construction & Destruction + //! \{ + + //! Creates an uninitialized CodeHolder (you must init() it before it can be used). + ASMJIT_API CodeHolder() noexcept; + //! Destroys the CodeHolder. + ASMJIT_API ~CodeHolder() noexcept; + + inline bool isInitialized() const noexcept { return _codeInfo.isInitialized(); } + + //! Initializes CodeHolder to hold code described by `codeInfo`. + ASMJIT_API Error init(const CodeInfo& info) noexcept; + //! Detaches all code-generators attached and resets the `CodeHolder`. + ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept; + + //! \} + + //! \name Attach & Detach + //! \{ + + //! Attaches an emitter to this `CodeHolder`. + ASMJIT_API Error attach(BaseEmitter* emitter) noexcept; + //! Detaches an emitter from this `CodeHolder`. + ASMJIT_API Error detach(BaseEmitter* emitter) noexcept; + + //! \} + + //! \name Allocators + //! \{ + + inline ZoneAllocator* allocator() const noexcept { return const_cast(&_allocator); } + + //! \} + + //! \name Code Emitter + //! \{ + + inline const ZoneVector& emitters() const noexcept { return _emitters; } + + //! Returns global emitter options, internally propagated to all attached emitters. + inline uint32_t emitterOptions() const noexcept { return _emitterOptions; } + + //! Enables the given global emitter `options` and propagates the resulting + //! options to all attached emitters. + ASMJIT_API void addEmitterOptions(uint32_t options) noexcept; + + //! Disables the given global emitter `options` and propagates the resulting + //! options to all attached emitters. + ASMJIT_API void clearEmitterOptions(uint32_t options) noexcept; + + //! \} + + //! \name Code & Architecture + //! \{ + + //! Returns the target architecture information, see `ArchInfo`. + inline const ArchInfo& archInfo() const noexcept { return _codeInfo.archInfo(); } + //! Returns the target code information, see `CodeInfo`. + inline const CodeInfo& codeInfo() const noexcept { return _codeInfo; } + + //! 
Returns the target architecture id. + inline uint32_t archId() const noexcept { return archInfo().archId(); } + //! Returns the target architecture sub-id. + inline uint32_t archSubId() const noexcept { return archInfo().archSubId(); } + + //! Tests whether a static base-address is set. + inline bool hasBaseAddress() const noexcept { return _codeInfo.hasBaseAddress(); } + //! Returns a static base-address (uint64_t). + inline uint64_t baseAddress() const noexcept { return _codeInfo.baseAddress(); } + + //! \} + + //! \name Logging & Error Handling + //! \{ + + //! Returns the attached logger. + inline Logger* logger() const noexcept { return _logger; } + //! Attaches a `logger` to CodeHolder and propagates it to all attached emitters. + ASMJIT_API void setLogger(Logger* logger) noexcept; + //! Resets the logger to none. + inline void resetLogger() noexcept { setLogger(nullptr); } + + //! Tests whether the global error handler is attached. + inline bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; } + //! Returns the global error handler. + inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; } + //! Sets the global error handler. + inline void setErrorHandler(ErrorHandler* handler) noexcept { _errorHandler = handler; } + //! Resets the global error handler to none. + inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); } + + //! \} + + //! \name Code Buffer + //! \{ + + ASMJIT_API Error growBuffer(CodeBuffer* cb, size_t n) noexcept; + ASMJIT_API Error reserveBuffer(CodeBuffer* cb, size_t n) noexcept; + + //! \} + + //! \name Sections + //! \{ + + //! Returns an array of `Section*` records. + inline const ZoneVector& sections() const noexcept { return _sections; } + //! Returns the number of sections. + inline uint32_t sectionCount() const noexcept { return _sections.size(); } + + //! Tests whether the given `sectionId` is valid. + inline bool isSectionValid(uint32_t sectionId) const noexcept { return sectionId < _sections.size(); } + + //! Creates a new section and return its pointer in `sectionOut`. + //! + //! Returns `Error`, does not report a possible error to `ErrorHandler`. + ASMJIT_API Error newSection(Section** sectionOut, const char* name, size_t nameSize = SIZE_MAX, uint32_t flags = 0, uint32_t alignment = 1) noexcept; + + //! Returns a section entry of the given index. + inline Section* sectionById(uint32_t sectionId) const noexcept { return _sections[sectionId]; } + + //! Returns section-id that matches the given `name`. + //! + //! If there is no such section `Section::kInvalidId` is returned. + ASMJIT_API Section* sectionByName(const char* name, size_t nameSize = SIZE_MAX) const noexcept; + + //! Returns '.text' section (section that commonly represents code). + //! + //! \note Text section is always the first section in `CodeHolder::sections()` array. + inline Section* textSection() const noexcept { return _sections[0]; } + + //! Tests whether '.addrtab' section exists. + inline bool hasAddressTable() const noexcept { return _addressTableSection != nullptr; } + + //! Returns '.addrtab' section. + //! + //! This section is used exclusively by AsmJit to store absolute 64-bit + //! addresses that cannot be encoded in instructions like 'jmp' or 'call'. + inline Section* addressTableSection() const noexcept { return _addressTableSection; } + + //! Ensures that '.addrtab' section exists (creates it if it doesn't) and + //! returns it. Can return `nullptr` on out of memory condition. 
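// [Editor's note] A short sketch (not part of this patch) of the section API
// declared above; it assumes `code` is an already initialized CodeHolder and
// omits error handling.
static Error addDataSection(CodeHolder& code, Section** dataOut) noexcept {
  // '.text' is created by CodeHolder::init() and is always sections()[0].
  Section* text = code.textSection();
  ASMJIT_UNUSED(text);
  // Create an 8-byte aligned '.data' section next to it.
  return code.newSection(dataOut, ".data", SIZE_MAX, 0, 8);
}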
+ ASMJIT_API Section* ensureAddressTableSection() noexcept; + + //! Used to add an address to an address table. + //! + //! This implicitly calls `ensureAddressTableSection()` and then creates + //! `AddressTableEntry` that is inserted to `_addressTableEntries`. If the + //! address already exists this operation does nothing as the same addresses + //! use the same slot. + //! + //! This function should be considered internal as it's used by assemblers to + //! insert an absolute address into the address table. Inserting address into + //! address table without creating a particula relocation entry makes no sense. + ASMJIT_API Error addAddressToAddressTable(uint64_t address) noexcept; + + //! \} + + //! \name Labels & Symbols + //! \{ + + //! Returns array of `LabelEntry*` records. + inline const ZoneVector& labelEntries() const noexcept { return _labelEntries; } + + //! Returns number of labels created. + inline uint32_t labelCount() const noexcept { return _labelEntries.size(); } + + //! Tests whether the label having `id` is valid (i.e. created by `newLabelEntry()`). + inline bool isLabelValid(uint32_t labelId) const noexcept { + return labelId < _labelEntries.size(); + } + + //! Tests whether the `label` is valid (i.e. created by `newLabelEntry()`). + inline bool isLabelValid(const Label& label) const noexcept { + return label.id() < _labelEntries.size(); + } + + //! \overload + inline bool isLabelBound(uint32_t labelId) const noexcept { + return isLabelValid(labelId) && _labelEntries[labelId]->isBound(); + } + + //! Tests whether the `label` is already bound. + //! + //! Returns `false` if the `label` is not valid. + inline bool isLabelBound(const Label& label) const noexcept { + return isLabelBound(label.id()); + } + + //! Returns LabelEntry of the given label `id`. + inline LabelEntry* labelEntry(uint32_t labelId) const noexcept { + return isLabelValid(labelId) ? _labelEntries[labelId] : static_cast(nullptr); + } + + //! Returns LabelEntry of the given `label`. + inline LabelEntry* labelEntry(const Label& label) const noexcept { + return labelEntry(label.id()); + } + + //! Returns offset of a `Label` by its `labelId`. + //! + //! The offset returned is relative to the start of the section. Zero offset + //! is returned for unbound labels, which is their initial offset value. + inline uint64_t labelOffset(uint32_t labelId) const noexcept { + ASMJIT_ASSERT(isLabelValid(labelId)); + return _labelEntries[labelId]->offset(); + } + + //! \overload + inline uint64_t labelOffset(const Label& label) const noexcept { + return labelOffset(label.id()); + } + + //! Returns offset of a label by it's `labelId` relative to the base offset. + //! + //! \remarks The offset of the section where the label is bound must be valid + //! in order to use this function, otherwise the value returned will not be + //! reliable. + inline uint64_t labelOffsetFromBase(uint32_t labelId) const noexcept { + ASMJIT_ASSERT(isLabelValid(labelId)); + const LabelEntry* le = _labelEntries[labelId]; + return (le->isBound() ? le->section()->offset() : uint64_t(0)) + le->offset(); + } + + //! \overload + inline uint64_t labelOffsetFromBase(const Label& label) const noexcept { + return labelOffsetFromBase(label.id()); + } + + //! Creates a new anonymous label and return its id in `idOut`. + //! + //! Returns `Error`, does not report error to `ErrorHandler`. + ASMJIT_API Error newLabelEntry(LabelEntry** entryOut) noexcept; + + //! Creates a new named label label-type `type`. + //! + //! 
Returns `Error`, does not report a possible error to `ErrorHandler`. + ASMJIT_API Error newNamedLabelEntry(LabelEntry** entryOut, const char* name, size_t nameSize, uint32_t type, uint32_t parentId = Globals::kInvalidId) noexcept; + + //! Returns a label id by name. + ASMJIT_API uint32_t labelIdByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept; + + inline Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept { + return Label(labelIdByName(name, nameSize, parentId)); + } + + //! Tests whether there are any unresolved label links. + inline bool hasUnresolvedLinks() const noexcept { return _unresolvedLinkCount != 0; } + //! Returns the number of label links, which are unresolved. + inline size_t unresolvedLinkCount() const noexcept { return _unresolvedLinkCount; } + + //! Creates a new label-link used to store information about yet unbound labels. + //! + //! Returns `null` if the allocation failed. + ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept; + + //! Resolves cross-section links (`LabelLink`) associated with each label that + //! was used as a destination in code of a different section. It's only useful + //! to people that use multiple sections as it will do nothing if the code only + //! contains a single section in which cross-section links are not possible. + ASMJIT_API Error resolveUnresolvedLinks() noexcept; + + //! Binds a label to a given `sectionId` and `offset` (relative to start of the section). + //! + //! This function is generally used by `BaseAssembler::bind()` to do the heavy lifting. + ASMJIT_API Error bindLabel(const Label& label, uint32_t sectionId, uint64_t offset) noexcept; + + //! \} + + //! \name Relocations + //! \{ + + //! Tests whether the code contains relocation entries. + inline bool hasRelocEntries() const noexcept { return !_relocations.empty(); } + //! Returns array of `RelocEntry*` records. + inline const ZoneVector& relocEntries() const noexcept { return _relocations; } + + //! Returns a RelocEntry of the given `id`. + inline RelocEntry* relocEntry(uint32_t id) const noexcept { return _relocations[id]; } + + //! Creates a new relocation entry of type `relocType` and size `valueSize`. + //! + //! Additional fields can be set after the relocation entry was created. + ASMJIT_API Error newRelocEntry(RelocEntry** dst, uint32_t relocType, uint32_t valueSize) noexcept; + + //! \} + + //! \name Utilities + //! \{ + + //! Flattens all sections by recalculating their offsets, starting at 0. + //! + //! \note This should never be called more than once. + ASMJIT_API Error flatten() noexcept; + + //! Returns computed the size of code & data of all sections. + //! + //! \note All sections will be iterated over and the code size returned + //! would represent the minimum code size of all combined sections after + //! applying minimum alignment. Code size may decrease after calling + //! `flatten()` and `relocateToBase()`. + ASMJIT_API size_t codeSize() const noexcept; + + //! Relocates the code to the given `baseAddress`. + //! + //! \param baseAddress Absolute base address where the code will be relocated + //! to. Please note that nothing is copied to such base address, it's just an + //! absolute value used by the relocator to resolve all stored relocations. + //! + //! \note This should never be called more than once. + ASMJIT_API Error relocateToBase(uint64_t baseAddress) noexcept; + + //! 
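// [Editor's note] A sketch (not part of this patch) of the typical
// flatten/relocate/copy flow using the utilities declared here. It assumes
// the caller already allocated `dst` (for example via the platform's virtual
// memory API) at `baseAddress`.
static Error emitFlatImage(CodeHolder& code, void* dst, size_t dstSize, uint64_t baseAddress) noexcept {
  ASMJIT_PROPAGATE(code.flatten());                   // assign final section offsets
  ASMJIT_PROPAGATE(code.resolveUnresolvedLinks());    // patch cross-section label links
  size_t size = code.codeSize();
  if (size > dstSize)
    return DebugUtils::errored(kErrorInvalidArgument);
  ASMJIT_PROPAGATE(code.relocateToBase(baseAddress)); // apply RelocEntry records
  return code.copyFlattenedData(dst, dstSize, CodeHolder::kCopyWithPadding);
}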
Options that can be used with \ref copySectionData(). + enum CopyOptions : uint32_t { + //! If virtual size of the section is larger than the size of its buffer + //! then all bytes between buffer size and virtual size will be zeroed. + kCopyWithPadding = 0x1 + }; + + //! Copies a single section into `dst`. + ASMJIT_API Error copySectionData(void* dst, size_t dstSize, uint32_t sectionId, uint32_t options = 0) noexcept; + + //! Copies all sections into `dst`. + //! + //! This should only be used if the data was flattened and there are no gaps + //! between the sections. The `dstSize` is always checked and the copy will + //! never write anything outside the provided buffer. + ASMJIT_API Error copyFlattenedData(void* dst, size_t dstSize, uint32_t options = 0) noexcept; + + //! \} +}; + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_CODEHOLDER_H diff --git a/src/asmjit/core/compiler.cpp b/src/asmjit/core/compiler.cpp new file mode 100644 index 0000000..69f1f86 --- /dev/null +++ b/src/asmjit/core/compiler.cpp @@ -0,0 +1,556 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#define ASMJIT_EXPORTS + +#include "../core/build.h" +#ifndef ASMJIT_NO_COMPILER + +#include "../core/assembler.h" +#include "../core/compiler.h" +#include "../core/cpuinfo.h" +#include "../core/logging.h" +#include "../core/rapass_p.h" +#include "../core/support.h" +#include "../core/type.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::GlobalConstPoolPass] +// ============================================================================ + +class GlobalConstPoolPass : public Pass { + ASMJIT_NONCOPYABLE(GlobalConstPoolPass) + typedef Pass Base; + + GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {} + + Error run(Zone* zone, Logger* logger) noexcept override { + ASMJIT_UNUSED(zone); + ASMJIT_UNUSED(logger); + + // Flush the global constant pool. 
+ BaseCompiler* compiler = static_cast(_cb); + if (compiler->_globalConstPool) { + compiler->addAfter(compiler->_globalConstPool, compiler->lastNode()); + compiler->_globalConstPool = nullptr; + } + return kErrorOk; + } +}; + +// ============================================================================ +// [asmjit::FuncCallNode - Arg / Ret] +// ============================================================================ + +bool FuncCallNode::_setArg(uint32_t i, const Operand_& op) noexcept { + if ((i & ~kFuncArgHi) >= _funcDetail.argCount()) + return false; + + _args[i] = op; + return true; +} + +bool FuncCallNode::_setRet(uint32_t i, const Operand_& op) noexcept { + if (i >= 2) + return false; + + _rets[i] = op; + return true; +} + +// ============================================================================ +// [asmjit::BaseCompiler - Construction / Destruction] +// ============================================================================ + +BaseCompiler::BaseCompiler() noexcept + : BaseBuilder(), + _func(nullptr), + _vRegZone(4096 - Zone::kBlockOverhead), + _vRegArray(), + _localConstPool(nullptr), + _globalConstPool(nullptr) { + + _type = kTypeCompiler; +} +BaseCompiler::~BaseCompiler() noexcept {} + +// ============================================================================ +// [asmjit::BaseCompiler - Function API] +// ============================================================================ + +FuncNode* BaseCompiler::newFunc(const FuncSignature& sign) noexcept { + Error err; + + FuncNode* func = newNodeT(); + if (ASMJIT_UNLIKELY(!func)) { + reportError(DebugUtils::errored(kErrorOutOfMemory)); + return nullptr; + } + + err = registerLabelNode(func); + if (ASMJIT_UNLIKELY(err)) { + // TODO: Calls reportError, maybe rethink noexcept? + reportError(err); + return nullptr; + } + + // Create helper nodes. + func->_exitNode = newLabelNode(); + func->_end = newNodeT(SentinelNode::kSentinelFuncEnd); + + if (ASMJIT_UNLIKELY(!func->_exitNode || !func->_end)) { + reportError(DebugUtils::errored(kErrorOutOfMemory)); + return nullptr; + } + + // Initialize the function info. + err = func->detail().init(sign); + if (ASMJIT_UNLIKELY(err)) { + reportError(err); + return nullptr; + } + + // If the Target guarantees greater stack alignment than required by the + // calling convention then override it as we can prevent having to perform + // dynamic stack alignment + if (func->_funcDetail._callConv.naturalStackAlignment() < _codeInfo.stackAlignment()) + func->_funcDetail._callConv.setNaturalStackAlignment(_codeInfo.stackAlignment()); + + // Initialize the function frame. + err = func->_frame.init(func->_funcDetail); + if (ASMJIT_UNLIKELY(err)) { + reportError(err); + return nullptr; + } + + // Allocate space for function arguments. + func->_args = nullptr; + if (func->argCount() != 0) { + func->_args = _allocator.allocT(func->argCount() * sizeof(VirtReg*)); + if (ASMJIT_UNLIKELY(!func->_args)) { + reportError(DebugUtils::errored(kErrorOutOfMemory)); + return nullptr; + } + + memset(func->_args, 0, func->argCount() * sizeof(VirtReg*)); + } + + return func; +} + +FuncNode* BaseCompiler::addFunc(FuncNode* func) { + ASMJIT_ASSERT(_func == nullptr); + _func = func; + + addNode(func); // Function node. + BaseNode* prev = cursor(); // {CURSOR}. + addNode(func->exitNode()); // Function exit label. + addNode(func->endNode()); // Function end marker. 
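// [Editor's note] An aside (not part of this patch): the intended call
// sequence around the addFunc()/endFunc() pair implemented here. addFunc()
// inserts the function node, its exit label and the end sentinel, then moves
// the cursor back so that code emitted next lands inside the function body;
// endFunc() flushes the local constant pool (if any) and leaves the cursor at
// the end sentinel. Error handling is omitted.
static void emitEmptyFunc(BaseCompiler& cc, const FuncSignature& sign) {
  FuncNode* func = cc.addFunc(sign);   // func node + exit label + end sentinel
  ASMJIT_UNUSED(func);
  // ... emit the function body here; the cursor is inside the function ...
  cc.endFunc();                        // code emitted afterwards lands outside the function
}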
+ + _setCursor(prev); + return func; +} + +FuncNode* BaseCompiler::addFunc(const FuncSignature& sign) { + FuncNode* func = newFunc(sign); + + if (!func) { + reportError(DebugUtils::errored(kErrorOutOfMemory)); + return nullptr; + } + + return addFunc(func); +} + +Error BaseCompiler::endFunc() { + FuncNode* func = _func; + if (ASMJIT_UNLIKELY(!func)) + return reportError(DebugUtils::errored(kErrorInvalidState)); + + // Add the local constant pool at the end of the function (if exists). + if (_localConstPool) { + setCursor(func->endNode()->prev()); + addNode(_localConstPool); + _localConstPool = nullptr; + } + + // Mark as finished. + _func = nullptr; + + SentinelNode* end = func->endNode(); + setCursor(end); + return kErrorOk; +} + +Error BaseCompiler::setArg(uint32_t argIndex, const BaseReg& r) { + FuncNode* func = _func; + + if (ASMJIT_UNLIKELY(!func)) + return reportError(DebugUtils::errored(kErrorInvalidState)); + + if (ASMJIT_UNLIKELY(!isVirtRegValid(r))) + return reportError(DebugUtils::errored(kErrorInvalidVirtId)); + + VirtReg* vReg = virtRegByReg(r); + func->setArg(argIndex, vReg); + + return kErrorOk; +} + +FuncRetNode* BaseCompiler::newRet(const Operand_& o0, const Operand_& o1) noexcept { + FuncRetNode* node = newNodeT(); + if (!node) { + reportError(DebugUtils::errored(kErrorOutOfMemory)); + return nullptr; + } + + node->setOp(0, o0); + node->setOp(1, o1); + node->setOpCount(!o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u); + + return node; +} + +FuncRetNode* BaseCompiler::addRet(const Operand_& o0, const Operand_& o1) noexcept { + FuncRetNode* node = newRet(o0, o1); + if (!node) return nullptr; + return addNode(node)->as(); +} + +// ============================================================================ +// [asmjit::BaseCompiler - Call] +// ============================================================================ + +FuncCallNode* BaseCompiler::newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept { + FuncCallNode* node = newNodeT(instId, 0u); + if (ASMJIT_UNLIKELY(!node)) { + reportError(DebugUtils::errored(kErrorOutOfMemory)); + return nullptr; + } + + node->setOpCount(1); + node->setOp(0, o0); + node->resetOp(1); + node->resetOp(2); + node->resetOp(3); + + Error err = node->detail().init(sign); + if (ASMJIT_UNLIKELY(err)) { + reportError(err); + return nullptr; + } + + // If there are no arguments skip the allocation. 
+ uint32_t nArgs = sign.argCount(); + if (!nArgs) return node; + + node->_args = static_cast(_allocator.alloc(nArgs * sizeof(Operand))); + if (!node->_args) { + reportError(DebugUtils::errored(kErrorOutOfMemory)); + return nullptr; + } + + memset(node->_args, 0, nArgs * sizeof(Operand)); + return node; +} + +FuncCallNode* BaseCompiler::addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept { + FuncCallNode* node = newCall(instId, o0, sign); + if (!node) return nullptr; + return addNode(node)->as(); +} + +// ============================================================================ +// [asmjit::BaseCompiler - Vars] +// ============================================================================ + +static void CodeCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) { + uint32_t index = unsigned(Operand::virtIdToIndex(vReg->_id)); + + char buf[64]; + int size = snprintf(buf, ASMJIT_ARRAY_SIZE(buf), "%%%u", unsigned(index)); + + ASMJIT_ASSERT(size > 0 && size < int(ASMJIT_ARRAY_SIZE(buf))); + vReg->_name.setData(&self->_dataZone, buf, unsigned(size)); +} + +VirtReg* BaseCompiler::newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept { + uint32_t index = _vRegArray.size(); + if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount))) + return nullptr; + + if (_vRegArray.willGrow(&_allocator) != kErrorOk) + return nullptr; + + VirtReg* vReg = _vRegZone.allocZeroedT(); + if (ASMJIT_UNLIKELY(!vReg)) return nullptr; + + uint32_t size = Type::sizeOf(typeId); + uint32_t alignment = Support::min(size, 64); + + vReg = new(vReg) VirtReg(Operand::indexToVirtId(index), signature, size, alignment, typeId); + + #ifndef ASMJIT_NO_LOGGING + if (name && name[0] != '\0') + vReg->_name.setData(&_dataZone, name, SIZE_MAX); + else + CodeCompiler_assignGenericName(this, vReg); + #endif + + _vRegArray.appendUnsafe(vReg); + return vReg; +} + +Error BaseCompiler::_newReg(BaseReg& out, uint32_t typeId, const char* name) { + RegInfo regInfo; + + Error err = ArchUtils::typeIdToRegInfo(archId(), typeId, regInfo); + if (ASMJIT_UNLIKELY(err)) return reportError(err); + + VirtReg* vReg = newVirtReg(typeId, regInfo.signature(), name); + if (ASMJIT_UNLIKELY(!vReg)) { + out.reset(); + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } + + out._initReg(regInfo.signature(), vReg->id()); + return kErrorOk; +} + +Error BaseCompiler::_newReg(BaseReg& out, uint32_t typeId, const char* fmt, va_list ap) { + StringTmp<256> sb; + sb.appendVFormat(fmt, ap); + return _newReg(out, typeId, sb.data()); +} + +Error BaseCompiler::_newReg(BaseReg& out, const BaseReg& ref, const char* name) { + RegInfo regInfo; + uint32_t typeId; + + if (isVirtRegValid(ref)) { + VirtReg* vRef = virtRegByReg(ref); + typeId = vRef->typeId(); + + // NOTE: It's possible to cast one register type to another if it's the + // same register group. However, VirtReg always contains the TypeId that + // was used to create the register. This means that in some cases we may + // end up having different size of `ref` and `vRef`. In such case we + // adjust the TypeId to match the `ref` register type instead of the + // original register type, which should be the expected behavior. + uint32_t typeSize = Type::sizeOf(typeId); + uint32_t refSize = ref.size(); + + if (typeSize != refSize) { + if (Type::isInt(typeId)) { + // GP register - change TypeId to match `ref`, but keep sign of `vRef`. 
+ switch (refSize) { + case 1: typeId = Type::kIdI8 | (typeId & 1); break; + case 2: typeId = Type::kIdI16 | (typeId & 1); break; + case 4: typeId = Type::kIdI32 | (typeId & 1); break; + case 8: typeId = Type::kIdI64 | (typeId & 1); break; + default: typeId = Type::kIdVoid; break; + } + } + else if (Type::isMmx(typeId)) { + // MMX register - always use 64-bit. + typeId = Type::kIdMmx64; + } + else if (Type::isMask(typeId)) { + // Mask register - change TypeId to match `ref` size. + switch (refSize) { + case 1: typeId = Type::kIdMask8; break; + case 2: typeId = Type::kIdMask16; break; + case 4: typeId = Type::kIdMask32; break; + case 8: typeId = Type::kIdMask64; break; + default: typeId = Type::kIdVoid; break; + } + } + else { + // VEC register - change TypeId to match `ref` size, keep vector metadata. + uint32_t elementTypeId = Type::baseOf(typeId); + + switch (refSize) { + case 16: typeId = Type::_kIdVec128Start + (elementTypeId - Type::kIdI8); break; + case 32: typeId = Type::_kIdVec256Start + (elementTypeId - Type::kIdI8); break; + case 64: typeId = Type::_kIdVec512Start + (elementTypeId - Type::kIdI8); break; + default: typeId = Type::kIdVoid; break; + } + } + + if (typeId == Type::kIdVoid) + return reportError(DebugUtils::errored(kErrorInvalidState)); + } + } + else { + typeId = ref.type(); + } + + Error err = ArchUtils::typeIdToRegInfo(archId(), typeId, regInfo); + if (ASMJIT_UNLIKELY(err)) return reportError(err); + + VirtReg* vReg = newVirtReg(typeId, regInfo.signature(), name); + if (ASMJIT_UNLIKELY(!vReg)) { + out.reset(); + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } + + out._initReg(regInfo.signature(), vReg->id()); + return kErrorOk; +} + +Error BaseCompiler::_newReg(BaseReg& out, const BaseReg& ref, const char* fmt, va_list ap) { + StringTmp<256> sb; + sb.appendVFormat(fmt, ap); + return _newReg(out, ref, sb.data()); +} + +Error BaseCompiler::_newStack(BaseMem& out, uint32_t size, uint32_t alignment, const char* name) { + if (size == 0) + return reportError(DebugUtils::errored(kErrorInvalidArgument)); + + if (alignment == 0) + alignment = 1; + + if (!Support::isPowerOf2(alignment)) + return reportError(DebugUtils::errored(kErrorInvalidArgument)); + + if (alignment > 64) + alignment = 64; + + VirtReg* vReg = newVirtReg(0, 0, name); + if (ASMJIT_UNLIKELY(!vReg)) { + out.reset(); + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + } + + vReg->_virtSize = size; + vReg->_isStack = true; + vReg->_alignment = uint8_t(alignment); + + // Set the memory operand to GPD/GPQ and its id to VirtReg. + out = BaseMem(BaseMem::Decomposed { _gpRegInfo.type(), vReg->id(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag }); + return kErrorOk; +} + +Error BaseCompiler::_newConst(BaseMem& out, uint32_t scope, const void* data, size_t size) { + ConstPoolNode** pPool; + if (scope == ConstPool::kScopeLocal) + pPool = &_localConstPool; + else if (scope == ConstPool::kScopeGlobal) + pPool = &_globalConstPool; + else + return reportError(DebugUtils::errored(kErrorInvalidArgument)); + + ConstPoolNode* pool = *pPool; + if (!pool) { + pool = newConstPoolNode(); + if (ASMJIT_UNLIKELY(!pool)) + return reportError(DebugUtils::errored(kErrorOutOfMemory)); + *pPool = pool; + } + + size_t off; + Error err = pool->add(data, size, off); + + if (ASMJIT_UNLIKELY(err)) + return reportError(err); + + out = BaseMem(BaseMem::Decomposed { + Label::kLabelTag, // Base type. + pool->id(), // Base id. + 0, // Index type. + 0, // Index id. + int32_t(off), // Offset. 
+ uint32_t(size), // Size. + 0 // Flags. + }); + return kErrorOk; +} + +void BaseCompiler::rename(BaseReg& reg, const char* fmt, ...) { + if (!reg.isVirtReg()) return; + + VirtReg* vReg = virtRegById(reg.id()); + if (!vReg) return; + + if (fmt && fmt[0] != '\0') { + char buf[128]; + va_list ap; + + va_start(ap, fmt); + vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap); + va_end(ap); + + vReg->_name.setData(&_dataZone, buf, SIZE_MAX); + } + else { + CodeCompiler_assignGenericName(this, vReg); + } +} + +// ============================================================================ +// [asmjit::BaseCompiler - Events] +// ============================================================================ + +Error BaseCompiler::onAttach(CodeHolder* code) noexcept { + ASMJIT_PROPAGATE(Base::onAttach(code)); + + Error err = addPassT(); + if (ASMJIT_UNLIKELY(err)) { + onDetach(code); + return err; + } + + return kErrorOk; +} + +Error BaseCompiler::onDetach(CodeHolder* code) noexcept { + _func = nullptr; + _localConstPool = nullptr; + _globalConstPool = nullptr; + + _vRegArray.reset(); + _vRegZone.reset(); + + return Base::onDetach(code); +} + +// ============================================================================ +// [asmjit::FuncPass - Construction / Destruction] +// ============================================================================ + +FuncPass::FuncPass(const char* name) noexcept + : Pass(name) {} + +// ============================================================================ +// [asmjit::FuncPass - Run] +// ============================================================================ + +Error FuncPass::run(Zone* zone, Logger* logger) noexcept { + BaseNode* node = cb()->firstNode(); + if (!node) return kErrorOk; + + do { + if (node->type() == BaseNode::kNodeFunc) { + FuncNode* func = node->as(); + node = func->endNode(); + ASMJIT_PROPAGATE(runOnFunction(zone, logger, func)); + } + + // Find a function by skipping all nodes that are not `kNodeFunc`. + do { + node = node->next(); + } while (node && node->type() != BaseNode::kNodeFunc); + } while (node); + + return kErrorOk; +} + +ASMJIT_END_NAMESPACE + +#endif // !ASMJIT_NO_COMPILER diff --git a/src/asmjit/core/compiler.h b/src/asmjit/core/compiler.h new file mode 100644 index 0000000..293dcf2 --- /dev/null +++ b/src/asmjit/core/compiler.h @@ -0,0 +1,563 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_COMPILER_H +#define _ASMJIT_CORE_COMPILER_H + +#include "../core/build.h" +#ifndef ASMJIT_NO_COMPILER + +#include "../core/assembler.h" +#include "../core/builder.h" +#include "../core/constpool.h" +#include "../core/func.h" +#include "../core/inst.h" +#include "../core/operand.h" +#include "../core/support.h" +#include "../core/zone.h" +#include "../core/zonevector.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [Forward Declarations] +// ============================================================================ + +struct RATiedReg; +class RAWorkReg; + +class FuncNode; +class FuncRetNode; +class FuncCallNode; + +//! \addtogroup asmjit_compiler +//! \{ + +// ============================================================================ +// [asmjit::VirtReg] +// ============================================================================ + +//! Virtual register data (BaseCompiler). +class VirtReg { +public: + ASMJIT_NONCOPYABLE(VirtReg) + + //! Virtual register id. 
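// [Editor's note] A sketch (not part of this patch) of the
// BaseCompiler::_newStack()/_newConst() helpers implemented in compiler.cpp
// above and declared later in this header; concrete emitters such as
// x86::Compiler add typed wrappers on top of them. Error handling is minimal.
static Error makeScratchAndConst(BaseCompiler& cc, BaseMem& stackSlot, BaseMem& constMem) noexcept {
  // 32-byte home slot aligned to 16 bytes, backed by a stack-only VirtReg.
  ASMJIT_PROPAGATE(cc._newStack(stackSlot, 32, 16, "scratch"));
  // 16 bytes of data placed into the function-local constant pool.
  static const uint8_t data[16] = { 1, 2, 3, 4 };
  return cc._newConst(constMem, ConstPool::kScopeLocal, data, sizeof(data));
}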
+ uint32_t _id; + //! Virtual register info (signature). + RegInfo _info; + //! Virtual register size (can be smaller than `regInfo._size`). + uint32_t _virtSize; + //! Virtual register alignment (for spilling). + uint8_t _alignment; + //! Type-id. + uint8_t _typeId; + //! Virtual register weight for alloc/spill decisions. + uint8_t _weight; + //! True if this is a fixed register, never reallocated. + uint8_t _isFixed : 1; + //! True if the virtual register is only used as a stack (never accessed as register). + uint8_t _isStack : 1; + uint8_t _reserved : 6; + + //! Virtual register name (user provided or automatically generated). + ZoneString<16> _name; + + // ------------------------------------------------------------------------- + // The following members are used exclusively by RAPass. They are initialized + // when the VirtReg is created to NULL pointers and then changed during RAPass + // execution. RAPass sets them back to NULL before it returns. + // ------------------------------------------------------------------------- + + //! Reference to `RAWorkReg`, used during register allocation. + RAWorkReg* _workReg; + + //! \name Construction & Destruction + //! \{ + + inline VirtReg(uint32_t id, uint32_t signature, uint32_t virtSize, uint32_t alignment, uint32_t typeId) noexcept + : _id(id), + _virtSize(virtSize), + _alignment(uint8_t(alignment)), + _typeId(uint8_t(typeId)), + _weight(1), + _isFixed(false), + _isStack(false), + _reserved(0), + _name(), + _workReg(nullptr) { _info._signature = signature; } + + //! \} + + //! \name Accessors + //! \{ + + //! Returns the virtual register id. + inline uint32_t id() const noexcept { return _id; } + + //! Returns the virtual register name. + inline const char* name() const noexcept { return _name.data(); } + //! Returns the size of the virtual register name. + inline uint32_t nameSize() const noexcept { return _name.size(); } + + //! Returns a register information that wraps the register signature. + inline const RegInfo& info() const noexcept { return _info; } + //! Returns a virtual register type (maps to the physical register type as well). + inline uint32_t type() const noexcept { return _info.type(); } + //! Returns a virtual register group (maps to the physical register group as well). + inline uint32_t group() const noexcept { return _info.group(); } + + //! Returns a real size of the register this virtual register maps to. + //! + //! For example if this is a 128-bit SIMD register used for a scalar single + //! precision floating point value then its virtSize would be 4, however, the + //! `regSize` would still say 16 (128-bits), because it's the smallest size + //! of that register type. + inline uint32_t regSize() const noexcept { return _info.size(); } + + //! Returns a register signature of this virtual register. + inline uint32_t signature() const noexcept { return _info.signature(); } + + //! Returns the virtual register size. + //! + //! The virtual register size describes how many bytes the virtual register + //! needs to store its content. It can be smaller than the physical register + //! size, see `regSize()`. + inline uint32_t virtSize() const noexcept { return _virtSize; } + + //! Returns the virtual register alignment. + inline uint32_t alignment() const noexcept { return _alignment; } + + //! Returns the virtual register type id, see `Type::Id`. + inline uint32_t typeId() const noexcept { return _typeId; } + + //! Returns the virtual register weight - the register allocator can use it + //! 
as explicit hint for alloc/spill decisions. + inline uint32_t weight() const noexcept { return _weight; } + //! Sets the virtual register weight (0 to 255) - the register allocator can + //! use it as explicit hint for alloc/spill decisions and initial bin-packing. + inline void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); } + + //! Returns whether the virtual register is always allocated to a fixed + //! physical register (and never reallocated). + //! + //! \note This is only used for special purposes and it's mostly internal. + inline bool isFixed() const noexcept { return bool(_isFixed); } + + //! Returns whether the virtual register is indeed a stack that only uses + //! the virtual register id for making it accessible. + //! + //! \note It's an error if a stack is accessed as a register. + inline bool isStack() const noexcept { return bool(_isStack); } + + inline bool hasWorkReg() const noexcept { return _workReg != nullptr; } + inline RAWorkReg* workReg() const noexcept { return _workReg; } + inline void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; } + inline void resetWorkReg() noexcept { _workReg = nullptr; } + + //! \} +}; + +// ============================================================================ +// [asmjit::BaseCompiler] +// ============================================================================ + +//! Code emitter that uses virtual registers and performs register allocation. +//! +//! Compiler is a high-level code-generation tool that provides register +//! allocation and automatic handling of function calling conventions. It was +//! primarily designed for merging multiple parts of code into a function +//! without worrying about registers and function calling conventions. +//! +//! BaseCompiler can be used, with a minimum effort, to handle 32-bit and 64-bit +//! code at the same time. +//! +//! BaseCompiler is based on BaseBuilder and contains all the features it +//! provides. It means that the code it stores can be modified (removed, added, +//! injected) and analyzed. When the code is finalized the compiler can emit +//! the code into an Assembler to translate the abstract representation into a +//! machine code. +class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder { +public: + ASMJIT_NONCOPYABLE(BaseCompiler) + typedef BaseBuilder Base; + + //! Current function. + FuncNode* _func; + //! Allocates `VirtReg` objects. + Zone _vRegZone; + //! Stores array of `VirtReg` pointers. + ZoneVector _vRegArray; + + //! Local constant pool, flushed at the end of each function. + ConstPoolNode* _localConstPool; + //! Global constant pool, flushed by `finalize()`. + ConstPoolNode* _globalConstPool; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `BaseCompiler` instance. + ASMJIT_API BaseCompiler() noexcept; + //! Destroys the `BaseCompiler` instance. + ASMJIT_API virtual ~BaseCompiler() noexcept; + + //! \} + + //! \name Function API + //! \{ + + //! Returns the current function. + inline FuncNode* func() const noexcept { return _func; } + + //! Creates a new `FuncNode`. + ASMJIT_API FuncNode* newFunc(const FuncSignature& sign) noexcept; + //! Adds a function `node` to the stream. + ASMJIT_API FuncNode* addFunc(FuncNode* func); + //! Adds a new function. + ASMJIT_API FuncNode* addFunc(const FuncSignature& sign); + //! Emits a sentinel that marks the end of the current function. + ASMJIT_API Error endFunc(); + + //! Sets a function argument at `argIndex` to `reg`. 
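// [Editor's note] A usage sketch (not part of this patch) that follows the
// BaseCompiler description above. It assumes the x86 backend (x86::Compiler,
// x86::Gp), JitRuntime and the FuncSignatureT<> helper from other files of
// this series; error handling is omitted for brevity.
#include <asmjit/x86.h>
using namespace asmjit;

typedef int (*AddFunc)(int, int);

static AddFunc compileAdd(JitRuntime& rt) {
  CodeHolder code;
  code.init(rt.codeInfo());

  x86::Compiler cc(&code);
  cc.addFunc(FuncSignatureT<int, int, int>(CallConv::kIdHost));

  x86::Gp a = cc.newInt32("a");
  x86::Gp b = cc.newInt32("b");
  cc.setArg(0, a);                 // bind incoming arguments to virtual registers
  cc.setArg(1, b);
  cc.add(a, b);
  cc.ret(a);
  cc.endFunc();
  cc.finalize();                   // runs register allocation and serializes the code

  AddFunc fn = nullptr;
  rt.add(&fn, &code);              // relocates and copies the code into executable memory
  return fn;                       // fn(2, 3) == 5
}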
+ ASMJIT_API Error setArg(uint32_t argIndex, const BaseReg& reg); + + //! Creates a new `FuncRetNode`. + ASMJIT_API FuncRetNode* newRet(const Operand_& o0, const Operand_& o1) noexcept; + //! Adds a new `FuncRetNode`. + ASMJIT_API FuncRetNode* addRet(const Operand_& o0, const Operand_& o1) noexcept; + + //! \} + + //! \name Function Calls + //! \{ + + //! Creates a new `FuncCallNode`. + ASMJIT_API FuncCallNode* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept; + //! Adds a new `FuncCallNode`. + ASMJIT_API FuncCallNode* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept; + + //! \} + + //! \name Virtual Registers + //! \{ + + //! Creates a new virtual register representing the given `typeId` and `signature`. + ASMJIT_API VirtReg* newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept; + + ASMJIT_API Error _newReg(BaseReg& out, uint32_t typeId, const char* name = nullptr); + ASMJIT_API Error _newReg(BaseReg& out, uint32_t typeId, const char* fmt, va_list ap); + + ASMJIT_API Error _newReg(BaseReg& out, const BaseReg& ref, const char* name = nullptr); + ASMJIT_API Error _newReg(BaseReg& out, const BaseReg& ref, const char* fmt, va_list ap); + + //! Tests whether the given `id` is a valid virtual register id. + inline bool isVirtIdValid(uint32_t id) const noexcept { + uint32_t index = Operand::virtIdToIndex(id); + return index < _vRegArray.size(); + } + //! Tests whether the given `reg` is a virtual register having a valid id. + inline bool isVirtRegValid(const BaseReg& reg) const noexcept { + return isVirtIdValid(reg.id()); + } + + //! Returns `VirtReg` associated with the given `id`. + inline VirtReg* virtRegById(uint32_t id) const noexcept { + ASMJIT_ASSERT(isVirtIdValid(id)); + return _vRegArray[Operand::virtIdToIndex(id)]; + } + //! Returns `VirtReg` associated with the given `reg`. + inline VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); } + //! Returns `VirtReg` associated with the given `index`. + inline VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; } + + //! Returns an array of all virtual registers managed by the Compiler. + inline const ZoneVector& virtRegs() const noexcept { return _vRegArray; } + + //! \name Stack + //! \{ + + ASMJIT_API Error _newStack(BaseMem& out, uint32_t size, uint32_t alignment, const char* name = nullptr); + + //! \} + + //! \name Constants + //! \{ + + ASMJIT_API Error _newConst(BaseMem& out, uint32_t scope, const void* data, size_t size); + + //! \} + + //! \name Miscellaneous + //! \{ + + //! Rename the given virtual register `reg` to a formatted string `fmt`. + //! + //! \note Only new name will appear in the logger. + ASMJIT_API void rename(BaseReg& reg, const char* fmt, ...); + + //! \} + + // TODO: These should be removed + inline void alloc(BaseReg& reg) { ASMJIT_UNUSED(reg); } + inline void spill(BaseReg& reg) { ASMJIT_UNUSED(reg); } + + //! \name Events + //! \{ + + ASMJIT_API Error onAttach(CodeHolder* code) noexcept override; + ASMJIT_API Error onDetach(CodeHolder* code) noexcept override; + + //! \} +}; + +// ============================================================================ +// [asmjit::FuncNode] +// ============================================================================ + +//! Function entry (BaseCompiler). +class FuncNode : public LabelNode { +public: + ASMJIT_NONCOPYABLE(FuncNode) + + //! Function detail. + FuncDetail _funcDetail; + //! Function frame. 
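// [Editor's note] A sketch (not part of this patch) of a common use of
// FuncNode::exitLabel(), declared below: branching to the shared function
// exit instead of duplicating the epilogue. x86 mnemonics are assumed.
static void emitEarlyReturnIfZero(x86::Compiler& cc, FuncNode* func, const x86::Gp& value) {
  cc.test(value, value);
  cc.jz(func->exitLabel());   // jump to the function's exit LabelNode
}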
+ FuncFrame _frame; + //! Function exit (label). + LabelNode* _exitNode; + //! Function end (sentinel). + SentinelNode* _end; + //! Arguments array as `VirtReg`. + VirtReg** _args; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `FuncNode` instance. + //! + //! Always use `BaseCompiler::addFunc()` to create `FuncNode`. + ASMJIT_INLINE FuncNode(BaseBuilder* cb) noexcept + : LabelNode(cb), + _funcDetail(), + _frame(), + _exitNode(nullptr), + _end(nullptr), + _args(nullptr) { + setType(kNodeFunc); + } + + //! \} + + //! \{ + //! \name Accessors + + //! Returns function exit `LabelNode`. + inline LabelNode* exitNode() const noexcept { return _exitNode; } + //! Returns function exit label. + inline Label exitLabel() const noexcept { return _exitNode->label(); } + + //! Returns "End of Func" sentinel. + inline SentinelNode* endNode() const noexcept { return _end; } + + //! Returns function declaration. + inline FuncDetail& detail() noexcept { return _funcDetail; } + //! Returns function declaration. + inline const FuncDetail& detail() const noexcept { return _funcDetail; } + + //! Returns function frame. + inline FuncFrame& frame() noexcept { return _frame; } + //! Returns function frame. + inline const FuncFrame& frame() const noexcept { return _frame; } + + //! Returns arguments count. + inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); } + //! Returns returns count. + inline uint32_t retCount() const noexcept { return _funcDetail.retCount(); } + + //! Returns arguments list. + inline VirtReg** args() const noexcept { return _args; } + + //! Returns argument at `i`. + inline VirtReg* arg(uint32_t i) const noexcept { + ASMJIT_ASSERT(i < argCount()); + return _args[i]; + } + + //! Sets argument at `i`. + inline void setArg(uint32_t i, VirtReg* vReg) noexcept { + ASMJIT_ASSERT(i < argCount()); + _args[i] = vReg; + } + + //! Resets argument at `i`. + inline void resetArg(uint32_t i) noexcept { + ASMJIT_ASSERT(i < argCount()); + _args[i] = nullptr; + } + + inline uint32_t attributes() const noexcept { return _frame.attributes(); } + inline void addAttributes(uint32_t attrs) noexcept { _frame.addAttributes(attrs); } + + //! \} +}; + +// ============================================================================ +// [asmjit::FuncRetNode] +// ============================================================================ + +//! Function return (BaseCompiler). +class FuncRetNode : public InstNode { +public: + ASMJIT_NONCOPYABLE(FuncRetNode) + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `FuncRetNode` instance. + inline FuncRetNode(BaseBuilder* cb) noexcept : InstNode(cb, BaseInst::kIdAbstract, 0, 0) { + _any._nodeType = kNodeFuncRet; + } + + //! \} +}; + +// ============================================================================ +// [asmjit::FuncCallNode] +// ============================================================================ + +//! Function call (BaseCompiler). +class FuncCallNode : public InstNode { +public: + ASMJIT_NONCOPYABLE(FuncCallNode) + + //! Function detail. + FuncDetail _funcDetail; + //! Returns. + Operand_ _rets[2]; + //! Arguments. + Operand_* _args; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `FuncCallNode` instance. 
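// [Editor's note] A sketch (not part of this patch) of how a FuncCallNode is
// typically populated. It assumes x86::Compiler::call() creating a
// FuncCallNode for an immediate target and imm() accepting a function
// pointer; both are assumptions about the surrounding x86 API.
#include <stdio.h>   // puts

static x86::Gp emitCallPuts(x86::Compiler& cc, const x86::Gp& str) {
  x86::Gp result = cc.newInt32("result");
  FuncCallNode* call = cc.call(imm((void*)puts), FuncSignatureT<int, const char*>(CallConv::kIdHost));
  call->setArg(0, str);      // pass the string pointer as the first argument
  call->setRet(0, result);   // capture the return value
  return result;
}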
+ inline FuncCallNode(BaseBuilder* cb, uint32_t instId, uint32_t options) noexcept + : InstNode(cb, instId, options, kBaseOpCapacity), + _funcDetail(), + _args(nullptr) { + setType(kNodeFuncCall); + _resetOps(); + _rets[0].reset(); + _rets[1].reset(); + addFlags(kFlagIsRemovable); + } + + //! \} + + //! \name Accessors + //! \{ + + //! Sets the function signature. + inline Error setSignature(const FuncSignature& sign) noexcept { + return _funcDetail.init(sign); + } + + //! Returns the function detail. + inline FuncDetail& detail() noexcept { return _funcDetail; } + //! Returns the function detail. + inline const FuncDetail& detail() const noexcept { return _funcDetail; } + + //! Returns the target operand. + inline Operand& target() noexcept { return _opArray[0].as(); } + //! \overload + inline const Operand& target() const noexcept { return _opArray[0].as(); } + + //! Returns the number of function arguments. + inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); } + //! Returns the number of function return values. + inline uint32_t retCount() const noexcept { return _funcDetail.retCount(); } + + //! Returns the return value at `i`. + inline Operand& ret(uint32_t i = 0) noexcept { + ASMJIT_ASSERT(i < 2); + return _rets[i].as(); + } + //! \overload + inline const Operand& ret(uint32_t i = 0) const noexcept { + ASMJIT_ASSERT(i < 2); + return _rets[i].as(); + } + + //! Returns the function argument at `i`. + inline Operand& arg(uint32_t i) noexcept { + ASMJIT_ASSERT(i < kFuncArgCountLoHi); + return _args[i].as(); + } + //! \overload + inline const Operand& arg(uint32_t i) const noexcept { + ASMJIT_ASSERT(i < kFuncArgCountLoHi); + return _args[i].as(); + } + + //! Sets the function argument at `i` to `op`. + ASMJIT_API bool _setArg(uint32_t i, const Operand_& op) noexcept; + //! Sets the function return value at `i` to `op`. + ASMJIT_API bool _setRet(uint32_t i, const Operand_& op) noexcept; + + //! Sets the function argument at `i` to `reg`. + inline bool setArg(uint32_t i, const BaseReg& reg) noexcept { return _setArg(i, reg); } + //! Sets the function argument at `i` to `imm`. + inline bool setArg(uint32_t i, const Imm& imm) noexcept { return _setArg(i, imm); } + + //! Sets the function return value at `i` to `var`. + inline bool setRet(uint32_t i, const BaseReg& reg) noexcept { return _setRet(i, reg); } + + //! \} +}; + +// ============================================================================ +// [asmjit::FuncPass] +// ============================================================================ + +class ASMJIT_VIRTAPI FuncPass : public Pass { +public: + ASMJIT_NONCOPYABLE(FuncPass) + typedef Pass Base; + + //! \name Construction & Destruction + //! \{ + + ASMJIT_API FuncPass(const char* name) noexcept; + + //! \} + + //! \name Accessors + //! \{ + + //! Returns the associated `BaseCompiler`. + inline BaseCompiler* cc() const noexcept { return static_cast(_cb); } + + //! \} + + //! \name Run + //! \{ + + //! Calls `runOnFunction()` on each `FuncNode` node found. + ASMJIT_API Error run(Zone* zone, Logger* logger) noexcept override; + + //! Called once per `FuncNode`. + virtual Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) noexcept = 0; + + //! \} +}; + +//! 
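// [Editor's note] A minimal custom pass (not part of this patch) built on the
// FuncPass interface defined above; runOnFunction() is invoked once per
// FuncNode found in the stream.
class CountFunctionsPass : public FuncPass {
public:
  uint32_t _count = 0;

  CountFunctionsPass() noexcept : FuncPass("CountFunctionsPass") {}

  Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) noexcept override {
    ASMJIT_UNUSED(zone);
    ASMJIT_UNUSED(logger);
    ASMJIT_UNUSED(func);
    _count++;
    return kErrorOk;
  }
};
// A pass like this would be registered the same way GlobalConstPoolPass is
// registered in BaseCompiler::onAttach(), via the builder's addPassT<>().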
\} + +ASMJIT_END_NAMESPACE + +#endif // !ASMJIT_NO_COMPILER +#endif // _ASMJIT_CORE_COMPILER_H diff --git a/src/asmjit/core/constpool.cpp b/src/asmjit/core/constpool.cpp new file mode 100644 index 0000000..2c5a513 --- /dev/null +++ b/src/asmjit/core/constpool.cpp @@ -0,0 +1,359 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#define ASMJIT_EXPORTS + +#include "../core/constpool.h" +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::ConstPool - Construction / Destruction] +// ============================================================================ + +ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); } +ConstPool::~ConstPool() noexcept {} + +// ============================================================================ +// [asmjit::ConstPool - Reset] +// ============================================================================ + +void ConstPool::reset(Zone* zone) noexcept { + _zone = zone; + + size_t dataSize = 1; + for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) { + _tree[i].reset(); + _tree[i].setDataSize(dataSize); + _gaps[i] = nullptr; + dataSize <<= 1; + } + + _gapPool = nullptr; + _size = 0; + _alignment = 0; +} + +// ============================================================================ +// [asmjit::ConstPool - Ops] +// ============================================================================ + +static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept { + ConstPool::Gap* gap = self->_gapPool; + if (!gap) + return self->_zone->allocT(); + + self->_gapPool = gap->_next; + return gap; +} + +static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept { + gap->_next = self->_gapPool; + self->_gapPool = gap; +} + +static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexcept { + ASMJIT_ASSERT(size > 0); + + while (size > 0) { + size_t gapIndex; + size_t gapSize; + + if (size >= 16 && Support::isAligned(offset, 16)) { + gapIndex = ConstPool::kIndex16; + gapSize = 16; + } + else if (size >= 8 && Support::isAligned(offset, 8)) { + gapIndex = ConstPool::kIndex8; + gapSize = 8; + } + else if (size >= 4 && Support::isAligned(offset, 4)) { + gapIndex = ConstPool::kIndex4; + gapSize = 4; + } + else if (size >= 2 && Support::isAligned(offset, 2)) { + gapIndex = ConstPool::kIndex2; + gapSize = 2; + } + else { + gapIndex = ConstPool::kIndex1; + gapSize = 1; + } + + // We don't have to check for errors here, if this failed nothing really + // happened (just the gap won't be visible) and it will fail again at + // place where the same check would generate `kErrorOutOfMemory` error. 
+ ConstPool::Gap* gap = ConstPool_allocGap(self); + if (!gap) + return; + + gap->_next = self->_gaps[gapIndex]; + self->_gaps[gapIndex] = gap; + + gap->_offset = offset; + gap->_size = gapSize; + + offset += gapSize; + size -= gapSize; + } +} + +Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept { + size_t treeIndex; + + if (size == 32) + treeIndex = kIndex32; + else if (size == 16) + treeIndex = kIndex16; + else if (size == 8) + treeIndex = kIndex8; + else if (size == 4) + treeIndex = kIndex4; + else if (size == 2) + treeIndex = kIndex2; + else if (size == 1) + treeIndex = kIndex1; + else + return DebugUtils::errored(kErrorInvalidArgument); + + ConstPool::Node* node = _tree[treeIndex].get(data); + if (node) { + dstOffset = node->_offset; + return kErrorOk; + } + + // Before incrementing the current offset try if there is a gap that can + // be used for the requested data. + size_t offset = ~size_t(0); + size_t gapIndex = treeIndex; + + while (gapIndex != kIndexCount - 1) { + ConstPool::Gap* gap = _gaps[treeIndex]; + + // Check if there is a gap. + if (gap) { + size_t gapOffset = gap->_offset; + size_t gapSize = gap->_size; + + // Destroy the gap for now. + _gaps[treeIndex] = gap->_next; + ConstPool_freeGap(this, gap); + + offset = gapOffset; + ASMJIT_ASSERT(Support::isAligned(offset, size)); + + gapSize -= size; + if (gapSize > 0) + ConstPool_addGap(this, gapOffset, gapSize); + } + + gapIndex++; + } + + if (offset == ~size_t(0)) { + // Get how many bytes have to be skipped so the address is aligned accordingly + // to the 'size'. + size_t diff = Support::alignUpDiff(_size, size); + + if (diff != 0) { + ConstPool_addGap(this, _size, diff); + _size += diff; + } + + offset = _size; + _size += size; + } + + // Add the initial node to the right index. + node = ConstPool::Tree::_newNode(_zone, data, size, offset, false); + if (!node) return DebugUtils::errored(kErrorOutOfMemory); + + _tree[treeIndex].insert(node); + _alignment = Support::max(_alignment, size); + + dstOffset = offset; + + // Now create a bunch of shared constants that are based on the data pattern. + // We stop at size 4, it probably doesn't make sense to split constants down + // to 1 byte. + size_t pCount = 1; + while (size > 4) { + size >>= 1; + pCount <<= 1; + + ASMJIT_ASSERT(treeIndex != 0); + treeIndex--; + + const uint8_t* pData = static_cast(data); + for (size_t i = 0; i < pCount; i++, pData += size) { + node = _tree[treeIndex].get(pData); + if (node) continue; + + node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true); + _tree[treeIndex].insert(node); + } + } + + return kErrorOk; +} + +// ============================================================================ +// [asmjit::ConstPool - Reset] +// ============================================================================ + +struct ConstPoolFill { + inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept : + _dst(dst), + _dataSize(dataSize) {} + + inline void operator()(const ConstPool::Node* node) noexcept { + if (!node->_shared) + memcpy(_dst + node->_offset, node->data(), _dataSize); + } + + uint8_t* _dst; + size_t _dataSize; +}; + +void ConstPool::fill(void* dst) const noexcept { + // Clears possible gaps, asmjit should never emit garbage to the output. 
+void ConstPool::fill(void* dst) const noexcept {
+  // Clears possible gaps, asmjit should never emit garbage to the output.
+  memset(dst, 0, _size);
+
+  ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
+  for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
+    _tree[i].forEach(filler);
+    filler._dataSize <<= 1;
+  }
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(asmjit_core_const_pool) {
+  Zone zone(32384 - Zone::kBlockOverhead);
+  ConstPool pool(&zone);
+
+  uint32_t i;
+  uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 1000000;
+
+  INFO("Adding %u constants to the pool.", kCount);
+  {
+    size_t prevOffset;
+    size_t curOffset;
+    uint64_t c = 0x0101010101010101u;
+
+    EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk);
+    EXPECT(prevOffset == 0);
+
+    for (i = 1; i < kCount; i++) {
+      c++;
+      EXPECT(pool.add(&c, 8, curOffset) == kErrorOk);
+      EXPECT(prevOffset + 8 == curOffset);
+      EXPECT(pool.size() == (i + 1) * 8);
+      prevOffset = curOffset;
+    }
+
+    EXPECT(pool.alignment() == 8);
+  }
+
+  INFO("Retrieving %u constants from the pool.", kCount);
+  {
+    uint64_t c = 0x0101010101010101u;
+
+    for (i = 0; i < kCount; i++) {
+      size_t offset;
+      EXPECT(pool.add(&c, 8, offset) == kErrorOk);
+      EXPECT(offset == i * 8);
+      c++;
+    }
+  }
+
+  INFO("Checking if the constants were split into 4-byte patterns");
+  {
+    uint32_t c = 0x01010101;
+    for (i = 0; i < kCount; i++) {
+      size_t offset;
+      EXPECT(pool.add(&c, 4, offset) == kErrorOk);
+      EXPECT(offset == i * 8);
+      c++;
+    }
+  }
+
+  INFO("Adding 2 byte constant to misalign the current offset");
+  {
+    uint16_t c = 0xFFFF;
+    size_t offset;
+
+    EXPECT(pool.add(&c, 2, offset) == kErrorOk);
+    EXPECT(offset == kCount * 8);
+    EXPECT(pool.alignment() == 8);
+  }
+
+  INFO("Adding 8 byte constant to check if pool gets aligned again");
+  {
+    uint64_t c = 0xFFFFFFFFFFFFFFFFu;
+    size_t offset;
+
+    EXPECT(pool.add(&c, 8, offset) == kErrorOk);
+    EXPECT(offset == kCount * 8 + 8);
+  }
+
+  INFO("Adding 2 byte constant to verify the gap is filled");
+  {
+    uint16_t c = 0xFFFE;
+    size_t offset;
+
+    EXPECT(pool.add(&c, 2, offset) == kErrorOk);
+    EXPECT(offset == kCount * 8 + 2);
+    EXPECT(pool.alignment() == 8);
+  }
+
+  INFO("Checking reset functionality");
+  {
+    pool.reset(&zone);
+    zone.reset();
+
+    EXPECT(pool.size() == 0);
+    EXPECT(pool.alignment() == 0);
+  }
+
+  INFO("Checking pool alignment when combined constants are added");
+  {
+    uint8_t bytes[32] = { 0 };
+    size_t offset;
+
+    pool.add(bytes, 1, offset);
+    EXPECT(pool.size() == 1);
+    EXPECT(pool.alignment() == 1);
+    EXPECT(offset == 0);
+
+    pool.add(bytes, 2, offset);
+    EXPECT(pool.size() == 4);
+    EXPECT(pool.alignment() == 2);
+    EXPECT(offset == 2);
+
+    pool.add(bytes, 4, offset);
+    EXPECT(pool.size() == 8);
+    EXPECT(pool.alignment() == 4);
+    EXPECT(offset == 4);
+
+    pool.add(bytes, 4, offset);
+    EXPECT(pool.size() == 8);
+    EXPECT(pool.alignment() == 4);
+    EXPECT(offset == 4);
+
+    pool.add(bytes, 32, offset);
+    EXPECT(pool.size() == 64);
+    EXPECT(pool.alignment() == 32);
+    EXPECT(offset == 32);
+  }
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/src/asmjit/core/constpool.h b/src/asmjit/core/constpool.h
new file mode 100644
index 0000000..0ff200e
--- /dev/null
+++ b/src/asmjit/core/constpool.h
@@ -0,0 +1,240 @@
+// [AsmJit]
+// Machine Code Generation for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+#ifndef _ASMJIT_CORE_CONSTPOOL_H
+#define _ASMJIT_CORE_CONSTPOOL_H
+
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonetree.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::ConstPool]
+// ============================================================================
+
+//! Constant pool.
+class ConstPool {
+public:
+  ASMJIT_NONCOPYABLE(ConstPool)
+
+  //! Constant pool scope.
+  enum Scope : uint32_t {
+    //! Local constant, always embedded right after the current function.
+    kScopeLocal = 0,
+    //! Global constant, embedded at the end of the currently compiled code.
+    kScopeGlobal = 1
+  };
+
+  //! \cond INTERNAL
+
+  //! Index of a given size in const-pool table.
+  enum Index : uint32_t {
+    kIndex1 = 0,
+    kIndex2 = 1,
+    kIndex4 = 2,
+    kIndex8 = 3,
+    kIndex16 = 4,
+    kIndex32 = 5,
+    kIndexCount = 6
+  };
+
+  //! Zone-allocated const-pool gap created by two differently aligned constants.
+  struct Gap {
+    Gap* _next;     //!< Pointer to the next gap.
+    size_t _offset; //!< Offset of the gap.
+    size_t _size;   //!< Remaining bytes of the gap (basically a gap size).
+  };
+
+  //! Zone-allocated const-pool node.
+  class Node : public ZoneTreeNodeT<Node> {
+  public:
+    ASMJIT_NONCOPYABLE(Node)
+
+    inline Node(size_t offset, bool shared) noexcept
+      : ZoneTreeNodeT<Node>(),
+        _shared(shared),
+        _offset(uint32_t(offset)) {}
+
+    inline void* data() const noexcept {
+      return static_cast<void*>(const_cast<Node*>(this) + 1);
+    }
+
+    uint32_t _shared : 1; //!< If this constant is shared with another.
+    uint32_t _offset;     //!< Data offset from the beginning of the pool.
+  };
+
+  //! Data comparer used internally.
+  class Compare {
+  public:
+    inline Compare(size_t dataSize) noexcept
+      : _dataSize(dataSize) {}
+
+    inline int operator()(const Node& a, const Node& b) const noexcept {
+      return ::memcmp(a.data(), b.data(), _dataSize);
+    }
+
+    inline int operator()(const Node& a, const void* data) const noexcept {
+      return ::memcmp(a.data(), data, _dataSize);
+    }
+
+    size_t _dataSize;
+  };
+
+  //! Zone-allocated const-pool tree.
+  struct Tree {
+    inline explicit Tree(size_t dataSize = 0) noexcept
+      : _tree(),
+        _size(0),
+        _dataSize(dataSize) {}
+
+    inline void reset() noexcept {
+      _tree.reset();
+      _size = 0;
+    }
+
+    inline bool empty() const noexcept { return _size == 0; }
+    inline size_t size() const noexcept { return _size; }
+
+    inline void setDataSize(size_t dataSize) noexcept {
+      ASMJIT_ASSERT(empty());
+      _dataSize = dataSize;
+    }
+
+    inline Node* get(const void* data) noexcept {
+      Compare cmp(_dataSize);
+      return _tree.get(data, cmp);
+    }
+
+    inline void insert(Node* node) noexcept {
+      Compare cmp(_dataSize);
+      _tree.insert(node, cmp);
+      _size++;
+    }
+
+    template<typename Visitor>
+    inline void forEach(Visitor& visitor) const noexcept {
+      Node* node = _tree.root();
+      if (!node) return;
+
+      Node* stack[Globals::kMaxTreeHeight];
+      size_t top = 0;
+
+      for (;;) {
+        Node* left = node->left();
+        if (left != nullptr) {
+          ASMJIT_ASSERT(top != Globals::kMaxTreeHeight);
+          stack[top++] = node;
+
+          node = left;
+          continue;
+        }
+
+        for (;;) {
+          visitor(node);
+          node = node->right();
+
+          if (node != nullptr)
+            break;
+
+          if (top == 0)
+            return;
+
+          node = stack[--top];
+        }
+      }
+    }
+
+    static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
+      Node* node = zone->allocT<Node>(sizeof(Node) + size);
+      if (ASMJIT_UNLIKELY(!node)) return nullptr;
+
+      node = new(node) Node(offset, shared);
+      memcpy(node->data(), data, size);
+      return node;
+    }
+
+    //! RB tree.
+    ZoneTree<Node> _tree;
+    //! Size of the tree (number of nodes).
+    size_t _size;
+    //! Size of the data.
+    size_t _dataSize;
+  };
+
+  //! \endcond
+
+  //! Zone allocator.
+  Zone* _zone;
+  //! Tree per size.
+  Tree _tree[kIndexCount];
+  //! Gaps per size.
+  Gap* _gaps[kIndexCount];
+  //! Gaps pool.
+  Gap* _gapPool;
+
+  //! Size of the pool (in bytes).
+  size_t _size;
+  //! Required pool alignment.
+  size_t _alignment;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  ASMJIT_API ConstPool(Zone* zone) noexcept;
+  ASMJIT_API ~ConstPool() noexcept;
+
+  ASMJIT_API void reset(Zone* zone) noexcept;
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Tests whether the constant-pool is empty.
+  inline bool empty() const noexcept { return _size == 0; }
+  //! Returns the size of the constant-pool in bytes.
+  inline size_t size() const noexcept { return _size; }
+  //! Returns minimum alignment.
+  inline size_t alignment() const noexcept { return _alignment; }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Adds a constant to the constant pool.
+  //!
+  //! The constant must have known size, which is 1, 2, 4, 8, 16 or 32 bytes.
+  //! The constant is added to the pool only if it doesn't exist already,
+  //! otherwise the cached value is returned.
+  //!
+  //! AsmJit is able to subdivide added constants, so for example if you add
+  //! an 8-byte constant 0x1122334455667788 it will create the following slots:
+  //!
+  //!   8-byte: 0x1122334455667788
+  //!   4-byte: 0x11223344, 0x55667788
+  //!
+  //! The reason is that when combining MMX/SSE/AVX code some patterns are used
+  //! frequently. However, AsmJit is not able to reallocate a constant that has
+  //! already been added. For example if you try to add a 4-byte constant and
+  //! then an 8-byte constant having the same 4-byte pattern as the previous
+  //! one, two independent slots will be generated by the pool.
+  ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
+
+  //! Fills the destination with the content of this constant pool.
+  ASMJIT_API void fill(void* dst) const noexcept;
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // _ASMJIT_CORE_CONSTPOOL_H
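A minimal usage sketch of the ConstPool API declared above, assuming `<asmjit/asmjit.h>` is included, `using namespace asmjit`, and a little-endian target for the byte-level comments (error handling omitted):

  #include <asmjit/asmjit.h>
  using namespace asmjit;

  static void constPoolExample() {
    Zone zone(1024);              // backing arena for the pool's nodes
    ConstPool pool(&zone);

    uint64_t c64 = 0x1122334455667788u;
    size_t offset;
    pool.add(&c64, 8, offset);    // first 8-byte constant lands at offset 0

    // The 4-byte halves were registered as shared constants, so adding the
    // low dword returns an offset inside the existing 8-byte slot instead
    // of growing the pool.
    uint32_t lo = 0x55667788u;
    size_t loOffset;
    pool.add(&lo, 4, loOffset);   // loOffset == offset on a little-endian target

    // fill() writes pool.size() bytes; the buffer should be aligned to
    // pool.alignment().
  }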
diff --git a/src/asmjit/core/cpuinfo.cpp b/src/asmjit/core/cpuinfo.cpp
new file mode 100644
index 0000000..3f41576
--- /dev/null
+++ b/src/asmjit/core/cpuinfo.cpp
@@ -0,0 +1,81 @@
+// [AsmJit]
+// Machine Code Generation for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+#define ASMJIT_EXPORTS
+
+#include "../core/cpuinfo.h"
+
+#if !defined(_WIN32)
+  #include <errno.h>
+  #include <sys/utsname.h>
+  #include <unistd.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - CPU NumThreads]
+// ============================================================================
+
+#if defined(_WIN32)
+static inline uint32_t detectHWThreadCount() noexcept {
+  SYSTEM_INFO info;
+  ::GetSystemInfo(&info);
+  return info.dwNumberOfProcessors;
+}
+#elif defined(_SC_NPROCESSORS_ONLN)
+static inline uint32_t detectHWThreadCount() noexcept {
+  long res = ::sysconf(_SC_NPROCESSORS_ONLN);
+  return res <= 0 ? uint32_t(1) : uint32_t(res);
+}
+#else
+static inline uint32_t detectHWThreadCount() noexcept {
+  return 1;
+}
+#endif
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - CPU Features]
+// ============================================================================
+
+#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
+namespace x86 { void detectCpu(CpuInfo& cpu) noexcept; }
+#endif
+
+#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
+namespace arm { void detectCpu(CpuInfo& cpu) noexcept; }
+#endif
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - Static Initializer]
+// ============================================================================
+
+static uint32_t cpuInfoInitialized;
+static CpuInfo cpuInfoGlobal(Globals::NoInit);
+
+const CpuInfo& CpuInfo::host() noexcept {
+  // This should never cause a problem as the resulting information should
+  // always be the same.
+  if (!cpuInfoInitialized) {
+    CpuInfo cpuInfoLocal;
+
+    #if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
+    x86::detectCpu(cpuInfoLocal);
+    #endif
+
+    #if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
+    arm::detectCpu(cpuInfoLocal);
+    #endif
+
+    cpuInfoLocal._hwThreadCount = detectHWThreadCount();
+    cpuInfoGlobal = cpuInfoLocal;
+    cpuInfoInitialized = 1;
+  }
+
+  return cpuInfoGlobal;
+}
+
+ASMJIT_END_NAMESPACE
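A short sketch of querying the cached host information from user code, assuming `<asmjit/asmjit.h>` is included and that the x86 backend's `x86::Features::kAVX2` identifier is available (the feature list itself lives outside this file, so that name is an assumption here):

  #include <asmjit/asmjit.h>
  #include <cstdio>
  using namespace asmjit;

  static void reportHostCpu() {
    const CpuInfo& cpu = CpuInfo::host();   // detected once, cached afterwards
    std::printf("%s / %s, %u hardware threads\n",
                cpu.vendor(), cpu.brand(), cpu.hwThreadCount());

    // BaseFeatures is queried by feature id; arch-specific ids come from the
    // x86/arm backends.
    if (cpu.hasFeature(x86::Features::kAVX2))
      std::printf("AVX2 is available\n");
  }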
diff --git a/src/asmjit/core/cpuinfo.h b/src/asmjit/core/cpuinfo.h
new file mode 100644
index 0000000..cf9228f
--- /dev/null
+++ b/src/asmjit/core/cpuinfo.h
@@ -0,0 +1,135 @@
+// [AsmJit]
+// Machine Code Generation for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+#ifndef _ASMJIT_CORE_CPUINFO_H
+#define _ASMJIT_CORE_CPUINFO_H
+
+#include "../core/arch.h"
+#include "../core/features.h"
+#include "../core/globals.h"
+#include "../core/string.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_support
+//! \{
+
+// ============================================================================
+// [asmjit::CpuInfo]
+// ============================================================================
+
+//! CPU information.
+class CpuInfo {
+public:
+  //! CPU architecture information.
+  ArchInfo _archInfo;
+  //! CPU family ID.
+  uint32_t _familyId;
+  //! CPU model ID.
+  uint32_t _modelId;
+  //! CPU brand ID.
+  uint32_t _brandId;
+  //! CPU stepping.
+  uint32_t _stepping;
+  //! Processor type.
+  uint32_t _processorType;
+  //! Maximum number of addressable IDs for logical processors.
+  uint32_t _maxLogicalProcessors;
+  //! Cache line size (in bytes).
+  uint32_t _cacheLineSize;
+  //! Number of hardware threads.
+  uint32_t _hwThreadCount;
+
+  //! CPU vendor string.
+  FixedString<16> _vendor;
+  //! CPU brand string.
+  FixedString<64> _brand;
+  //! CPU features.
+  BaseFeatures _features;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline CpuInfo() noexcept { reset(); }
+  inline CpuInfo(const CpuInfo& other) noexcept = default;
+
+  inline explicit CpuInfo(Globals::NoInit_) noexcept
+    : _archInfo(Globals::NoInit),
+      _features(Globals::NoInit) {}
+
+  //! Returns the host CPU information.
+  ASMJIT_API static const CpuInfo& host() noexcept;
+
+  //! Initializes CpuInfo to the given architecture, see `ArchInfo`.
+  inline void initArch(uint32_t archId, uint32_t archMode = 0) noexcept {
+    _archInfo.init(archId, archMode);
+  }
+
+  inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  inline CpuInfo& operator=(const CpuInfo& other) noexcept = default;
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the CPU architecture information.
+  inline const ArchInfo& archInfo() const noexcept { return _archInfo; }
+  //! Returns the CPU architecture id, see `ArchInfo::Id`.
+  inline uint32_t archId() const noexcept { return _archInfo.archId(); }
+  //! Returns the CPU architecture sub-id, see `ArchInfo::SubId`.
+  inline uint32_t archSubId() const noexcept { return _archInfo.archSubId(); }
+
+  //! Returns the CPU family ID.
+  inline uint32_t familyId() const noexcept { return _familyId; }
+  //! Returns the CPU model ID.
+  inline uint32_t modelId() const noexcept { return _modelId; }
+  //! Returns the CPU brand id.
+  inline uint32_t brandId() const noexcept { return _brandId; }
+  //! Returns the CPU stepping.
+  inline uint32_t stepping() const noexcept { return _stepping; }
+  //! Returns the processor type.
+  inline uint32_t processorType() const noexcept { return _processorType; }
+  //! Returns the maximum number of logical processors.
+  inline uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; }
+
+  //! Returns the size of a cache line flush.
+  inline uint32_t cacheLineSize() const noexcept { return _cacheLineSize; }
+  //! Returns the number of hardware threads available.
+  inline uint32_t hwThreadCount() const noexcept { return _hwThreadCount; }
+
+  //! Returns the CPU vendor.
+  inline const char* vendor() const noexcept { return _vendor.str; }
+  //! Tests whether the CPU vendor is equal to `s`.
+  inline bool isVendor(const char* s) const noexcept { return _vendor.eq(s); }
+
+  //! Returns the CPU brand string.
+  inline const char* brand() const noexcept { return _brand.str; }
+
+  //! Returns all CPU features as `BaseFeatures`, cast to your arch-specific
+  //! class if needed.
+  template<typename T = BaseFeatures>
+  inline const T& features() const noexcept { return _features.as<T>(); }
+
+  //! Tests whether the CPU has the given `feature`.
+  inline bool hasFeature(uint32_t featureId) const noexcept { return _features.has(featureId); }
+  //! Adds the given CPU `feature` to the list of this CpuInfo features.
+  inline CpuInfo& addFeature(uint32_t featureId) noexcept { _features.add(featureId); return *this; }
+
+  //! \}
+};
+
+//! 
\} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_CPUINFO_H diff --git a/src/asmjit/base/simdtypes.h b/src/asmjit/core/datatypes.h similarity index 54% rename from src/asmjit/base/simdtypes.h rename to src/asmjit/core/datatypes.h index 5c1c75a..b262cab 100644 --- a/src/asmjit/base/simdtypes.h +++ b/src/asmjit/core/datatypes.h @@ -1,22 +1,17 @@ // [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. +// Machine Code Generation for C++. // // [License] // Zlib - See LICENSE.md file in the package. -// [Guard] -#ifndef _ASMJIT_BASE_SIMDTYPES_H -#define _ASMJIT_BASE_SIMDTYPES_H +#ifndef _ASMJIT_CORE_DATATYPES_H +#define _ASMJIT_CORE_DATATYPES_H -// [Dependencies] -#include "../base/globals.h" +#include "../core/globals.h" -// [Api-Begin] -#include "../asmjit_apibegin.h" +ASMJIT_BEGIN_NAMESPACE -namespace asmjit { - -//! \addtogroup asmjit_base +//! \addtogroup asmjit_support //! \{ // ============================================================================ @@ -25,248 +20,6 @@ namespace asmjit { //! 64-bit data useful for creating SIMD constants. union Data64 { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Set all eight 8-bit signed integers. - static ASMJIT_INLINE Data64 fromI8(int8_t x0) noexcept { - Data64 self; - self.setI8(x0); - return self; - } - - //! Set all eight 8-bit unsigned integers. - static ASMJIT_INLINE Data64 fromU8(uint8_t x0) noexcept { - Data64 self; - self.setU8(x0); - return self; - } - - //! Set all eight 8-bit signed integers. - static ASMJIT_INLINE Data64 fromI8( - int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept { - - Data64 self; - self.setI8(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } - - //! Set all eight 8-bit unsigned integers. - static ASMJIT_INLINE Data64 fromU8( - uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept { - - Data64 self; - self.setU8(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } - - //! Set all four 16-bit signed integers. - static ASMJIT_INLINE Data64 fromI16(int16_t x0) noexcept { - Data64 self; - self.setI16(x0); - return self; - } - - //! Set all four 16-bit unsigned integers. - static ASMJIT_INLINE Data64 fromU16(uint16_t x0) noexcept { - Data64 self; - self.setU16(x0); - return self; - } - - //! Set all four 16-bit signed integers. - static ASMJIT_INLINE Data64 fromI16(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept { - Data64 self; - self.setI16(x0, x1, x2, x3); - return self; - } - - //! Set all four 16-bit unsigned integers. - static ASMJIT_INLINE Data64 fromU16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept { - Data64 self; - self.setU16(x0, x1, x2, x3); - return self; - } - - //! Set all two 32-bit signed integers. - static ASMJIT_INLINE Data64 fromI32(int32_t x0) noexcept { - Data64 self; - self.setI32(x0); - return self; - } - - //! Set all two 32-bit unsigned integers. - static ASMJIT_INLINE Data64 fromU32(uint32_t x0) noexcept { - Data64 self; - self.setU32(x0); - return self; - } - - //! Set all two 32-bit signed integers. - static ASMJIT_INLINE Data64 fromI32(int32_t x0, int32_t x1) noexcept { - Data64 self; - self.setI32(x0, x1); - return self; - } - - //! Set all two 32-bit unsigned integers. 
- static ASMJIT_INLINE Data64 fromU32(uint32_t x0, uint32_t x1) noexcept { - Data64 self; - self.setU32(x0, x1); - return self; - } - - //! Set 64-bit signed integer. - static ASMJIT_INLINE Data64 fromI64(int64_t x0) noexcept { - Data64 self; - self.setI64(x0); - return self; - } - - //! Set 64-bit unsigned integer. - static ASMJIT_INLINE Data64 fromU64(uint64_t x0) noexcept { - Data64 self; - self.setU64(x0); - return self; - } - - //! Set all two SP-FP values. - static ASMJIT_INLINE Data64 fromF32(float x0) noexcept { - Data64 self; - self.setF32(x0); - return self; - } - - //! Set all two SP-FP values. - static ASMJIT_INLINE Data64 fromF32(float x0, float x1) noexcept { - Data64 self; - self.setF32(x0, x1); - return self; - } - - //! Set all two SP-FP values. - static ASMJIT_INLINE Data64 fromF64(double x0) noexcept { - Data64 self; - self.setF64(x0); - return self; - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Set all eight 8-bit signed integers. - ASMJIT_INLINE void setI8(int8_t x0) noexcept { - setU8(static_cast(x0)); - } - - //! Set all eight 8-bit unsigned integers. - ASMJIT_INLINE void setU8(uint8_t x0) noexcept { - if (ASMJIT_ARCH_64BIT) { - uint64_t xq = static_cast(x0) * ASMJIT_UINT64_C(0x0101010101010101); - uq[0] = xq; - } - else { - uint32_t xd = static_cast(x0) * static_cast(0x01010101U); - ud[0] = xd; - ud[1] = xd; - } - } - - //! Set all eight 8-bit signed integers. - ASMJIT_INLINE void setI8( - int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept { - - sb[0] = x0; sb[1] = x1; sb[2] = x2; sb[3] = x3; - sb[4] = x4; sb[5] = x5; sb[6] = x6; sb[7] = x7; - } - - //! Set all eight 8-bit unsigned integers. - ASMJIT_INLINE void setU8( - uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept { - - ub[0] = x0; ub[1] = x1; ub[2] = x2; ub[3] = x3; - ub[4] = x4; ub[5] = x5; ub[6] = x6; ub[7] = x7; - } - - //! Set all four 16-bit signed integers. - ASMJIT_INLINE void setI16(int16_t x0) noexcept { - setU16(static_cast(x0)); - } - - //! Set all four 16-bit unsigned integers. - ASMJIT_INLINE void setU16(uint16_t x0) noexcept { - if (ASMJIT_ARCH_64BIT) { - uint64_t xq = static_cast(x0) * ASMJIT_UINT64_C(0x0001000100010001); - uq[0] = xq; - } - else { - uint32_t xd = static_cast(x0) * static_cast(0x00010001U); - ud[0] = xd; - ud[1] = xd; - } - } - - //! Set all four 16-bit signed integers. - ASMJIT_INLINE void setI16(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept { - sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3; - } - - //! Set all four 16-bit unsigned integers. - ASMJIT_INLINE void setU16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept { - uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3; - } - - //! Set all two 32-bit signed integers. - ASMJIT_INLINE void setI32(int32_t x0) noexcept { - sd[0] = x0; sd[1] = x0; - } - - //! Set all two 32-bit unsigned integers. - ASMJIT_INLINE void setU32(uint32_t x0) noexcept { - ud[0] = x0; ud[1] = x0; - } - - //! Set all two 32-bit signed integers. - ASMJIT_INLINE void setI32(int32_t x0, int32_t x1) noexcept { - sd[0] = x0; sd[1] = x1; - } - - //! Set all two 32-bit unsigned integers. - ASMJIT_INLINE void setU32(uint32_t x0, uint32_t x1) noexcept { - ud[0] = x0; ud[1] = x1; - } - - //! Set 64-bit signed integer. - ASMJIT_INLINE void setI64(int64_t x0) noexcept { - sq[0] = x0; - } - - //! 
Set 64-bit unsigned integer. - ASMJIT_INLINE void setU64(uint64_t x0) noexcept { - uq[0] = x0; - } - - //! Set all two SP-FP values. - ASMJIT_INLINE void setF32(float x0) noexcept { - sf[0] = x0; sf[1] = x0; - } - - //! Set all two SP-FP values. - ASMJIT_INLINE void setF32(float x0, float x1) noexcept { - sf[0] = x0; sf[1] = x1; - } - - //! Set all two SP-FP values. - ASMJIT_INLINE void setF64(double x0) noexcept { - df[0] = x0; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - //! Array of eight 8-bit signed integers. int8_t sb[8]; //! Array of eight 8-bit unsigned integers. @@ -288,6 +41,244 @@ union Data64 { float sf[2]; //! Array of one DP-FP value. double df[1]; + + //! \name Construction & Destruction + //! \{ + + //! Sets all eight 8-bit signed integers. + static inline Data64 fromI8(int8_t x0) noexcept { + Data64 self; + self.setI8(x0); + return self; + } + + //! Sets all eight 8-bit unsigned integers. + static inline Data64 fromU8(uint8_t x0) noexcept { + Data64 self; + self.setU8(x0); + return self; + } + + //! Sets all eight 8-bit signed integers. + static inline Data64 fromI8( + int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept { + + Data64 self; + self.setI8(x0, x1, x2, x3, x4, x5, x6, x7); + return self; + } + + //! Sets all eight 8-bit unsigned integers. + static inline Data64 fromU8( + uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept { + + Data64 self; + self.setU8(x0, x1, x2, x3, x4, x5, x6, x7); + return self; + } + + //! Sets all four 16-bit signed integers. + static inline Data64 fromI16(int16_t x0) noexcept { + Data64 self; + self.setI16(x0); + return self; + } + + //! Sets all four 16-bit unsigned integers. + static inline Data64 fromU16(uint16_t x0) noexcept { + Data64 self; + self.setU16(x0); + return self; + } + + //! Sets all four 16-bit signed integers. + static inline Data64 fromI16(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept { + Data64 self; + self.setI16(x0, x1, x2, x3); + return self; + } + + //! Sets all four 16-bit unsigned integers. + static inline Data64 fromU16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept { + Data64 self; + self.setU16(x0, x1, x2, x3); + return self; + } + + //! Sets all two 32-bit signed integers. + static inline Data64 fromI32(int32_t x0) noexcept { + Data64 self; + self.setI32(x0); + return self; + } + + //! Sets all two 32-bit unsigned integers. + static inline Data64 fromU32(uint32_t x0) noexcept { + Data64 self; + self.setU32(x0); + return self; + } + + //! Sets all two 32-bit signed integers. + static inline Data64 fromI32(int32_t x0, int32_t x1) noexcept { + Data64 self; + self.setI32(x0, x1); + return self; + } + + //! Sets all two 32-bit unsigned integers. + static inline Data64 fromU32(uint32_t x0, uint32_t x1) noexcept { + Data64 self; + self.setU32(x0, x1); + return self; + } + + //! Sets 64-bit signed integer. + static inline Data64 fromI64(int64_t x0) noexcept { + Data64 self; + self.setI64(x0); + return self; + } + + //! Sets 64-bit unsigned integer. + static inline Data64 fromU64(uint64_t x0) noexcept { + Data64 self; + self.setU64(x0); + return self; + } + + //! Sets all two SP-FP values. + static inline Data64 fromF32(float x0) noexcept { + Data64 self; + self.setF32(x0); + return self; + } + + //! Sets all two SP-FP values. 
+ static inline Data64 fromF32(float x0, float x1) noexcept { + Data64 self; + self.setF32(x0, x1); + return self; + } + + //! Sets all two SP-FP values. + static inline Data64 fromF64(double x0) noexcept { + Data64 self; + self.setF64(x0); + return self; + } + + //! \} + + //! \name Accessors + //! \{ + + //! Sets all eight 8-bit signed integers. + inline void setI8(int8_t x0) noexcept { + setU8(uint8_t(x0)); + } + + //! Sets all eight 8-bit unsigned integers. + inline void setU8(uint8_t x0) noexcept { + if (ASMJIT_ARCH_BITS >= 64) { + uint64_t xq = uint64_t(x0) * 0x0101010101010101u; + uq[0] = xq; + } + else { + uint32_t xd = uint32_t(x0) * 0x01010101u; + ud[0] = xd; + ud[1] = xd; + } + } + + //! Sets all eight 8-bit signed integers. + inline void setI8( + int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept { + + sb[0] = x0; sb[1] = x1; sb[2] = x2; sb[3] = x3; + sb[4] = x4; sb[5] = x5; sb[6] = x6; sb[7] = x7; + } + + //! Sets all eight 8-bit unsigned integers. + inline void setU8( + uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept { + + ub[0] = x0; ub[1] = x1; ub[2] = x2; ub[3] = x3; + ub[4] = x4; ub[5] = x5; ub[6] = x6; ub[7] = x7; + } + + //! Sets all four 16-bit signed integers. + inline void setI16(int16_t x0) noexcept { + setU16(uint16_t(x0)); + } + + //! Sets all four 16-bit unsigned integers. + inline void setU16(uint16_t x0) noexcept { + if (ASMJIT_ARCH_BITS >= 64) { + uint64_t xq = uint64_t(x0) * 0x0001000100010001u; + uq[0] = xq; + } + else { + uint32_t xd = uint32_t(x0) * 0x00010001u; + ud[0] = xd; + ud[1] = xd; + } + } + + //! Sets all four 16-bit signed integers. + inline void setI16(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept { + sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3; + } + + //! Sets all four 16-bit unsigned integers. + inline void setU16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept { + uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3; + } + + //! Sets all two 32-bit signed integers. + inline void setI32(int32_t x0) noexcept { + sd[0] = x0; sd[1] = x0; + } + + //! Sets all two 32-bit unsigned integers. + inline void setU32(uint32_t x0) noexcept { + ud[0] = x0; ud[1] = x0; + } + + //! Sets all two 32-bit signed integers. + inline void setI32(int32_t x0, int32_t x1) noexcept { + sd[0] = x0; sd[1] = x1; + } + + //! Sets all two 32-bit unsigned integers. + inline void setU32(uint32_t x0, uint32_t x1) noexcept { + ud[0] = x0; ud[1] = x1; + } + + //! Sets 64-bit signed integer. + inline void setI64(int64_t x0) noexcept { + sq[0] = x0; + } + + //! Sets 64-bit unsigned integer. + inline void setU64(uint64_t x0) noexcept { + uq[0] = x0; + } + + //! Sets all two SP-FP values. + inline void setF32(float x0) noexcept { + sf[0] = x0; sf[1] = x0; + } + + //! Sets all two SP-FP values. + inline void setF32(float x0, float x1) noexcept { + sf[0] = x0; sf[1] = x1; + } + + //! Sets all two SP-FP values. + inline void setF64(double x0) noexcept { + df[0] = x0; + } }; // ============================================================================ @@ -296,326 +287,6 @@ union Data64 { //! 128-bit data useful for creating SIMD constants. union Data128 { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Set all sixteen 8-bit signed integers. 
- static ASMJIT_INLINE Data128 fromI8(int8_t x0) noexcept { - Data128 self; - self.setI8(x0); - return self; - } - - //! Set all sixteen 8-bit unsigned integers. - static ASMJIT_INLINE Data128 fromU8(uint8_t x0) noexcept { - Data128 self; - self.setU8(x0); - return self; - } - - //! Set all sixteen 8-bit signed integers. - static ASMJIT_INLINE Data128 fromI8( - int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , - int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , - int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, - int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept { - - Data128 self; - self.setI8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); - return self; - } - - //! Set all sixteen 8-bit unsigned integers. - static ASMJIT_INLINE Data128 fromU8( - uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , - uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , - uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, - uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept { - - Data128 self; - self.setU8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); - return self; - } - - //! Set all eight 16-bit signed integers. - static ASMJIT_INLINE Data128 fromI16(int16_t x0) noexcept { - Data128 self; - self.setI16(x0); - return self; - } - - //! Set all eight 16-bit unsigned integers. - static ASMJIT_INLINE Data128 fromU16(uint16_t x0) noexcept { - Data128 self; - self.setU16(x0); - return self; - } - - //! Set all eight 16-bit signed integers. - static ASMJIT_INLINE Data128 fromI16( - int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept { - - Data128 self; - self.setI16(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } - - //! Set all eight 16-bit unsigned integers. - static ASMJIT_INLINE Data128 fromU16( - uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept { - - Data128 self; - self.setU16(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } - - //! Set all four 32-bit signed integers. - static ASMJIT_INLINE Data128 fromI32(int32_t x0) noexcept { - Data128 self; - self.setI32(x0); - return self; - } - - //! Set all four 32-bit unsigned integers. - static ASMJIT_INLINE Data128 fromU32(uint32_t x0) noexcept { - Data128 self; - self.setU32(x0); - return self; - } - - //! Set all four 32-bit signed integers. - static ASMJIT_INLINE Data128 fromI32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept { - Data128 self; - self.setI32(x0, x1, x2, x3); - return self; - } - - //! Set all four 32-bit unsigned integers. - static ASMJIT_INLINE Data128 fromU32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept { - Data128 self; - self.setU32(x0, x1, x2, x3); - return self; - } - - //! Set all two 64-bit signed integers. - static ASMJIT_INLINE Data128 fromI64(int64_t x0) noexcept { - Data128 self; - self.setI64(x0); - return self; - } - - //! Set all two 64-bit unsigned integers. - static ASMJIT_INLINE Data128 fromU64(uint64_t x0) noexcept { - Data128 self; - self.setU64(x0); - return self; - } - - //! Set all two 64-bit signed integers. - static ASMJIT_INLINE Data128 fromI64(int64_t x0, int64_t x1) noexcept { - Data128 self; - self.setI64(x0, x1); - return self; - } - - //! Set all two 64-bit unsigned integers. - static ASMJIT_INLINE Data128 fromU64(uint64_t x0, uint64_t x1) noexcept { - Data128 self; - self.setU64(x0, x1); - return self; - } - - //! Set all four SP-FP floats. 
- static ASMJIT_INLINE Data128 fromF32(float x0) noexcept { - Data128 self; - self.setF32(x0); - return self; - } - - //! Set all four SP-FP floats. - static ASMJIT_INLINE Data128 fromF32(float x0, float x1, float x2, float x3) noexcept { - Data128 self; - self.setF32(x0, x1, x2, x3); - return self; - } - - //! Set all two DP-FP floats. - static ASMJIT_INLINE Data128 fromF64(double x0) noexcept { - Data128 self; - self.setF64(x0); - return self; - } - - //! Set all two DP-FP floats. - static ASMJIT_INLINE Data128 fromF64(double x0, double x1) noexcept { - Data128 self; - self.setF64(x0, x1); - return self; - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Set all sixteen 8-bit signed integers. - ASMJIT_INLINE void setI8(int8_t x0) noexcept { - setU8(static_cast(x0)); - } - - //! Set all sixteen 8-bit unsigned integers. - ASMJIT_INLINE void setU8(uint8_t x0) noexcept { - if (ASMJIT_ARCH_64BIT) { - uint64_t xq = static_cast(x0) * ASMJIT_UINT64_C(0x0101010101010101); - uq[0] = xq; - uq[1] = xq; - } - else { - uint32_t xd = static_cast(x0) * static_cast(0x01010101U); - ud[0] = xd; - ud[1] = xd; - ud[2] = xd; - ud[3] = xd; - } - } - - //! Set all sixteen 8-bit signed integers. - ASMJIT_INLINE void setI8( - int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , - int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , - int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, - int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept { - - sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ; - sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ; - sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11; - sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15; - } - - //! Set all sixteen 8-bit unsigned integers. - ASMJIT_INLINE void setU8( - uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , - uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , - uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, - uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept { - - ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ; - ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ; - ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11; - ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15; - } - - //! Set all eight 16-bit signed integers. - ASMJIT_INLINE void setI16(int16_t x0) noexcept { - setU16(static_cast(x0)); - } - - //! Set all eight 16-bit unsigned integers. - ASMJIT_INLINE void setU16(uint16_t x0) noexcept { - if (ASMJIT_ARCH_64BIT) { - uint64_t xq = static_cast(x0) * ASMJIT_UINT64_C(0x0001000100010001); - uq[0] = xq; - uq[1] = xq; - } - else { - uint32_t xd = static_cast(x0) * static_cast(0x00010001U); - ud[0] = xd; - ud[1] = xd; - ud[2] = xd; - ud[3] = xd; - } - } - - //! Set all eight 16-bit signed integers. - ASMJIT_INLINE void setI16( - int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept { - - sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3; - sw[4] = x4; sw[5] = x5; sw[6] = x6; sw[7] = x7; - } - - //! Set all eight 16-bit unsigned integers. - ASMJIT_INLINE void setU16( - uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept { - - uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3; - uw[4] = x4; uw[5] = x5; uw[6] = x6; uw[7] = x7; - } - - //! Set all four 32-bit signed integers. 
- ASMJIT_INLINE void setI32(int32_t x0) noexcept { - setU32(static_cast(x0)); - } - - //! Set all four 32-bit unsigned integers. - ASMJIT_INLINE void setU32(uint32_t x0) noexcept { - if (ASMJIT_ARCH_64BIT) { - uint64_t t = (static_cast(x0) << 32) + x0; - uq[0] = t; - uq[1] = t; - } - else { - ud[0] = x0; - ud[1] = x0; - ud[2] = x0; - ud[3] = x0; - } - } - - //! Set all four 32-bit signed integers. - ASMJIT_INLINE void setI32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept { - sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3; - } - - //! Set all four 32-bit unsigned integers. - ASMJIT_INLINE void setU32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept { - ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3; - } - - //! Set all two 64-bit signed integers. - ASMJIT_INLINE void setI64(int64_t x0) noexcept { - sq[0] = x0; sq[1] = x0; - } - - //! Set all two 64-bit unsigned integers. - ASMJIT_INLINE void setU64(uint64_t x0) noexcept { - uq[0] = x0; uq[1] = x0; - } - - //! Set all two 64-bit signed integers. - ASMJIT_INLINE void setI64(int64_t x0, int64_t x1) noexcept { - sq[0] = x0; sq[1] = x1; - } - - //! Set all two 64-bit unsigned integers. - ASMJIT_INLINE void setU64(uint64_t x0, uint64_t x1) noexcept { - uq[0] = x0; uq[1] = x1; - } - - //! Set all four SP-FP floats. - ASMJIT_INLINE void setF32(float x0) noexcept { - sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0; - } - - //! Set all four SP-FP floats. - ASMJIT_INLINE void setF32(float x0, float x1, float x2, float x3) noexcept { - sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3; - } - - //! Set all two DP-FP floats. - ASMJIT_INLINE void setF64(double x0) noexcept { - df[0] = x0; df[1] = x0; - } - - //! Set all two DP-FP floats. - ASMJIT_INLINE void setF64(double x0, double x1) noexcept { - df[0] = x0; df[1] = x1; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - //! Array of sixteen 8-bit signed integers. int8_t sb[16]; //! Array of sixteen 8-bit unsigned integers. @@ -637,6 +308,322 @@ union Data128 { float sf[4]; //! Array of two 64-bit double precision floating points. double df[2]; + + //! \name Construction & Destruction + //! \{ + + //! Sets all sixteen 8-bit signed integers. + static inline Data128 fromI8(int8_t x0) noexcept { + Data128 self; + self.setI8(x0); + return self; + } + + //! Sets all sixteen 8-bit unsigned integers. + static inline Data128 fromU8(uint8_t x0) noexcept { + Data128 self; + self.setU8(x0); + return self; + } + + //! Sets all sixteen 8-bit signed integers. + static inline Data128 fromI8( + int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , + int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , + int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, + int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept { + + Data128 self; + self.setI8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + return self; + } + + //! Sets all sixteen 8-bit unsigned integers. + static inline Data128 fromU8( + uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , + uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , + uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, + uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept { + + Data128 self; + self.setU8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + return self; + } + + //! Sets all eight 16-bit signed integers. 
+ static inline Data128 fromI16(int16_t x0) noexcept { + Data128 self; + self.setI16(x0); + return self; + } + + //! Sets all eight 16-bit unsigned integers. + static inline Data128 fromU16(uint16_t x0) noexcept { + Data128 self; + self.setU16(x0); + return self; + } + + //! Sets all eight 16-bit signed integers. + static inline Data128 fromI16( + int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept { + + Data128 self; + self.setI16(x0, x1, x2, x3, x4, x5, x6, x7); + return self; + } + + //! Sets all eight 16-bit unsigned integers. + static inline Data128 fromU16( + uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept { + + Data128 self; + self.setU16(x0, x1, x2, x3, x4, x5, x6, x7); + return self; + } + + //! Sets all four 32-bit signed integers. + static inline Data128 fromI32(int32_t x0) noexcept { + Data128 self; + self.setI32(x0); + return self; + } + + //! Sets all four 32-bit unsigned integers. + static inline Data128 fromU32(uint32_t x0) noexcept { + Data128 self; + self.setU32(x0); + return self; + } + + //! Sets all four 32-bit signed integers. + static inline Data128 fromI32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept { + Data128 self; + self.setI32(x0, x1, x2, x3); + return self; + } + + //! Sets all four 32-bit unsigned integers. + static inline Data128 fromU32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept { + Data128 self; + self.setU32(x0, x1, x2, x3); + return self; + } + + //! Sets all two 64-bit signed integers. + static inline Data128 fromI64(int64_t x0) noexcept { + Data128 self; + self.setI64(x0); + return self; + } + + //! Sets all two 64-bit unsigned integers. + static inline Data128 fromU64(uint64_t x0) noexcept { + Data128 self; + self.setU64(x0); + return self; + } + + //! Sets all two 64-bit signed integers. + static inline Data128 fromI64(int64_t x0, int64_t x1) noexcept { + Data128 self; + self.setI64(x0, x1); + return self; + } + + //! Sets all two 64-bit unsigned integers. + static inline Data128 fromU64(uint64_t x0, uint64_t x1) noexcept { + Data128 self; + self.setU64(x0, x1); + return self; + } + + //! Sets all four SP-FP floats. + static inline Data128 fromF32(float x0) noexcept { + Data128 self; + self.setF32(x0); + return self; + } + + //! Sets all four SP-FP floats. + static inline Data128 fromF32(float x0, float x1, float x2, float x3) noexcept { + Data128 self; + self.setF32(x0, x1, x2, x3); + return self; + } + + //! Sets all two DP-FP floats. + static inline Data128 fromF64(double x0) noexcept { + Data128 self; + self.setF64(x0); + return self; + } + + //! Sets all two DP-FP floats. + static inline Data128 fromF64(double x0, double x1) noexcept { + Data128 self; + self.setF64(x0, x1); + return self; + } + + //! \} + + //! \name Accessors + //! \{ + + //! Sets all sixteen 8-bit signed integers. + inline void setI8(int8_t x0) noexcept { + setU8(uint8_t(x0)); + } + + //! Sets all sixteen 8-bit unsigned integers. + inline void setU8(uint8_t x0) noexcept { + if (ASMJIT_ARCH_BITS >= 64) { + uint64_t xq = uint64_t(x0) * 0x0101010101010101u; + uq[0] = xq; + uq[1] = xq; + } + else { + uint32_t xd = uint32_t(x0) * 0x01010101u; + ud[0] = xd; + ud[1] = xd; + ud[2] = xd; + ud[3] = xd; + } + } + + //! Sets all sixteen 8-bit signed integers. 
+ inline void setI8( + int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , + int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , + int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, + int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept { + + sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ; + sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ; + sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11; + sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15; + } + + //! Sets all sixteen 8-bit unsigned integers. + inline void setU8( + uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , + uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , + uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, + uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept { + + ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ; + ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ; + ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11; + ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15; + } + + //! Sets all eight 16-bit signed integers. + inline void setI16(int16_t x0) noexcept { + setU16(uint16_t(x0)); + } + + //! Sets all eight 16-bit unsigned integers. + inline void setU16(uint16_t x0) noexcept { + if (ASMJIT_ARCH_BITS >= 64) { + uint64_t xq = uint64_t(x0) * 0x0001000100010001u; + uq[0] = xq; + uq[1] = xq; + } + else { + uint32_t xd = uint32_t(x0) * 0x00010001u; + ud[0] = xd; + ud[1] = xd; + ud[2] = xd; + ud[3] = xd; + } + } + + //! Sets all eight 16-bit signed integers. + inline void setI16( + int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept { + + sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3; + sw[4] = x4; sw[5] = x5; sw[6] = x6; sw[7] = x7; + } + + //! Sets all eight 16-bit unsigned integers. + inline void setU16( + uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept { + + uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3; + uw[4] = x4; uw[5] = x5; uw[6] = x6; uw[7] = x7; + } + + //! Sets all four 32-bit signed integers. + inline void setI32(int32_t x0) noexcept { + setU32(uint32_t(x0)); + } + + //! Sets all four 32-bit unsigned integers. + inline void setU32(uint32_t x0) noexcept { + if (ASMJIT_ARCH_BITS >= 64) { + uint64_t t = (uint64_t(x0) << 32) + x0; + uq[0] = t; + uq[1] = t; + } + else { + ud[0] = x0; + ud[1] = x0; + ud[2] = x0; + ud[3] = x0; + } + } + + //! Sets all four 32-bit signed integers. + inline void setI32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept { + sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3; + } + + //! Sets all four 32-bit unsigned integers. + inline void setU32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept { + ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3; + } + + //! Sets all two 64-bit signed integers. + inline void setI64(int64_t x0) noexcept { + sq[0] = x0; sq[1] = x0; + } + + //! Sets all two 64-bit unsigned integers. + inline void setU64(uint64_t x0) noexcept { + uq[0] = x0; uq[1] = x0; + } + + //! Sets all two 64-bit signed integers. + inline void setI64(int64_t x0, int64_t x1) noexcept { + sq[0] = x0; sq[1] = x1; + } + + //! Sets all two 64-bit unsigned integers. + inline void setU64(uint64_t x0, uint64_t x1) noexcept { + uq[0] = x0; uq[1] = x1; + } + + //! Sets all four SP-FP floats. + inline void setF32(float x0) noexcept { + sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0; + } + + //! Sets all four SP-FP floats. 
+ inline void setF32(float x0, float x1, float x2, float x3) noexcept { + sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3; + } + + //! Sets all two DP-FP floats. + inline void setF64(double x0) noexcept { + df[0] = x0; df[1] = x0; + } + + //! Sets all two DP-FP floats. + inline void setF64(double x0, double x1) noexcept { + df[0] = x0; df[1] = x1; + } }; // ============================================================================ @@ -645,402 +632,6 @@ union Data128 { //! 256-bit data useful for creating SIMD constants. union Data256 { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Set all thirty two 8-bit signed integers. - static ASMJIT_INLINE Data256 fromI8(int8_t x0) noexcept { - Data256 self; - self.setI8(x0); - return self; - } - - //! Set all thirty two 8-bit unsigned integers. - static ASMJIT_INLINE Data256 fromU8(uint8_t x0) noexcept { - Data256 self; - self.setU8(x0); - return self; - } - - //! Set all thirty two 8-bit signed integers. - static ASMJIT_INLINE Data256 fromI8( - int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , - int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , - int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, - int8_t x12, int8_t x13, int8_t x14, int8_t x15, - int8_t x16, int8_t x17, int8_t x18, int8_t x19, - int8_t x20, int8_t x21, int8_t x22, int8_t x23, - int8_t x24, int8_t x25, int8_t x26, int8_t x27, - int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept { - - Data256 self; - self.setI8( - x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15, - x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31); - return self; - } - - //! Set all thirty two 8-bit unsigned integers. - static ASMJIT_INLINE Data256 fromU8( - uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , - uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , - uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, - uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15, - uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19, - uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23, - uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27, - uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept { - - Data256 self; - self.setU8( - x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15, - x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31); - return self; - } - - //! Set all sixteen 16-bit signed integers. - static ASMJIT_INLINE Data256 fromI16(int16_t x0) noexcept { - Data256 self; - self.setI16(x0); - return self; - } - - //! Set all sixteen 16-bit unsigned integers. - static ASMJIT_INLINE Data256 fromU16(uint16_t x0) noexcept { - Data256 self; - self.setU16(x0); - return self; - } - - //! Set all sixteen 16-bit signed integers. - static ASMJIT_INLINE Data256 fromI16( - int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7 , - int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept { - - Data256 self; - self.setI16(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); - return self; - } - - //! Set all sixteen 16-bit unsigned integers. 
- static ASMJIT_INLINE Data256 fromU16( - uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7 , - uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept { - - Data256 self; - self.setU16(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); - return self; - } - - //! Set all eight 32-bit signed integers. - static ASMJIT_INLINE Data256 fromI32(int32_t x0) noexcept { - Data256 self; - self.setI32(x0); - return self; - } - - //! Set all eight 32-bit unsigned integers. - static ASMJIT_INLINE Data256 fromU32(uint32_t x0) noexcept { - Data256 self; - self.setU32(x0); - return self; - } - - //! Set all eight 32-bit signed integers. - static ASMJIT_INLINE Data256 fromI32( - int32_t x0, int32_t x1, int32_t x2, int32_t x3, - int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept { - - Data256 self; - self.setI32(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } - - //! Set all eight 32-bit unsigned integers. - static ASMJIT_INLINE Data256 fromU32( - uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, - uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept { - - Data256 self; - self.setU32(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } - - //! Set all four 64-bit signed integers. - static ASMJIT_INLINE Data256 fromI64(int64_t x0) noexcept { - Data256 self; - self.setI64(x0); - return self; - } - - //! Set all four 64-bit unsigned integers. - static ASMJIT_INLINE Data256 fromU64(uint64_t x0) noexcept { - Data256 self; - self.setU64(x0); - return self; - } - - //! Set all four 64-bit signed integers. - static ASMJIT_INLINE Data256 fromI64(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept { - Data256 self; - self.setI64(x0, x1, x2, x3); - return self; - } - - //! Set all four 64-bit unsigned integers. - static ASMJIT_INLINE Data256 fromU64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept { - Data256 self; - self.setU64(x0, x1, x2, x3); - return self; - } - - //! Set all eight SP-FP floats. - static ASMJIT_INLINE Data256 fromF32(float x0) noexcept { - Data256 self; - self.setF32(x0); - return self; - } - - //! Set all eight SP-FP floats. - static ASMJIT_INLINE Data256 fromF32( - float x0, float x1, float x2, float x3, - float x4, float x5, float x6, float x7) noexcept { - - Data256 self; - self.setF32(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } - - //! Set all four DP-FP floats. - static ASMJIT_INLINE Data256 fromF64(double x0) noexcept { - Data256 self; - self.setF64(x0); - return self; - } - - //! Set all four DP-FP floats. - static ASMJIT_INLINE Data256 fromF64(double x0, double x1, double x2, double x3) noexcept { - Data256 self; - self.setF64(x0, x1, x2, x3); - return self; - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Set all thirty two 8-bit signed integers. - ASMJIT_INLINE void setI8(int8_t x0) noexcept { - setU8(static_cast(x0)); - } - - //! Set all thirty two 8-bit unsigned integers. 
- ASMJIT_INLINE void setU8(uint8_t x0) noexcept { - if (ASMJIT_ARCH_64BIT) { - uint64_t xq = static_cast(x0) * ASMJIT_UINT64_C(0x0101010101010101); - uq[0] = xq; - uq[1] = xq; - uq[2] = xq; - uq[3] = xq; - } - else { - uint32_t xd = static_cast(x0) * static_cast(0x01010101U); - ud[0] = xd; - ud[1] = xd; - ud[2] = xd; - ud[3] = xd; - ud[4] = xd; - ud[5] = xd; - ud[6] = xd; - ud[7] = xd; - } - } - - //! Set all thirty two 8-bit signed integers. - ASMJIT_INLINE void setI8( - int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , - int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , - int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, - int8_t x12, int8_t x13, int8_t x14, int8_t x15, - int8_t x16, int8_t x17, int8_t x18, int8_t x19, - int8_t x20, int8_t x21, int8_t x22, int8_t x23, - int8_t x24, int8_t x25, int8_t x26, int8_t x27, - int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept { - - sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ; - sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ; - sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11; - sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15; - sb[16] = x16; sb[17] = x17; sb[18] = x18; sb[19] = x19; - sb[20] = x20; sb[21] = x21; sb[22] = x22; sb[23] = x23; - sb[24] = x24; sb[25] = x25; sb[26] = x26; sb[27] = x27; - sb[28] = x28; sb[29] = x29; sb[30] = x30; sb[31] = x31; - } - - //! Set all thirty two 8-bit unsigned integers. - ASMJIT_INLINE void setU8( - uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , - uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , - uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, - uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15, - uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19, - uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23, - uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27, - uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept { - - ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ; - ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ; - ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11; - ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15; - ub[16] = x16; ub[17] = x17; ub[18] = x18; ub[19] = x19; - ub[20] = x20; ub[21] = x21; ub[22] = x22; ub[23] = x23; - ub[24] = x24; ub[25] = x25; ub[26] = x26; ub[27] = x27; - ub[28] = x28; ub[29] = x29; ub[30] = x30; ub[31] = x31; - } - - //! Set all sixteen 16-bit signed integers. - ASMJIT_INLINE void setI16(int16_t x0) noexcept { - setU16(static_cast(x0)); - } - - //! Set all eight 16-bit unsigned integers. - ASMJIT_INLINE void setU16(uint16_t x0) noexcept { - if (ASMJIT_ARCH_64BIT) { - uint64_t xq = static_cast(x0) * ASMJIT_UINT64_C(0x0001000100010001); - uq[0] = xq; - uq[1] = xq; - uq[2] = xq; - uq[3] = xq; - } - else { - uint32_t xd = static_cast(x0) * static_cast(0x00010001U); - ud[0] = xd; - ud[1] = xd; - ud[2] = xd; - ud[3] = xd; - ud[4] = xd; - ud[5] = xd; - ud[6] = xd; - ud[7] = xd; - } - } - - //! Set all sixteen 16-bit signed integers. - ASMJIT_INLINE void setI16( - int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7, - int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept { - - sw[0 ] = x0 ; sw[1 ] = x1 ; sw[2 ] = x2 ; sw[3 ] = x3 ; - sw[4 ] = x4 ; sw[5 ] = x5 ; sw[6 ] = x6 ; sw[7 ] = x7 ; - sw[8 ] = x8 ; sw[9 ] = x9 ; sw[10] = x10; sw[11] = x11; - sw[12] = x12; sw[13] = x13; sw[14] = x14; sw[15] = x15; - } - - //! Set all sixteen 16-bit unsigned integers. 
- ASMJIT_INLINE void setU16( - uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7, - uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept { - - uw[0 ] = x0 ; uw[1 ] = x1 ; uw[2 ] = x2 ; uw[3 ] = x3 ; - uw[4 ] = x4 ; uw[5 ] = x5 ; uw[6 ] = x6 ; uw[7 ] = x7 ; - uw[8 ] = x8 ; uw[9 ] = x9 ; uw[10] = x10; uw[11] = x11; - uw[12] = x12; uw[13] = x13; uw[14] = x14; uw[15] = x15; - } - - //! Set all eight 32-bit signed integers. - ASMJIT_INLINE void setI32(int32_t x0) noexcept { - setU32(static_cast(x0)); - } - - //! Set all eight 32-bit unsigned integers. - ASMJIT_INLINE void setU32(uint32_t x0) noexcept { - if (ASMJIT_ARCH_64BIT) { - uint64_t xq = (static_cast(x0) << 32) + x0; - uq[0] = xq; - uq[1] = xq; - uq[2] = xq; - uq[3] = xq; - } - else { - ud[0] = x0; - ud[1] = x0; - ud[2] = x0; - ud[3] = x0; - ud[4] = x0; - ud[5] = x0; - ud[6] = x0; - ud[7] = x0; - } - } - - //! Set all eight 32-bit signed integers. - ASMJIT_INLINE void setI32( - int32_t x0, int32_t x1, int32_t x2, int32_t x3, - int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept { - - sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3; - sd[4] = x4; sd[5] = x5; sd[6] = x6; sd[7] = x7; - } - - //! Set all eight 32-bit unsigned integers. - ASMJIT_INLINE void setU32( - uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, - uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept { - - ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3; - ud[4] = x4; ud[5] = x5; ud[6] = x6; ud[7] = x7; - } - - //! Set all four 64-bit signed integers. - ASMJIT_INLINE void setI64(int64_t x0) noexcept { - sq[0] = x0; sq[1] = x0; sq[2] = x0; sq[3] = x0; - } - - //! Set all four 64-bit unsigned integers. - ASMJIT_INLINE void setU64(uint64_t x0) noexcept { - uq[0] = x0; uq[1] = x0; uq[2] = x0; uq[3] = x0; - } - - //! Set all four 64-bit signed integers. - ASMJIT_INLINE void setI64(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept { - sq[0] = x0; sq[1] = x1; sq[2] = x2; sq[3] = x3; - } - - //! Set all four 64-bit unsigned integers. - ASMJIT_INLINE void setU64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept { - uq[0] = x0; uq[1] = x1; uq[2] = x2; uq[3] = x3; - } - - //! Set all eight SP-FP floats. - ASMJIT_INLINE void setF32(float x0) noexcept { - sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0; - sf[4] = x0; sf[5] = x0; sf[6] = x0; sf[7] = x0; - } - - //! Set all eight SP-FP floats. - ASMJIT_INLINE void setF32( - float x0, float x1, float x2, float x3, - float x4, float x5, float x6, float x7) noexcept { - - sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3; - sf[4] = x4; sf[5] = x5; sf[6] = x6; sf[7] = x7; - } - - //! Set all four DP-FP floats. - ASMJIT_INLINE void setF64(double x0) noexcept { - df[0] = x0; df[1] = x0; df[2] = x0; df[3] = x0; - } - - //! Set all four DP-FP floats. - ASMJIT_INLINE void setF64(double x0, double x1, double x2, double x3) noexcept { - df[0] = x0; df[1] = x1; df[2] = x2; df[3] = x3; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - //! Array of thirty two 8-bit signed integers. int8_t sb[32]; //! Array of thirty two 8-bit unsigned integers. @@ -1062,14 +653,404 @@ union Data256 { float sf[8]; //! Array of four 64-bit double precision floating points. double df[4]; + + //! \name Construction & Destruction + //! \{ + + //! Sets all thirty two 8-bit signed integers. 
+ static inline Data256 fromI8(int8_t x0) noexcept { + Data256 self; + self.setI8(x0); + return self; + } + + //! Sets all thirty two 8-bit unsigned integers. + static inline Data256 fromU8(uint8_t x0) noexcept { + Data256 self; + self.setU8(x0); + return self; + } + + //! Sets all thirty two 8-bit signed integers. + static inline Data256 fromI8( + int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , + int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , + int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, + int8_t x12, int8_t x13, int8_t x14, int8_t x15, + int8_t x16, int8_t x17, int8_t x18, int8_t x19, + int8_t x20, int8_t x21, int8_t x22, int8_t x23, + int8_t x24, int8_t x25, int8_t x26, int8_t x27, + int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept { + + Data256 self; + self.setI8( + x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15, + x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31); + return self; + } + + //! Sets all thirty two 8-bit unsigned integers. + static inline Data256 fromU8( + uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , + uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , + uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, + uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15, + uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19, + uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23, + uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27, + uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept { + + Data256 self; + self.setU8( + x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15, + x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31); + return self; + } + + //! Sets all sixteen 16-bit signed integers. + static inline Data256 fromI16(int16_t x0) noexcept { + Data256 self; + self.setI16(x0); + return self; + } + + //! Sets all sixteen 16-bit unsigned integers. + static inline Data256 fromU16(uint16_t x0) noexcept { + Data256 self; + self.setU16(x0); + return self; + } + + //! Sets all sixteen 16-bit signed integers. + static inline Data256 fromI16( + int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7 , + int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept { + + Data256 self; + self.setI16(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + return self; + } + + //! Sets all sixteen 16-bit unsigned integers. + static inline Data256 fromU16( + uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7 , + uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept { + + Data256 self; + self.setU16(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + return self; + } + + //! Sets all eight 32-bit signed integers. + static inline Data256 fromI32(int32_t x0) noexcept { + Data256 self; + self.setI32(x0); + return self; + } + + //! Sets all eight 32-bit unsigned integers. + static inline Data256 fromU32(uint32_t x0) noexcept { + Data256 self; + self.setU32(x0); + return self; + } + + //! Sets all eight 32-bit signed integers. + static inline Data256 fromI32( + int32_t x0, int32_t x1, int32_t x2, int32_t x3, + int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept { + + Data256 self; + self.setI32(x0, x1, x2, x3, x4, x5, x6, x7); + return self; + } + + //! Sets all eight 32-bit unsigned integers. 
+ static inline Data256 fromU32( + uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept { + + Data256 self; + self.setU32(x0, x1, x2, x3, x4, x5, x6, x7); + return self; + } + + //! Sets all four 64-bit signed integers. + static inline Data256 fromI64(int64_t x0) noexcept { + Data256 self; + self.setI64(x0); + return self; + } + + //! Sets all four 64-bit unsigned integers. + static inline Data256 fromU64(uint64_t x0) noexcept { + Data256 self; + self.setU64(x0); + return self; + } + + //! Sets all four 64-bit signed integers. + static inline Data256 fromI64(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept { + Data256 self; + self.setI64(x0, x1, x2, x3); + return self; + } + + //! Sets all four 64-bit unsigned integers. + static inline Data256 fromU64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept { + Data256 self; + self.setU64(x0, x1, x2, x3); + return self; + } + + //! Sets all eight SP-FP floats. + static inline Data256 fromF32(float x0) noexcept { + Data256 self; + self.setF32(x0); + return self; + } + + //! Sets all eight SP-FP floats. + static inline Data256 fromF32( + float x0, float x1, float x2, float x3, + float x4, float x5, float x6, float x7) noexcept { + + Data256 self; + self.setF32(x0, x1, x2, x3, x4, x5, x6, x7); + return self; + } + + //! Sets all four DP-FP floats. + static inline Data256 fromF64(double x0) noexcept { + Data256 self; + self.setF64(x0); + return self; + } + + //! Sets all four DP-FP floats. + static inline Data256 fromF64(double x0, double x1, double x2, double x3) noexcept { + Data256 self; + self.setF64(x0, x1, x2, x3); + return self; + } + + //! \} + + //! \name Accessors + //! \{ + + //! Sets all thirty two 8-bit signed integers. + inline void setI8(int8_t x0) noexcept { + setU8(uint8_t(x0)); + } + + //! Sets all thirty two 8-bit unsigned integers. + inline void setU8(uint8_t x0) noexcept { + if (ASMJIT_ARCH_BITS >= 64) { + uint64_t xq = uint64_t(x0) * 0x0101010101010101u; + uq[0] = xq; + uq[1] = xq; + uq[2] = xq; + uq[3] = xq; + } + else { + uint32_t xd = uint32_t(x0) * 0x01010101u; + ud[0] = xd; + ud[1] = xd; + ud[2] = xd; + ud[3] = xd; + ud[4] = xd; + ud[5] = xd; + ud[6] = xd; + ud[7] = xd; + } + } + + //! Sets all thirty two 8-bit signed integers. + inline void setI8( + int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , + int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , + int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, + int8_t x12, int8_t x13, int8_t x14, int8_t x15, + int8_t x16, int8_t x17, int8_t x18, int8_t x19, + int8_t x20, int8_t x21, int8_t x22, int8_t x23, + int8_t x24, int8_t x25, int8_t x26, int8_t x27, + int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept { + + sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ; + sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ; + sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11; + sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15; + sb[16] = x16; sb[17] = x17; sb[18] = x18; sb[19] = x19; + sb[20] = x20; sb[21] = x21; sb[22] = x22; sb[23] = x23; + sb[24] = x24; sb[25] = x25; sb[26] = x26; sb[27] = x27; + sb[28] = x28; sb[29] = x29; sb[30] = x30; sb[31] = x31; + } + + //! Sets all thirty two 8-bit unsigned integers. 
+ inline void setU8( + uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , + uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , + uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, + uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15, + uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19, + uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23, + uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27, + uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept { + + ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ; + ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ; + ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11; + ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15; + ub[16] = x16; ub[17] = x17; ub[18] = x18; ub[19] = x19; + ub[20] = x20; ub[21] = x21; ub[22] = x22; ub[23] = x23; + ub[24] = x24; ub[25] = x25; ub[26] = x26; ub[27] = x27; + ub[28] = x28; ub[29] = x29; ub[30] = x30; ub[31] = x31; + } + + //! Sets all sixteen 16-bit signed integers. + inline void setI16(int16_t x0) noexcept { + setU16(uint16_t(x0)); + } + + //! Sets all eight 16-bit unsigned integers. + inline void setU16(uint16_t x0) noexcept { + if (ASMJIT_ARCH_BITS >= 64) { + uint64_t xq = uint64_t(x0) * 0x0001000100010001u; + uq[0] = xq; + uq[1] = xq; + uq[2] = xq; + uq[3] = xq; + } + else { + uint32_t xd = uint32_t(x0) * 0x00010001u; + ud[0] = xd; + ud[1] = xd; + ud[2] = xd; + ud[3] = xd; + ud[4] = xd; + ud[5] = xd; + ud[6] = xd; + ud[7] = xd; + } + } + + //! Sets all sixteen 16-bit signed integers. + inline void setI16( + int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7, + int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept { + + sw[0 ] = x0 ; sw[1 ] = x1 ; sw[2 ] = x2 ; sw[3 ] = x3 ; + sw[4 ] = x4 ; sw[5 ] = x5 ; sw[6 ] = x6 ; sw[7 ] = x7 ; + sw[8 ] = x8 ; sw[9 ] = x9 ; sw[10] = x10; sw[11] = x11; + sw[12] = x12; sw[13] = x13; sw[14] = x14; sw[15] = x15; + } + + //! Sets all sixteen 16-bit unsigned integers. + inline void setU16( + uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7, + uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept { + + uw[0 ] = x0 ; uw[1 ] = x1 ; uw[2 ] = x2 ; uw[3 ] = x3 ; + uw[4 ] = x4 ; uw[5 ] = x5 ; uw[6 ] = x6 ; uw[7 ] = x7 ; + uw[8 ] = x8 ; uw[9 ] = x9 ; uw[10] = x10; uw[11] = x11; + uw[12] = x12; uw[13] = x13; uw[14] = x14; uw[15] = x15; + } + + //! Sets all eight 32-bit signed integers. + inline void setI32(int32_t x0) noexcept { + setU32(uint32_t(x0)); + } + + //! Sets all eight 32-bit unsigned integers. + inline void setU32(uint32_t x0) noexcept { + if (ASMJIT_ARCH_BITS >= 64) { + uint64_t xq = (uint64_t(x0) << 32) + x0; + uq[0] = xq; + uq[1] = xq; + uq[2] = xq; + uq[3] = xq; + } + else { + ud[0] = x0; + ud[1] = x0; + ud[2] = x0; + ud[3] = x0; + ud[4] = x0; + ud[5] = x0; + ud[6] = x0; + ud[7] = x0; + } + } + + //! Sets all eight 32-bit signed integers. + inline void setI32( + int32_t x0, int32_t x1, int32_t x2, int32_t x3, + int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept { + + sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3; + sd[4] = x4; sd[5] = x5; sd[6] = x6; sd[7] = x7; + } + + //! Sets all eight 32-bit unsigned integers. 
+ inline void setU32( + uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept { + + ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3; + ud[4] = x4; ud[5] = x5; ud[6] = x6; ud[7] = x7; + } + + //! Sets all four 64-bit signed integers. + inline void setI64(int64_t x0) noexcept { + sq[0] = x0; sq[1] = x0; sq[2] = x0; sq[3] = x0; + } + + //! Sets all four 64-bit unsigned integers. + inline void setU64(uint64_t x0) noexcept { + uq[0] = x0; uq[1] = x0; uq[2] = x0; uq[3] = x0; + } + + //! Sets all four 64-bit signed integers. + inline void setI64(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept { + sq[0] = x0; sq[1] = x1; sq[2] = x2; sq[3] = x3; + } + + //! Sets all four 64-bit unsigned integers. + inline void setU64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept { + uq[0] = x0; uq[1] = x1; uq[2] = x2; uq[3] = x3; + } + + //! Sets all eight SP-FP floats. + inline void setF32(float x0) noexcept { + sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0; + sf[4] = x0; sf[5] = x0; sf[6] = x0; sf[7] = x0; + } + + //! Sets all eight SP-FP floats. + inline void setF32( + float x0, float x1, float x2, float x3, + float x4, float x5, float x6, float x7) noexcept { + + sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3; + sf[4] = x4; sf[5] = x5; sf[6] = x6; sf[7] = x7; + } + + //! Sets all four DP-FP floats. + inline void setF64(double x0) noexcept { + df[0] = x0; df[1] = x0; df[2] = x0; df[3] = x0; + } + + //! Sets all four DP-FP floats. + inline void setF64(double x0, double x1, double x2, double x3) noexcept { + df[0] = x0; df[1] = x1; df[2] = x2; df[3] = x3; + } + + //! \} }; //! \} -} // asmjit namespace +ASMJIT_END_NAMESPACE -// [Api-End] -#include "../asmjit_apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_SIMDTYPES_H +#endif // _ASMJIT_CORE_DATATYPES_H diff --git a/src/asmjit/core/emitter.cpp b/src/asmjit/core/emitter.cpp new file mode 100644 index 0000000..f319020 --- /dev/null +++ b/src/asmjit/core/emitter.cpp @@ -0,0 +1,257 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
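// Illustrative usage sketch of the Data256 helpers defined in datatypes.h above:
// a 256-bit constant can be built either by broadcasting a single value or by
// giving every lane explicitly. Embedding it through BaseEmitter::embed() is an
// assumption based on the embed() API declared later in emitter.h; `emitter` is
// a hypothetical attached emitter and the asmjit headers are assumed included.
static void data256UsageSketch(asmjit::BaseEmitter* emitter) {
  using namespace asmjit;

  Data256 ones = Data256::fromU32(1u);                      // broadcast 1 to all eight 32-bit lanes
  Data256 ramp = Data256::fromU32(0, 1, 2, 3, 4, 5, 6, 7);  // all lanes given explicitly

  emitter->embed(&ones, uint32_t(sizeof(ones)));            // emit 32 bytes of data
  emitter->embed(&ramp, uint32_t(sizeof(ramp)));
}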
+ +#define ASMJIT_EXPORTS + +#include "../core/logging.h" +#include "../core/support.h" + +#ifdef ASMJIT_BUILD_X86 + #include "../x86/x86internal_p.h" + #include "../x86/x86instdb_p.h" +#endif // ASMJIT_BUILD_X86 + +#ifdef ASMJIT_BUILD_ARM + #include "../arm/arminternal_p.h" + #include "../arm/arminstdb.h" +#endif // ASMJIT_BUILD_ARM + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::BaseEmitter - Construction / Destruction] +// ============================================================================ + +BaseEmitter::BaseEmitter(uint32_t type) noexcept + : _type(uint8_t(type)), + _reserved(0), + _flags(0), + _emitterOptions(0), + _code(nullptr), + _errorHandler(nullptr), + _codeInfo(), + _gpRegInfo(), + _privateData(0), + _instOptions(0), + _globalInstOptions(BaseInst::kOptionReserved), + _extraReg(), + _inlineComment(nullptr) {} + +BaseEmitter::~BaseEmitter() noexcept { + if (_code) { + _addFlags(kFlagDestroyed); + _code->detach(this); + } +} + +// ============================================================================ +// [asmjit::BaseEmitter - Code-Generation] +// ============================================================================ + +Error BaseEmitter::_emitOpArray(uint32_t instId, const Operand_* operands, size_t count) { + const Operand_* op = operands; + const Operand& none_ = Globals::none; + + switch (count) { + case 0: return _emit(instId, none_, none_, none_, none_); + case 1: return _emit(instId, op[0], none_, none_, none_); + case 2: return _emit(instId, op[0], op[1], none_, none_); + case 3: return _emit(instId, op[0], op[1], op[2], none_); + case 4: return _emit(instId, op[0], op[1], op[2], op[3]); + case 5: return _emit(instId, op[0], op[1], op[2], op[3], op[4], none_); + case 6: return _emit(instId, op[0], op[1], op[2], op[3], op[4], op[5]); + default: return DebugUtils::errored(kErrorInvalidArgument); + } +} + +// ============================================================================ +// [asmjit::BaseEmitter - Finalize] +// ============================================================================ + +Label BaseEmitter::labelByName(const char* name, size_t nameSize, uint32_t parentId) noexcept { + return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : uint32_t(Globals::kInvalidId)); +} + +// ============================================================================ +// [asmjit::BaseEmitter - Finalize] +// ============================================================================ + +Error BaseEmitter::finalize() { + // Does nothing by default, overridden by `BaseBuilder` and `BaseCompiler`. 
+ return kErrorOk; +} + +// ============================================================================ +// [asmjit::BaseEmitter - Error Handling] +// ============================================================================ + +Error BaseEmitter::reportError(Error err, const char* message) { + ErrorHandler* handler = errorHandler(); + if (!handler) { + if (code()) + handler = code()->errorHandler(); + } + + if (handler) { + if (!message) + message = DebugUtils::errorAsString(err); + handler->handleError(err, message, this); + } + + return err; +} + +// ============================================================================ +// [asmjit::BaseEmitter - Label Management] +// ============================================================================ + +bool BaseEmitter::isLabelValid(uint32_t labelId) const noexcept { + return _code && labelId < _code->labelCount(); +} + +// ============================================================================ +// [asmjit::BaseEmitter - Emit (High-Level)] +// ============================================================================ + +ASMJIT_FAVOR_SIZE Error BaseEmitter::emitProlog(const FuncFrame& frame) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + #ifdef ASMJIT_BUILD_X86 + if (archInfo().isX86Family()) + return x86::X86Internal::emitProlog(as(), frame); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (archInfo().isArmFamily()) + return arm::ArmInternal::emitProlog(as(), frame); + #endif + + return DebugUtils::errored(kErrorInvalidArch); +} + +ASMJIT_FAVOR_SIZE Error BaseEmitter::emitEpilog(const FuncFrame& frame) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + #ifdef ASMJIT_BUILD_X86 + if (archInfo().isX86Family()) + return x86::X86Internal::emitEpilog(as(), frame); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (archInfo().isArmFamily()) + return arm::ArmInternal::emitEpilog(as(), frame); + #endif + + return DebugUtils::errored(kErrorInvalidArch); +} + +ASMJIT_FAVOR_SIZE Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + #ifdef ASMJIT_BUILD_X86 + if (archInfo().isX86Family()) + return x86::X86Internal::emitArgsAssignment(as(), frame, args); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (archInfo().isArmFamily()) + return arm::ArmInternal::emitArgsAssignment(as(), frame, args); + #endif + + return DebugUtils::errored(kErrorInvalidArch); +} + +// ============================================================================ +// [asmjit::BaseEmitter - Comment] +// ============================================================================ + +Error BaseEmitter::commentf(const char* fmt, ...) 
{ + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + #ifndef ASMJIT_NO_LOGGING + StringTmp<1024> sb; + + va_list ap; + va_start(ap, fmt); + Error err = sb.appendVFormat(fmt, ap); + va_end(ap); + + if (ASMJIT_UNLIKELY(err)) + return err; + + return comment(sb.data(), sb.size()); + #else + ASMJIT_UNUSED(fmt); + return kErrorOk; + #endif +} + +Error BaseEmitter::commentv(const char* fmt, va_list ap) { + if (ASMJIT_UNLIKELY(!_code)) + return DebugUtils::errored(kErrorNotInitialized); + + #ifndef ASMJIT_NO_LOGGING + StringTmp<1024> sb; + + Error err = sb.appendVFormat(fmt, ap); + if (ASMJIT_UNLIKELY(err)) + return err; + + return comment(sb.data(), sb.size()); + #else + ASMJIT_UNUSED(fmt); + ASMJIT_UNUSED(ap); + return kErrorOk; + #endif +} + +// ============================================================================ +// [asmjit::BaseEmitter - Events] +// ============================================================================ + +Error BaseEmitter::onAttach(CodeHolder* code) noexcept { + _code = code; + _codeInfo = code->codeInfo(); + _emitterOptions = code->emitterOptions(); + + onUpdateGlobalInstOptions(); + return kErrorOk; +} + +Error BaseEmitter::onDetach(CodeHolder* code) noexcept { + ASMJIT_UNUSED(code); + + _flags = 0; + _emitterOptions = 0; + _errorHandler = nullptr; + + _codeInfo.reset(); + _gpRegInfo.reset(); + _privateData = 0; + + _instOptions = 0; + _globalInstOptions = BaseInst::kOptionReserved; + _extraReg.reset(); + _inlineComment = nullptr; + + return kErrorOk; +} + +void BaseEmitter::onUpdateGlobalInstOptions() noexcept { + constexpr uint32_t kCriticalEmitterOptions = + kOptionLoggingEnabled | + kOptionStrictValidation ; + + _globalInstOptions &= ~BaseInst::kOptionReserved; + if ((_emitterOptions & kCriticalEmitterOptions) != 0) + _globalInstOptions |= BaseInst::kOptionReserved; +} + +ASMJIT_END_NAMESPACE diff --git a/src/asmjit/core/emitter.h b/src/asmjit/core/emitter.h new file mode 100644 index 0000000..a139634 --- /dev/null +++ b/src/asmjit/core/emitter.h @@ -0,0 +1,532 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_EMITTER_H +#define _ASMJIT_CORE_EMITTER_H + +#include "../core/arch.h" +#include "../core/inst.h" +#include "../core/operand.h" +#include "../core/codeholder.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_core +//! \{ + +// ============================================================================ +// [Forward Declarations] +// ============================================================================ + +class ConstPool; +class FuncFrame; +class FuncArgsAssignment; + +// ============================================================================ +// [asmjit::BaseEmitter] +// ============================================================================ + +//! Provides a base foundation to emit code - specialized by `Assembler` and +//! `BaseBuilder`. +class ASMJIT_VIRTAPI BaseEmitter { +public: + ASMJIT_BASE_CLASS(BaseEmitter) + + //! See `EmitterType`. + uint8_t _type; + //! Reserved for future use. + uint8_t _reserved; + //! See \ref BaseEmitter::Flags. + uint16_t _flags; + //! Emitter options, always in sync with CodeHolder. + uint32_t _emitterOptions; + + //! CodeHolder the BaseEmitter is attached to. + CodeHolder* _code; + //! Attached `ErrorHandler`. + ErrorHandler* _errorHandler; + + //! Basic information about the code (matches CodeHolder::_codeInfo). + CodeInfo _codeInfo; + //! 
Native GP register signature and signature related information. + RegInfo _gpRegInfo; + //! Internal private data used freely by any emitter. + uint32_t _privateData; + + //! Next instruction options (affects the next instruction). + uint32_t _instOptions; + //! Global Instruction options (combined with `_instOptions` by `emit...()`). + uint32_t _globalInstOptions; + //! Extra register (op-mask {k} on AVX-512) (affects the next instruction). + RegOnly _extraReg; + //! Inline comment of the next instruction (affects the next instruction). + const char* _inlineComment; + + //! Emitter type. + enum EmitterType : uint32_t { + kTypeNone = 0, + kTypeAssembler = 1, + kTypeBuilder = 2, + kTypeCompiler = 3, + kTypeCount = 4 + }; + + //! Emitter flags. + enum Flags : uint32_t { + //! The emitter was finalized. + kFlagFinalized = 0x4000u, + //! The emitter was destroyed. + kFlagDestroyed = 0x8000u + }; + + //! Emitter options. + enum Options : uint32_t { + //! Logging is enabled, `BaseEmitter::logger()` must return a valid logger. + //! This option is set automatically by the emitter if the logger is present. + //! User code should never alter this value. + //! + //! Default `false`. + kOptionLoggingEnabled = 0x00000001u, + + //! Stricly validate each instruction before it's emitted. + //! + //! Default `false`. + kOptionStrictValidation = 0x00000002u, + + //! Emit instructions that are optimized for size, if possible. + //! + //! Default `false`. + //! + //! X86 Specific + //! ------------ + //! + //! When this option is set it the assembler will try to fix instructions + //! if possible into operation equivalent instructions that take less bytes + //! by taking advantage of implicit zero extension. For example instruction + //! like `mov r64, imm` and `and r64, imm` can be translated to `mov r32, imm` + //! and `and r32, imm` when the immediate constant is lesser than `2^31`. + kOptionOptimizedForSize = 0x00000004u, + + //! Emit optimized code-alignment sequences. + //! + //! Default `false`. + //! + //! X86 Specific + //! ------------ + //! + //! Default align sequence used by X86 architecture is one-byte (0x90) + //! opcode that is often shown by disassemblers as NOP. However there are + //! more optimized align sequences for 2-11 bytes that may execute faster + //! on certain CPUs. If this feature is enabled AsmJit will generate + //! specialized sequences for alignment between 2 to 11 bytes. + kOptionOptimizedAlign = 0x00000008u, + + //! Emit jump-prediction hints. + //! + //! Default `false`. + //! + //! X86 Specific + //! ------------ + //! + //! Jump prediction is usually based on the direction of the jump. If the + //! jump is backward it is usually predicted as taken; and if the jump is + //! forward it is usually predicted as not-taken. The reason is that loops + //! generally use backward jumps and conditions usually use forward jumps. + //! However this behavior can be overridden by using instruction prefixes. + //! If this option is enabled these hints will be emitted. + //! + //! This feature is disabled by default, because the only processor that + //! used to take into consideration prediction hints was P4. Newer processors + //! implement heuristics for branch prediction and ignore static hints. This + //! means that this feature can be used for annotation purposes. + kOptionPredictedJumps = 0x00000010u + }; + + //! \name Construction & Destruction + //! \{ + + ASMJIT_API explicit BaseEmitter(uint32_t type) noexcept; + ASMJIT_API virtual ~BaseEmitter() noexcept; + + //! \} + + //! 
\name Cast
+  //! \{
+
+  template<typename T>
+  inline T* as() noexcept { return reinterpret_cast<T*>(this); }
+
+  template<typename T>
+  inline const T* as() const noexcept { return reinterpret_cast<const T*>(this); }
+
+  //! \}
+
+  //! \name Emitter Type & Flags
+  //! \{
+
+  //! Returns the type of this emitter, see `EmitterType`.
+  inline uint32_t emitterType() const noexcept { return _type; }
+  //! Returns emitter flags, see `Flags`.
+  inline uint32_t emitterFlags() const noexcept { return _flags; }
+
+  //! Tests whether the emitter inherits from `BaseAssembler`.
+  inline bool isAssembler() const noexcept { return _type == kTypeAssembler; }
+  //! Tests whether the emitter inherits from `BaseBuilder`.
+  //!
+  //! \note Both Builder and Compiler emitters would return `true`.
+  inline bool isBuilder() const noexcept { return _type >= kTypeBuilder; }
+  //! Tests whether the emitter inherits from `BaseCompiler`.
+  inline bool isCompiler() const noexcept { return _type == kTypeCompiler; }
+
+  //! Tests whether the emitter has the given `flag` enabled.
+  inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+  //! Tests whether the emitter is finalized.
+  inline bool isFinalized() const noexcept { return hasFlag(kFlagFinalized); }
+  //! Tests whether the emitter is destroyed (only used during destruction).
+  inline bool isDestroyed() const noexcept { return hasFlag(kFlagDestroyed); }
+
+  inline void _addFlags(uint32_t flags) noexcept { _flags = uint16_t(_flags | flags); }
+  inline void _clearFlags(uint32_t flags) noexcept { _flags = uint16_t(_flags & ~flags); }
+
+  //! \}
+
+  //! \name Target Information
+  //! \{
+
+  //! Returns the CodeHolder this emitter is attached to.
+  inline CodeHolder* code() const noexcept { return _code; }
+  //! Returns information about the code, see `CodeInfo`.
+  inline const CodeInfo& codeInfo() const noexcept { return _codeInfo; }
+  //! Returns information about the architecture, see `ArchInfo`.
+  inline const ArchInfo& archInfo() const noexcept { return _codeInfo.archInfo(); }
+
+  //! Tests whether the target architecture is 32-bit.
+  inline bool is32Bit() const noexcept { return archInfo().is32Bit(); }
+  //! Tests whether the target architecture is 64-bit.
+  inline bool is64Bit() const noexcept { return archInfo().is64Bit(); }
+
+  //! Returns the target architecture type.
+  inline uint32_t archId() const noexcept { return archInfo().archId(); }
+  //! Returns the target architecture sub-type.
+  inline uint32_t archSubId() const noexcept { return archInfo().archSubId(); }
+  //! Returns the target architecture's GP register size (4 or 8 bytes).
+  inline uint32_t gpSize() const noexcept { return archInfo().gpSize(); }
+  //! Returns the number of target GP registers.
+  inline uint32_t gpCount() const noexcept { return archInfo().gpCount(); }
+
+  //! \}
+
+  //! \name Initialization & Finalization
+  //! \{
+
+  //! Tests whether the BaseEmitter is initialized (i.e. attached to the `CodeHolder`).
+  inline bool isInitialized() const noexcept { return _code != nullptr; }
+
+  ASMJIT_API virtual Error finalize();
+
+  //! \}
+
+  //! \name Emitter Options
+  //! \{
+
+  //! Tests whether the `option` is present in emitter options.
+  inline bool hasEmitterOption(uint32_t option) const noexcept { return (_emitterOptions & option) != 0; }
+  //! Returns the emitter options.
+  inline uint32_t emitterOptions() const noexcept { return _emitterOptions; }
+
+  // TODO: Deprecate and remove, CodeHolder::addEmitterOptions() is the way.
+ inline void addEmitterOptions(uint32_t options) noexcept { + _emitterOptions |= options; + onUpdateGlobalInstOptions(); + } + + inline void clearEmitterOptions(uint32_t options) noexcept { + _emitterOptions &= ~options; + onUpdateGlobalInstOptions(); + } + + //! Returns the global instruction options. + //! + //! Default instruction options are merged with instruction options before the + //! instruction is encoded. These options have some bits reserved that are used + //! for error handling, logging, and strict validation. Other options are globals that + //! affect each instruction, for example if VEX3 is set globally, it will all + //! instructions, even those that don't have such option set. + inline uint32_t globalInstOptions() const noexcept { return _globalInstOptions; } + + //! \} + + //! \name Error Handling + //! \{ + + //! Tests whether the local error handler is attached. + inline bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; } + //! Returns the local error handler. + inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; } + //! Sets the local error handler. + inline void setErrorHandler(ErrorHandler* handler) noexcept { _errorHandler = handler; } + //! Resets the local error handler (does nothing if not attached). + inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); } + + //! Handles the given error in the following way: + //! 1. Gets either Emitter's (preferred) or CodeHolder's ErrorHandler. + //! 2. If exists, calls `ErrorHandler::handleError(error, message, this)`. + //! 3. Returns the given `err` if ErrorHandler haven't thrown. + ASMJIT_API Error reportError(Error err, const char* message = nullptr); + + //! \} + + //! \name Instruction Options + //! \{ + + //! Returns options of the next instruction. + inline uint32_t instOptions() const noexcept { return _instOptions; } + //! Returns options of the next instruction. + inline void setInstOptions(uint32_t options) noexcept { _instOptions = options; } + //! Adds options of the next instruction. + inline void addInstOptions(uint32_t options) noexcept { _instOptions |= options; } + //! Resets options of the next instruction. + inline void resetInstOptions() noexcept { _instOptions = 0; } + + //! Tests whether the extra register operand is valid. + inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); } + //! Returns an extra operand that will be used by the next instruction (architecture specific). + inline const RegOnly& extraReg() const noexcept { return _extraReg; } + //! Sets an extra operand that will be used by the next instruction (architecture specific). + inline void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); } + //! Sets an extra operand that will be used by the next instruction (architecture specific). + inline void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); } + //! Resets an extra operand that will be used by the next instruction (architecture specific). + inline void resetExtraReg() noexcept { _extraReg.reset(); } + + //! Returns comment/annotation of the next instruction. + inline const char* inlineComment() const noexcept { return _inlineComment; } + //! Sets comment/annotation of the next instruction. + //! + //! \note This string is set back to null by `_emit()`, but until that it has + //! to remain valid as the Emitter is not required to make a copy of it (and + //! it would be slow to do that for each instruction). 
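// Illustrative usage sketch of the option, error-handling and inline-comment
// accessors declared around this point. `assembler` and `myHandler` are
// hypothetical; any object derived from BaseEmitter and any ErrorHandler
// implementation would be used the same way.
static void emitterConfigSketch(asmjit::BaseEmitter* assembler, asmjit::ErrorHandler* myHandler) {
  using namespace asmjit;

  assembler->setErrorHandler(myHandler);                                // attach a local error handler
  assembler->addEmitterOptions(BaseEmitter::kOptionStrictValidation);   // validate each instruction before emit
  assembler->setInlineComment("hot loop");                              // annotates only the next instruction
}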
+ inline void setInlineComment(const char* s) noexcept { _inlineComment = s; } + //! Resets the comment/annotation to nullptr. + inline void resetInlineComment() noexcept { _inlineComment = nullptr; } + + //! \} + + //! \name Sections + //! \{ + + virtual Error section(Section* section) = 0; + + //! \} + + //! \name Labels + //! \{ + + //! Creates a new label. + virtual Label newLabel() = 0; + //! Creates a new named label. + virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) = 0; + + //! Returns `Label` by `name`. + //! + //! Returns invalid Label in case that the name is invalid or label was not found. + //! + //! \note This function doesn't trigger ErrorHandler in case the name is invalid + //! or no such label exist. You must always check the validity of the `Label` returned. + ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept; + + //! Binds the `label` to the current position of the current section. + //! + //! \note Attempt to bind the same label multiple times will return an error. + virtual Error bind(const Label& label) = 0; + + //! Tests whether the label `id` is valid (i.e. registered). + ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept; + //! Tests whether the `label` is valid (i.e. registered). + inline bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); } + + //! \} + + //! \name Emit + //! \{ + + // NOTE: These `emit()` helpers are designed to address a code-bloat generated + // by C++ compilers to call a function having many arguments. Each parameter to + // `_emit()` requires some code to pass it, which means that if we default to 4 + // operand parameters in `_emit()` and instId the C++ compiler would have to + // generate a virtual function call having 5 parameters, which is quite a lot. + // Since by default asm instructions have 2 to 3 operands it's better to + // introduce helpers that pass those and fill out the remaining operands. + + #define OP const Operand_& + #define NONE Globals::none + + //! Emits an instruction. + ASMJIT_NOINLINE Error emit(uint32_t instId) { return _emit(instId, NONE, NONE, NONE, NONE); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0) { return _emit(instId, o0, NONE, NONE, NONE); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1) { return _emit(instId, o0, o1, NONE, NONE); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2) { return _emit(instId, o0, o1, o2, NONE); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3) { return _emit(instId, o0, o1, o2, o3); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4) { return _emit(instId, o0, o1, o2, o3, o4, NONE); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, OP o5) { return _emit(instId, o0, o1, o2, o3, o4, o5); } + + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, int o0) { return _emit(instId, Imm(o0), NONE, NONE, NONE); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, int o1) { return _emit(instId, o0, Imm(o1), NONE, NONE); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, int o2) { return _emit(instId, o0, o1, Imm(o2), NONE); } + //! 
\overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, int o3) { return _emit(instId, o0, o1, o2, Imm(o3)); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int o4) { return _emit(instId, o0, o1, o2, o3, Imm(o4), NONE); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int o5) { return _emit(instId, o0, o1, o2, o3, o4, Imm(o5)); } + + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, int64_t o0) { return _emit(instId, Imm(o0), NONE, NONE, NONE); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, int64_t o1) { return _emit(instId, o0, Imm(o1), NONE, NONE); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, int64_t o2) { return _emit(instId, o0, o1, Imm(o2), NONE); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, int64_t o3) { return _emit(instId, o0, o1, o2, Imm(o3)); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int64_t o4) { return _emit(instId, o0, o1, o2, o3, Imm(o4), NONE); } + //! \overload + ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int64_t o5) { return _emit(instId, o0, o1, o2, o3, o4, Imm(o5)); } + + //! \overload + inline Error emit(uint32_t instId, unsigned int o0) { return emit(instId, int64_t(o0)); } + //! \overload + inline Error emit(uint32_t instId, OP o0, unsigned int o1) { return emit(instId, o0, int64_t(o1)); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, unsigned int o2) { return emit(instId, o0, o1, int64_t(o2)); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, unsigned int o3) { return emit(instId, o0, o1, o2, int64_t(o3)); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, unsigned int o4) { return emit(instId, o0, o1, o2, o3, int64_t(o4)); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, unsigned int o5) { return emit(instId, o0, o1, o2, o3, o4, int64_t(o5)); } + + //! \overload + inline Error emit(uint32_t instId, uint64_t o0) { return emit(instId, int64_t(o0)); } + //! \overload + inline Error emit(uint32_t instId, OP o0, uint64_t o1) { return emit(instId, o0, int64_t(o1)); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, uint64_t o2) { return emit(instId, o0, o1, int64_t(o2)); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, uint64_t o3) { return emit(instId, o0, o1, o2, int64_t(o3)); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, uint64_t o4) { return emit(instId, o0, o1, o2, o3, int64_t(o4)); } + //! \overload + inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, uint64_t o5) { return emit(instId, o0, o1, o2, o3, o4, int64_t(o5)); } + + #undef NONE + #undef OP + + inline Error emitOpArray(uint32_t instId, const Operand_* operands, size_t count) { return _emitOpArray(instId, operands, count); } + + inline Error emitInst(const BaseInst& inst, const Operand_* operands, size_t count) { + setInstOptions(inst.options()); + setExtraReg(inst.extraReg()); + return _emitOpArray(inst.id(), operands, count); + } + + //! \cond INTERNAL + //! Emits instruction having max 4 operands. + virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) = 0; + //! Emits instruction having max 6 operands. 
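// Illustrative usage sketch of the emit() helpers declared above. The x86
// instruction ids, registers, and x86::Assembler come from the X86 backend of
// this patch and are assumptions here; any instruction id and operands follow
// the same pattern.
static void emitSketch(asmjit::x86::Assembler& a) {
  using namespace asmjit;

  a.emit(x86::Inst::kIdMov, x86::eax, 42);        // reg, imm overload
  a.emit(x86::Inst::kIdAdd, x86::eax, x86::ebx);  // reg, reg overload

  Operand ops[2] = { x86::eax, x86::ebx };
  a.emitOpArray(x86::Inst::kIdSub, ops, 2);       // operands passed as an array
}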
+ virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) = 0; + //! Emits instruction having operands stored in array. + virtual Error _emitOpArray(uint32_t instId, const Operand_* operands, size_t count); + //! \endcond + + //! \} + + //! \name Emit Utilities + //! \{ + + ASMJIT_API Error emitProlog(const FuncFrame& layout); + ASMJIT_API Error emitEpilog(const FuncFrame& layout); + ASMJIT_API Error emitArgsAssignment(const FuncFrame& layout, const FuncArgsAssignment& args); + + //! \} + + //! \name Align + //! \{ + + //! Aligns the current CodeBuffer to the `alignment` specified. + //! + //! The sequence that is used to fill the gap between the aligned location + //! and the current location depends on the align `mode`, see `AlignMode`. + virtual Error align(uint32_t alignMode, uint32_t alignment) = 0; + + //! \} + + //! \name Embed + //! \{ + + //! Embeds raw data into the CodeBuffer. + virtual Error embed(const void* data, uint32_t dataSize) = 0; + + //! Embeds an absolute label address as data (4 or 8 bytes). + virtual Error embedLabel(const Label& label) = 0; + + //! Embeds a delta (distance) between the `label` and `base` calculating it + //! as `label - base`. This function was designed to make it easier to embed + //! lookup tables where each index is a relative distance of two labels. + virtual Error embedLabelDelta(const Label& label, const Label& base, uint32_t dataSize) = 0; + + //! Embeds a constant pool at the current offset by performing the following: + //! 1. Aligns by using kAlignData to the minimum `pool` alignment. + //! 2. Binds the ConstPool label so it's bound to an aligned location. + //! 3. Emits ConstPool content. + virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0; + + //! \} + + //! \name Comment + //! \{ + + //! Emits a comment stored in `data` with an optional `size` parameter. + virtual Error comment(const char* data, size_t size = SIZE_MAX) = 0; + + //! Emits a formatted comment specified by `fmt` and variable number of arguments. + ASMJIT_API Error commentf(const char* fmt, ...); + //! Emits a formatted comment specified by `fmt` and `ap`. + ASMJIT_API Error commentv(const char* fmt, va_list ap); + + //! \} + + //! \name Events + //! \{ + + //! Called after the emitter was attached to `CodeHolder`. + virtual Error onAttach(CodeHolder* code) noexcept = 0; + //! Called after the emitter was detached from `CodeHolder`. + virtual Error onDetach(CodeHolder* code) noexcept = 0; + + //! Called to update `_globalInstOptions` based on `_emitterOptions`. + //! + //! This function should only touch one bit `BaseInst::kOptionReserved`, which + //! is used to handle errors and special-cases in a way that minimizes branching. + ASMJIT_API void onUpdateGlobalInstOptions() noexcept; + + //! \} +}; + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_EMITTER_H diff --git a/src/asmjit/core/features.h b/src/asmjit/core/features.h new file mode 100644 index 0000000..f22b71a --- /dev/null +++ b/src/asmjit/core/features.h @@ -0,0 +1,145 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_FEATURES_H +#define _ASMJIT_CORE_FEATURES_H + +#include "../core/globals.h" +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_core +//! 
\{
+
+// ============================================================================
+// [asmjit::BaseFeatures]
+// ============================================================================
+
+class BaseFeatures {
+public:
+  typedef Support::BitWord BitWord;
+
+  enum : uint32_t {
+    kMaxFeatures = 128,
+    kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits
+  };
+
+  BitWord _bits[kNumBitWords];
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline BaseFeatures() noexcept { reset(); }
+  inline BaseFeatures(const BaseFeatures& other) noexcept = default;
+  inline explicit BaseFeatures(Globals::NoInit_) noexcept {}
+
+  inline void reset() noexcept {
+    for (size_t i = 0; i < kNumBitWords; i++)
+      _bits[i] = 0;
+  }
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  inline BaseFeatures& operator=(const BaseFeatures& other) noexcept = default;
+
+  inline bool operator==(const BaseFeatures& other) noexcept { return eq(other); }
+  inline bool operator!=(const BaseFeatures& other) noexcept { return !eq(other); }
+
+  //! \}
+
+  //! \name Cast
+  //! \{
+
+  template<typename T>
+  inline T& as() noexcept { return static_cast<T&>(*this); }
+
+  template<typename T>
+  inline const T& as() const noexcept { return static_cast<const T&>(*this); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns all features as `BitWord` array.
+  inline BitWord* bits() noexcept { return _bits; }
+  //! Returns all features as `BitWord` array (const).
+  inline const BitWord* bits() const noexcept { return _bits; }
+
+  //! Tests whether the feature `featureId` is present.
+  inline bool has(uint32_t featureId) const noexcept {
+    ASMJIT_ASSERT(featureId < kMaxFeatures);
+
+    uint32_t idx = featureId / Support::kBitWordSizeInBits;
+    uint32_t bit = featureId % Support::kBitWordSizeInBits;
+
+    return bool((_bits[idx] >> bit) & 0x1);
+  }
+
+  //! Tests whether all features as defined by `other` are present.
+  inline bool hasAll(const BaseFeatures& other) const noexcept {
+    for (uint32_t i = 0; i < kNumBitWords; i++)
+      if ((_bits[i] & other._bits[i]) != other._bits[i])
+        return false;
+    return true;
+  }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Adds the given CPU `featureId` to the list of features.
+  inline void add(uint32_t featureId) noexcept {
+    ASMJIT_ASSERT(featureId < kMaxFeatures);
+
+    uint32_t idx = featureId / Support::kBitWordSizeInBits;
+    uint32_t bit = featureId % Support::kBitWordSizeInBits;
+
+    _bits[idx] |= BitWord(1) << bit;
+  }
+
+  template<typename... ArgsT>
+  inline void add(uint32_t featureId, ArgsT... otherIds) noexcept {
+    add(featureId);
+    add(otherIds...);
+  }
+
+  //! Removes the given CPU `featureId` from the list of features.
+  inline void remove(uint32_t featureId) noexcept {
+    ASMJIT_ASSERT(featureId < kMaxFeatures);
+
+    uint32_t idx = featureId / Support::kBitWordSizeInBits;
+    uint32_t bit = featureId % Support::kBitWordSizeInBits;
+
+    _bits[idx] &= ~(BitWord(1) << bit);
+  }
+
+  template<typename... ArgsT>
+  inline void remove(uint32_t featureId, ArgsT... otherIds) noexcept {
+    remove(featureId);
+    remove(otherIds...);
+  }
+
+  inline bool eq(const BaseFeatures& other) const noexcept {
+    for (size_t i = 0; i < kNumBitWords; i++)
+      if (_bits[i] != other._bits[i])
+        return false;
+    return true;
+  }
+
+  //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // _ASMJIT_CORE_FEATURES_H
diff --git a/src/asmjit/core/func.cpp b/src/asmjit/core/func.cpp
new file mode 100644
index 0000000..248b291
--- /dev/null
+++ b/src/asmjit/core/func.cpp
@@ -0,0 +1,128 @@
+// [AsmJit]
+// Machine Code Generation for C++.
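// Illustrative usage sketch of BaseFeatures defined in features.h above. Real
// code would use architecture-specific feature ids (e.g. the x86 features
// mentioned in this patch); the numeric ids below are placeholders only.
static void featuresSketch() {
  using namespace asmjit;

  BaseFeatures features;
  features.add(1, 2, 5);                            // variadic add() of three feature ids
  bool hasBoth = features.has(1) && features.has(2);
  features.remove(2);                               // clear a single feature bit
  (void)hasBoth;
}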
+// +// [License] +// Zlib - See LICENSE.md file in the package. + +#define ASMJIT_EXPORTS + +#include "../core/arch.h" +#include "../core/func.h" +#include "../core/type.h" + +#ifdef ASMJIT_BUILD_X86 + #include "../x86/x86internal_p.h" + #include "../x86/x86operand.h" +#endif + +#ifdef ASMJIT_BUILD_ARM + #include "../arm/arminternal_p.h" + #include "../arm/armoperand.h" +#endif + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::FuncDetail - Init / Reset] +// ============================================================================ + +ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& sign) { + uint32_t ccId = sign.callConv(); + CallConv& cc = _callConv; + + uint32_t argCount = sign.argCount(); + if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs)) + return DebugUtils::errored(kErrorInvalidArgument); + + ASMJIT_PROPAGATE(cc.init(ccId)); + + uint32_t gpSize = (cc.archId() == ArchInfo::kIdX86) ? 4 : 8; + uint32_t deabstractDelta = Type::deabstractDeltaOfSize(gpSize); + + const uint8_t* args = sign.args(); + for (uint32_t i = 0; i < argCount; i++) { + FuncValue& arg = _args[i]; + arg.initTypeId(Type::deabstract(args[i], deabstractDelta)); + } + _argCount = uint8_t(argCount); + _vaIndex = uint8_t(sign.vaIndex()); + + uint32_t ret = sign.ret(); + if (ret != Type::kIdVoid) { + _rets[0].initTypeId(Type::deabstract(ret, deabstractDelta)); + _retCount = 1; + } + + #ifdef ASMJIT_BUILD_X86 + if (CallConv::isX86Family(ccId)) + return x86::X86Internal::initFuncDetail(*this, sign, gpSize); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (CallConv::isArmFamily(ccId)) + return arm::ArmInternal::initFuncDetail(*this, sign, gpSize); + #endif + + // We should never bubble here as if `cc.init()` succeeded then there has to + // be an implementation for the current architecture. However, stay safe. 
+ return DebugUtils::errored(kErrorInvalidArgument); +} + +// ============================================================================ +// [asmjit::FuncFrame - Init / Reset / Finalize] +// ============================================================================ + +ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept { + uint32_t ccId = func.callConv().id(); + + #ifdef ASMJIT_BUILD_X86 + if (CallConv::isX86Family(ccId)) + return x86::X86Internal::initFuncFrame(*this, func); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (CallConv::isArmFamily(ccId)) + return arm::ArmInternal::initFuncFrame(*this, func); + #endif + + return DebugUtils::errored(kErrorInvalidArgument); +} + +ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept { + #ifdef ASMJIT_BUILD_X86 + if (ArchInfo::isX86Family(archId())) + return x86::X86Internal::finalizeFuncFrame(*this); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (ArchInfo::isArmFamily(archId())) + return arm::ArmInternal::finalizeFuncFrame(*this); + #endif + + return DebugUtils::errored(kErrorInvalidArgument); +} + +// ============================================================================ +// [asmjit::FuncArgsAssignment] +// ============================================================================ + +ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) const noexcept { + const FuncDetail* func = funcDetail(); + if (!func) return DebugUtils::errored(kErrorInvalidState); + + uint32_t ccId = func->callConv().id(); + + #ifdef ASMJIT_BUILD_X86 + if (CallConv::isX86Family(ccId)) + return x86::X86Internal::argsToFuncFrame(*this, frame); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (CallConv::isArmFamily(ccId)) + return arm::ArmInternal::argsToFuncFrame(*this, frame); + #endif + + return DebugUtils::errored(kErrorInvalidArch); +} + +ASMJIT_END_NAMESPACE diff --git a/src/asmjit/core/func.h b/src/asmjit/core/func.h new file mode 100644 index 0000000..5d8d64e --- /dev/null +++ b/src/asmjit/core/func.h @@ -0,0 +1,949 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_FUNC_H +#define _ASMJIT_CORE_FUNC_H + +#include "../core/arch.h" +#include "../core/callconv.h" +#include "../core/operand.h" +#include "../core/type.h" +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_func +//! \{ + +// ============================================================================ +// [asmjit::FuncArgIndex] +// ============================================================================ + +//! Function argument index (lo/hi). +enum FuncArgIndex : uint32_t { + //! Maximum number of function arguments supported by AsmJit. + kFuncArgCount = Globals::kMaxFuncArgs, + //! Extended maximum number of arguments (used internally). + kFuncArgCountLoHi = kFuncArgCount * 2, + + //! Index to the LO part of function argument (default). + //! + //! This value is typically omitted and added only if there is HI argument + //! accessed. + kFuncArgLo = 0, + + //! Index to the HI part of function argument. + //! + //! HI part of function argument depends on target architecture. On x86 it's + //! typically used to transfer 64-bit integers (they form a pair of 32-bit + //! integers). + kFuncArgHi = kFuncArgCount +}; + +// ============================================================================ +// [asmjit::FuncSignature] +// ============================================================================ + +//! Function signature. +//! +//! 
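// Illustrative usage sketch tying FuncDetail/FuncFrame from this file to the
// emitProlog()/emitArgsAssignment()/emitEpilog() utilities declared in
// emitter.h. FuncSignatureT is defined just below in func.h; the emitter, the
// FuncArgsAssignment constructor form, and the register assignment a real
// backend would perform are assumptions here.
static asmjit::Error funcFrameSketch(asmjit::BaseEmitter* emitter) {
  using namespace asmjit;

  FuncDetail func;
  ASMJIT_PROPAGATE(func.init(FuncSignatureT<int, int, int>(CallConv::kIdHost)));

  FuncFrame frame;
  ASMJIT_PROPAGATE(frame.init(func));              // seed the frame from the calling convention

  FuncArgsAssignment args(&func);                  // where the generated code wants the arguments
  // args.assignAll(...);                          // backend-specific register assignment (omitted)
  ASMJIT_PROPAGATE(args.updateFuncFrame(frame));   // reserve what the assignment needs
  ASMJIT_PROPAGATE(frame.finalize());              // compute the final stack layout

  ASMJIT_PROPAGATE(emitter->emitProlog(frame));
  ASMJIT_PROPAGATE(emitter->emitArgsAssignment(frame, args));
  // ... function body ...
  return emitter->emitEpilog(frame);
}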
Contains information about function return type, count of arguments and
+//! their TypeIds. Function signature is a low level structure which doesn't
+//! contain platform specific or calling convention specific information.
+struct FuncSignature {
+  //! Calling convention id.
+  uint8_t _callConv;
+  //! Count of arguments.
+  uint8_t _argCount;
+  //! Index of a first VA or `kNoVarArgs`.
+  uint8_t _vaIndex;
+  //! Return value TypeId.
+  uint8_t _ret;
+  //! Function arguments TypeIds.
+  const uint8_t* _args;
+
+  enum : uint8_t {
+    //! Doesn't have variable number of arguments (`...`).
+    kNoVarArgs = 0xFF
+  };
+
+  //! \name Initialization & Reset
+  //! \{
+
+  //! Initializes the function signature.
+  inline void init(uint32_t ccId, uint32_t vaIndex, uint32_t ret, const uint8_t* args, uint32_t argCount) noexcept {
+    ASMJIT_ASSERT(ccId <= 0xFF);
+    ASMJIT_ASSERT(argCount <= 0xFF);
+
+    _callConv = uint8_t(ccId);
+    _argCount = uint8_t(argCount);
+    _vaIndex = uint8_t(vaIndex);
+    _ret = uint8_t(ret);
+    _args = args;
+  }
+
+  inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the calling convention.
+  inline uint32_t callConv() const noexcept { return _callConv; }
+  //! Sets the calling convention to `ccId`.
+  inline void setCallConv(uint32_t ccId) noexcept { _callConv = uint8_t(ccId); }
+
+  //! Tests whether the function has variable number of arguments (...).
+  inline bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
+  //! Returns the variable arguments (...) index, `kNoVarArgs` if none.
+  inline uint32_t vaIndex() const noexcept { return _vaIndex; }
+  //! Sets the variable arguments (...) index to `index`.
+  inline void setVaIndex(uint32_t index) noexcept { _vaIndex = uint8_t(index); }
+  //! Resets the variable arguments index (making it a non-va function).
+  inline void resetVaIndex() noexcept { _vaIndex = kNoVarArgs; }
+
+  //! Returns the number of function arguments.
+  inline uint32_t argCount() const noexcept { return _argCount; }
+
+  inline bool hasRet() const noexcept { return _ret != Type::kIdVoid; }
+  //! Returns the return value type.
+  inline uint32_t ret() const noexcept { return _ret; }
+
+  //! Returns the type of the argument at index `i`.
+  inline uint32_t arg(uint32_t i) const noexcept {
+    ASMJIT_ASSERT(i < _argCount);
+    return _args[i];
+  }
+  //! Returns the array of function arguments' types.
+  inline const uint8_t* args() const noexcept { return _args; }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncSignatureT]
+// ============================================================================
+
+template<typename... RetValArgs>
+class FuncSignatureT : public FuncSignature {
+public:
+  inline FuncSignatureT(uint32_t ccId = CallConv::kIdHost, uint32_t vaIndex = kNoVarArgs) noexcept {
+    static const uint8_t ret_args[] = { (uint8_t(Type::IdOfT<RetValArgs>::kTypeId))... };
+    init(ccId, vaIndex, ret_args[0], ret_args + 1, uint32_t(ASMJIT_ARRAY_SIZE(ret_args) - 1));
+  }
+};
+
+// ============================================================================
+// [asmjit::FuncSignatureBuilder]
+// ============================================================================
+
+//! Function signature builder.
+class FuncSignatureBuilder : public FuncSignature {
+public:
+  uint8_t _builderArgList[kFuncArgCount];
+
+  //! \name Initialization & Reset
+  //!
\{ + + inline FuncSignatureBuilder(uint32_t ccId = CallConv::kIdHost, uint32_t vaIndex = kNoVarArgs) noexcept { + init(ccId, vaIndex, Type::kIdVoid, _builderArgList, 0); + } + + //! \} + + //! \name Accessors + //! \{ + + //! Sets the return type to `retType`. + inline void setRet(uint32_t retType) noexcept { _ret = uint8_t(retType); } + //! Sets the return type based on `T`. + template + inline void setRetT() noexcept { setRet(Type::IdOfT::kTypeId); } + + //! Sets the argument at index `index` to `argType`. + inline void setArg(uint32_t index, uint32_t argType) noexcept { + ASMJIT_ASSERT(index < _argCount); + _builderArgList[index] = uint8_t(argType); + } + //! Sets the argument at index `i` to the type based on `T`. + template + inline void setArgT(uint32_t index) noexcept { setArg(index, Type::IdOfT::kTypeId); } + + //! Appends an argument of `type` to the function prototype. + inline void addArg(uint32_t type) noexcept { + ASMJIT_ASSERT(_argCount < kFuncArgCount); + _builderArgList[_argCount++] = uint8_t(type); + } + //! Appends an argument of type based on `T` to the function prototype. + template + inline void addArgT() noexcept { addArg(Type::IdOfT::kTypeId); } + + //! \} +}; + +// ============================================================================ +// [asmjit::FuncValue] +// ============================================================================ + +//! Argument or return value as defined by `FuncSignature`, but with register +//! or stack address (and other metadata) assigned to it. +struct FuncValue { + uint32_t _data; + + enum Parts : uint32_t { + kTypeIdShift = 0, //!< TypeId shift. + kTypeIdMask = 0x000000FFu, //!< TypeId mask. + + kFlagIsReg = 0x00000100u, //!< Passed by register. + kFlagIsStack = 0x00000200u, //!< Passed by stack. + kFlagIsIndirect = 0x00000400u, //!< Passed indirectly by reference (internally a pointer). + kFlagIsDone = 0x00000800u, //!< Used internally by arguments allocator. + + kStackOffsetShift = 12, //!< Stack offset shift. + kStackOffsetMask = 0xFFFFF000u, //!< Stack offset mask (must occupy MSB bits). + + kRegIdShift = 16, //!< RegId shift. + kRegIdMask = 0x00FF0000u, //!< RegId mask. + + kRegTypeShift = 24, //!< RegType shift. + kRegTypeMask = 0xFF000000u //!< RegType mask. + }; + + //! \name Initializtion & Reset + //! \{ + + // These initialize the whole `FuncValue` to either register or stack. Useful + // when you know all of these properties and wanna just set it up. + + //! Initializes the `typeId` of this `FuncValue`. + inline void initTypeId(uint32_t typeId) noexcept { + _data = typeId << kTypeIdShift; + } + + inline void initReg(uint32_t regType, uint32_t regId, uint32_t typeId, uint32_t flags = 0) noexcept { + _data = (regType << kRegTypeShift) | (regId << kRegIdShift) | (typeId << kTypeIdShift) | kFlagIsReg | flags; + } + + inline void initStack(int32_t offset, uint32_t typeId) noexcept { + _data = (uint32_t(offset) << kStackOffsetShift) | (typeId << kTypeIdShift) | kFlagIsStack; + } + + //! Resets the value to its unassigned state. + inline void reset() noexcept { _data = 0; } + + //! \} + + //! \name Assign + //! \{ + + // These initialize only part of `FuncValue`, useful when building `FuncValue` + // incrementally. The caller should first init the type-id by caliing `initTypeId` + // and then continue building either register or stack. 
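+  //
+  // A minimal sketch of that incremental flow (illustrative only - `someRegType`
+  // and `someRegId` are placeholders, not constants defined by this header):
+  //
+  //   FuncValue value;
+  //   value.initTypeId(Type::kIdI32);              // 1. Assign the TypeId first.
+  //   value.assignRegData(someRegType, someRegId); // 2a. Then assign a register...
+  //   // value.assignStackOffset(16);              // 2b. ...or a stack offset instead.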
+
+  //! Assigns a register of the given `regType` and `regId` to this value.
+  inline void assignRegData(uint32_t regType, uint32_t regId) noexcept {
+    ASMJIT_ASSERT((_data & (kRegTypeMask | kRegIdMask)) == 0);
+    _data |= (regType << kRegTypeShift) | (regId << kRegIdShift) | kFlagIsReg;
+  }
+
+  //! Assigns a stack location at the given `offset` to this value.
+  inline void assignStackOffset(int32_t offset) noexcept {
+    ASMJIT_ASSERT((_data & kStackOffsetMask) == 0);
+    _data |= (uint32_t(offset) << kStackOffsetShift) | kFlagIsStack;
+  }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  inline void _replaceValue(uint32_t mask, uint32_t value) noexcept { _data = (_data & ~mask) | value; }
+
+  //! Tests whether the `FuncValue` has a flag `flag` set.
+  inline bool hasFlag(uint32_t flag) const noexcept { return (_data & flag) != 0; }
+  //! Adds `flags` to `FuncValue`.
+  inline void addFlags(uint32_t flags) noexcept { _data |= flags; }
+  //! Clears `flags` of `FuncValue`.
+  inline void clearFlags(uint32_t flags) noexcept { _data &= ~flags; }
+
+  //! Tests whether the value is initialized (i.e. it contains valid data).
+  inline bool isInitialized() const noexcept { return _data != 0; }
+  //! Tests whether the argument is passed by register.
+  inline bool isReg() const noexcept { return hasFlag(kFlagIsReg); }
+  //! Tests whether the argument is passed by stack.
+  inline bool isStack() const noexcept { return hasFlag(kFlagIsStack); }
+  //! Tests whether the argument is assigned (passed by either register or stack).
+  inline bool isAssigned() const noexcept { return hasFlag(kFlagIsReg | kFlagIsStack); }
+  //! Tests whether the argument is passed through a pointer (used by WIN64 to pass XMM|YMM|ZMM).
+  inline bool isIndirect() const noexcept { return hasFlag(kFlagIsIndirect); }
+
+  //! Tests whether the argument was already processed (used internally).
+  inline bool isDone() const noexcept { return hasFlag(kFlagIsDone); }
+
+  //! Returns a register type of the register used to pass function argument or return value.
+  inline uint32_t regType() const noexcept { return (_data & kRegTypeMask) >> kRegTypeShift; }
+  //! Sets a register type of the register used to pass function argument or return value.
+  inline void setRegType(uint32_t regType) noexcept { _replaceValue(kRegTypeMask, regType << kRegTypeShift); }
+
+  //! Returns a physical id of the register used to pass function argument or return value.
+  inline uint32_t regId() const noexcept { return (_data & kRegIdMask) >> kRegIdShift; }
+  //! Sets a physical id of the register used to pass function argument or return value.
+  inline void setRegId(uint32_t regId) noexcept { _replaceValue(kRegIdMask, regId << kRegIdShift); }
+
+  //! Returns a stack offset of this argument.
+  inline int32_t stackOffset() const noexcept { return int32_t(_data & kStackOffsetMask) >> kStackOffsetShift; }
+  //! Sets a stack offset of this argument.
+  inline void setStackOffset(int32_t offset) noexcept { _replaceValue(kStackOffsetMask, uint32_t(offset) << kStackOffsetShift); }
+
+  //! Tests whether the argument or return value has an associated `Type::Id`.
+  inline bool hasTypeId() const noexcept { return (_data & kTypeIdMask) != 0; }
+  //! Returns a TypeId of this argument or return value.
+  inline uint32_t typeId() const noexcept { return (_data & kTypeIdMask) >> kTypeIdShift; }
+  //! Sets a TypeId of this argument or return value.
+  inline void setTypeId(uint32_t typeId) noexcept { _replaceValue(kTypeIdMask, typeId << kTypeIdShift); }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncDetail]
+// ============================================================================
+
+//! Function detail - CallConv and expanded FuncSignature.
+//!
+//! Function detail is an architecture- and OS-dependent representation of a
+//! function. It contains the calling convention and an expanded function
+//! signature so all arguments have assigned either a register type & id or a
+//! stack address.
+class FuncDetail {
+public:
+  //! Calling convention.
+  CallConv _callConv;
+  //! Number of function arguments.
+  uint8_t _argCount;
+  //! Number of function return values.
+  uint8_t _retCount;
+  //! Variable arguments index or `kNoVarArgs`.
+  uint8_t _vaIndex;
+  //! Reserved for future use.
+  uint8_t _reserved;
+  //! Registers that contain function arguments.
+  uint32_t _usedRegs[BaseReg::kGroupVirt];
+  //! Size of arguments passed by stack.
+  uint32_t _argStackSize;
+  //! Function return values.
+  FuncValue _rets[2];
+  //! Function arguments.
+  FuncValue _args[kFuncArgCountLoHi];
+
+  enum : uint8_t {
+    //! Doesn't have variable number of arguments (`...`).
+    kNoVarArgs = 0xFF
+  };
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline FuncDetail() noexcept { reset(); }
+  inline FuncDetail(const FuncDetail& other) noexcept = default;
+
+  //! Initializes this `FuncDetail` to the given signature.
+  ASMJIT_API Error init(const FuncSignature& sign);
+  inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the function's calling convention, see `CallConv`.
+  inline const CallConv& callConv() const noexcept { return _callConv; }
+
+  //! Returns the associated calling convention flags, see `CallConv::Flags`.
+  inline uint32_t flags() const noexcept { return _callConv.flags(); }
+  //! Tests whether the calling convention flag `ccFlag` is set, see `CallConv::Flags`.
+  inline bool hasFlag(uint32_t ccFlag) const noexcept { return _callConv.hasFlag(ccFlag); }
+
+  //! Returns count of function return values.
+  inline uint32_t retCount() const noexcept { return _retCount; }
+  //! Returns the number of function arguments.
+  inline uint32_t argCount() const noexcept { return _argCount; }
+
+  //! Tests whether the function has a return value.
+  inline bool hasRet() const noexcept { return _retCount != 0; }
+  //! Returns function return value associated with the given `index`.
+  inline FuncValue& ret(uint32_t index = 0) noexcept {
+    ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_rets));
+    return _rets[index];
+  }
+  //! Returns function return value associated with the given `index` (const).
+  inline const FuncValue& ret(uint32_t index = 0) const noexcept {
+    ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_rets));
+    return _rets[index];
+  }
+
+  //! Returns function arguments array.
+  inline FuncValue* args() noexcept { return _args; }
+  //! Returns function arguments array (const).
+  inline const FuncValue* args() const noexcept { return _args; }
+
+  //! Tests whether the function argument at the given `index` is assigned.
+  inline bool hasArg(uint32_t index) const noexcept {
+    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
+    return _args[index].isInitialized();
+  }
+
+  //! Returns function argument at the given `index`.
+  inline FuncValue& arg(uint32_t index) noexcept {
+    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
+    return _args[index];
+  }
+
+  //! Returns function argument at the given `index` (const).
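+  //!
+  //! A short usage sketch (illustrative only - the concrete register or stack
+  //! assignment depends on the target calling convention; error handling omitted):
+  //!
+  //!   FuncDetail detail;
+  //!   detail.init(FuncSignatureT<int, int, int>(CallConv::kIdHost));
+  //!
+  //!   const FuncValue& firstArg = detail.arg(0); // Expanded assignment of argument #0.
+  //!   bool inRegister = firstArg.isReg();        // True if it's passed by register.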
+ inline const FuncValue& arg(uint32_t index) const noexcept { + ASMJIT_ASSERT(index < kFuncArgCountLoHi); + return _args[index]; + } + + inline void resetArg(uint32_t index) noexcept { + ASMJIT_ASSERT(index < kFuncArgCountLoHi); + _args[index].reset(); + } + + inline bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; } + inline uint32_t vaIndex() const noexcept { return _vaIndex; } + + //! Tests whether the function passes one or more argument by stack. + inline bool hasStackArgs() const noexcept { return _argStackSize != 0; } + //! Returns stack size needed for function arguments passed on the stack. + inline uint32_t argStackSize() const noexcept { return _argStackSize; } + + inline uint32_t redZoneSize() const noexcept { return _callConv.redZoneSize(); } + inline uint32_t spillZoneSize() const noexcept { return _callConv.spillZoneSize(); } + inline uint32_t naturalStackAlignment() const noexcept { return _callConv.naturalStackAlignment(); } + + inline uint32_t passedRegs(uint32_t group) const noexcept { return _callConv.passedRegs(group); } + inline uint32_t preservedRegs(uint32_t group) const noexcept { return _callConv.preservedRegs(group); } + + inline uint32_t usedRegs(uint32_t group) const noexcept { + ASMJIT_ASSERT(group < BaseReg::kGroupVirt); + return _usedRegs[group]; + } + + inline void addUsedRegs(uint32_t group, uint32_t regs) noexcept { + ASMJIT_ASSERT(group < BaseReg::kGroupVirt); + _usedRegs[group] |= regs; + } + + //! \} +}; + +// ============================================================================ +// [asmjit::FuncFrame] +// ============================================================================ + +//! Function frame. +//! +//! Function frame is used directly by prolog and epilog insertion (PEI) utils. +//! It provides information necessary to insert a proper and ABI comforming +//! prolog and epilog. Function frame calculation is based on `CallConv` and +//! other function attributes. +//! +//! Function Frame Structure +//! ------------------------ +//! +//! Various properties can contribute to the size and structure of the function +//! frame. The function frame in most cases won't use all of the properties +//! illustrated (for example Spill Zone and Red Zone are never used together). +//! +//! +-----------------------------+ +//! | Arguments Passed by Stack | +//! +-----------------------------+ +//! | Spill Zone | +//! +-----------------------------+ <- Stack offset (args) starts from here. +//! | Return Address if Pushed | +//! +-----------------------------+ <- Stack pointer (SP) upon entry. +//! | Save/Restore Stack. | +//! +-----------------------------+-----------------------------+ +//! | Local Stack | | +//! +-----------------------------+ Final Stack | +//! | Call Stack | | +//! +-----------------------------+-----------------------------+ <- SP after prolog. +//! | Red Zone | +//! +-----------------------------+ +class FuncFrame { +public: + enum Tag : uint32_t { + kTagInvalidOffset = 0xFFFFFFFFu //!< Tag used to inform that some offset is invalid. + }; + + //! Attributes are designed in a way that all are initially false, and user + //! or FuncFrame finalizer adds them when necessary. + enum Attributes : uint32_t { + kAttrHasVarArgs = 0x00000001u, //!< Function has variable number of arguments. + kAttrHasPreservedFP = 0x00000010u, //!< Preserve frame pointer (don't omit FP). + kAttrHasFuncCalls = 0x00000020u, //!< Function calls other functions (is not leaf). 
+ + kAttrX86AvxEnabled = 0x00010000u, //!< Use AVX instead of SSE for all operations (X86). + kAttrX86AvxCleanup = 0x00020000u, //!< Emit VZEROUPPER instruction in epilog (X86). + kAttrX86MmxCleanup = 0x00040000u, //!< Emit EMMS instruction in epilog (X86). + + kAttrAlignedVecSR = 0x40000000u, //!< Function has aligned save/restore of vector registers. + kAttrIsFinalized = 0x80000000u //!< FuncFrame is finalized and can be used by PEI. + }; + + //! Function attributes. + uint32_t _attributes; + + //! Architecture ID. + uint8_t _archId; + //! SP register ID (to access call stack and local stack). + uint8_t _spRegId; + //! SA register ID (to access stack arguments). + uint8_t _saRegId; + + //! Red zone size (copied from CallConv). + uint8_t _redZoneSize; + //! Spill zone size (copied from CallConv). + uint8_t _spillZoneSize; + //! Natural stack alignment (copied from CallConv). + uint8_t _naturalStackAlignment; + //! Minimum stack alignment to turn on dynamic alignment. + uint8_t _minDynamicAlignment; + + //! Call stack alignment. + uint8_t _callStackAlignment; + //! Local stack alignment. + uint8_t _localStackAlignment; + //! Final stack alignment. + uint8_t _finalStackAlignment; + + //! Adjustment of the stack before returning (X86-STDCALL). + uint16_t _calleeStackCleanup; + + //! Call stack size. + uint32_t _callStackSize; + //! Local stack size. + uint32_t _localStackSize; + //! Final stack size (sum of call stack and local stack). + uint32_t _finalStackSize; + + //! Local stack offset (non-zero only if call stack is used). + uint32_t _localStackOffset; + //! Offset relative to SP that contains previous SP (before alignment). + uint32_t _daOffset; + //! Offset of the first stack argument relative to SP. + uint32_t _saOffsetFromSP; + //! Offset of the first stack argument relative to SA (_saRegId or FP). + uint32_t _saOffsetFromSA; + + //! Local stack adjustment in prolog/epilog. + uint32_t _stackAdjustment; + + //! Registers that are dirty. + uint32_t _dirtyRegs[BaseReg::kGroupVirt]; + //! Registers that must be preserved (copied from CallConv). + uint32_t _preservedRegs[BaseReg::kGroupVirt]; + + //! Final stack size required to save GP regs. + uint16_t _gpSaveSize; + //! Final Stack size required to save other than GP regs. + uint16_t _nonGpSaveSize; + //! Final offset where saved GP regs are stored. + uint32_t _gpSaveOffset; + //! Final offset where saved other than GP regs are stored. + uint32_t _nonGpSaveOffset; + + //! \name Construction & Destruction + //! \{ + + inline FuncFrame() noexcept { reset(); } + inline FuncFrame(const FuncFrame& other) noexcept = default; + + ASMJIT_API Error init(const FuncDetail& func) noexcept; + + inline void reset() noexcept { + memset(this, 0, sizeof(FuncFrame)); + _spRegId = BaseReg::kIdBad; + _saRegId = BaseReg::kIdBad; + _daOffset = kTagInvalidOffset; + } + + //! \} + + //! \name Accessors + //! \{ + + //! Returns the target architecture of the function frame. + inline uint32_t archId() const noexcept { return _archId; } + + //! Returns function frame attributes, see `Attributes`. + inline uint32_t attributes() const noexcept { return _attributes; } + //! Checks whether the FuncFame contains an attribute `attr`. + inline bool hasAttribute(uint32_t attr) const noexcept { return (_attributes & attr) != 0; } + //! Adds attributes `attrs` to the FuncFrame. + inline void addAttributes(uint32_t attrs) noexcept { _attributes |= attrs; } + //! Clears attributes `attrs` from the FrameFrame. 
+  inline void clearAttributes(uint32_t attrs) noexcept { _attributes &= ~attrs; }
+
+  //! Tests whether the function has a variable number of arguments.
+  inline bool hasVarArgs() const noexcept { return hasAttribute(kAttrHasVarArgs); }
+  //! Sets the variable arguments flag.
+  inline void setVarArgs() noexcept { addAttributes(kAttrHasVarArgs); }
+  //! Resets the variable arguments flag.
+  inline void resetVarArgs() noexcept { clearAttributes(kAttrHasVarArgs); }
+
+  //! Tests whether the function preserves the frame pointer (EBP|RBP on X86).
+  inline bool hasPreservedFP() const noexcept { return hasAttribute(kAttrHasPreservedFP); }
+  //! Enables preserved frame pointer.
+  inline void setPreservedFP() noexcept { addAttributes(kAttrHasPreservedFP); }
+  //! Disables preserved frame pointer.
+  inline void resetPreservedFP() noexcept { clearAttributes(kAttrHasPreservedFP); }
+
+  //! Tests whether the function calls other functions.
+  inline bool hasFuncCalls() const noexcept { return hasAttribute(kAttrHasFuncCalls); }
+  //! Adds the `kAttrHasFuncCalls` attribute.
+  inline void setFuncCalls() noexcept { addAttributes(kAttrHasFuncCalls); }
+  //! Clears the `kAttrHasFuncCalls` attribute.
+  inline void resetFuncCalls() noexcept { clearAttributes(kAttrHasFuncCalls); }
+
+  //! Tests whether the function contains AVX cleanup - 'vzeroupper' instruction in epilog.
+  inline bool hasAvxCleanup() const noexcept { return hasAttribute(kAttrX86AvxCleanup); }
+  //! Enables AVX cleanup.
+  inline void setAvxCleanup() noexcept { addAttributes(kAttrX86AvxCleanup); }
+  //! Disables AVX cleanup.
+  inline void resetAvxCleanup() noexcept { clearAttributes(kAttrX86AvxCleanup); }
+
+  //! Tests whether AVX is enabled (AVX is used instead of SSE for all operations).
+  inline bool isAvxEnabled() const noexcept { return hasAttribute(kAttrX86AvxEnabled); }
+  //! Enables AVX (use AVX instead of SSE).
+  inline void setAvxEnabled() noexcept { addAttributes(kAttrX86AvxEnabled); }
+  //! Disables AVX.
+  inline void resetAvxEnabled() noexcept { clearAttributes(kAttrX86AvxEnabled); }
+
+  //! Tests whether the function contains MMX cleanup - 'emms' instruction in epilog.
+  inline bool hasMmxCleanup() const noexcept { return hasAttribute(kAttrX86MmxCleanup); }
+  //! Enables MMX cleanup.
+  inline void setMmxCleanup() noexcept { addAttributes(kAttrX86MmxCleanup); }
+  //! Disables MMX cleanup.
+  inline void resetMmxCleanup() noexcept { clearAttributes(kAttrX86MmxCleanup); }
+
+  //! Tests whether the function uses call stack.
+  inline bool hasCallStack() const noexcept { return _callStackSize != 0; }
+  //! Tests whether the function uses local stack.
+  inline bool hasLocalStack() const noexcept { return _localStackSize != 0; }
+  //! Tests whether vector registers can be saved and restored by using aligned reads and writes.
+  inline bool hasAlignedVecSR() const noexcept { return hasAttribute(kAttrAlignedVecSR); }
+  //! Tests whether the function has to align stack dynamically.
+  inline bool hasDynamicAlignment() const noexcept { return _finalStackAlignment >= _minDynamicAlignment; }
+
+  //! Tests whether the calling convention specifies 'RedZone'.
+  inline bool hasRedZone() const noexcept { return _redZoneSize != 0; }
+  //! Tests whether the calling convention specifies 'SpillZone'.
+  inline bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
+
+  //! Returns the size of 'RedZone'.
+  inline uint32_t redZoneSize() const noexcept { return _redZoneSize; }
+  //! Returns the size of 'SpillZone'.
+  inline uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
+  //! Returns natural stack alignment (guaranteed stack alignment upon entry).
+  inline uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; }
+  //! Returns the minimum stack alignment required to turn on dynamic stack alignment.
+  inline uint32_t minDynamicAlignment() const noexcept { return _minDynamicAlignment; }
+
+  //! Tests whether the callee must adjust SP before returning (X86-STDCALL only).
+  inline bool hasCalleeStackCleanup() const noexcept { return _calleeStackCleanup != 0; }
+  //! Returns how many bytes of the stack the callee must adjust before returning (X86-STDCALL only).
+  inline uint32_t calleeStackCleanup() const noexcept { return _calleeStackCleanup; }
+
+  //! Returns call stack alignment.
+  inline uint32_t callStackAlignment() const noexcept { return _callStackAlignment; }
+  //! Returns local stack alignment.
+  inline uint32_t localStackAlignment() const noexcept { return _localStackAlignment; }
+  //! Returns final stack alignment (the maximum value of call, local, and natural stack alignments).
+  inline uint32_t finalStackAlignment() const noexcept { return _finalStackAlignment; }
+
+  //! Sets call stack alignment.
+  //!
+  //! \note This also updates the final stack alignment.
+  inline void setCallStackAlignment(uint32_t alignment) noexcept {
+    _callStackAlignment = uint8_t(alignment);
+    _finalStackAlignment = Support::max(_naturalStackAlignment, _callStackAlignment, _localStackAlignment);
+  }
+
+  //! Sets local stack alignment.
+  //!
+  //! \note This also updates the final stack alignment.
+  inline void setLocalStackAlignment(uint32_t value) noexcept {
+    _localStackAlignment = uint8_t(value);
+    _finalStackAlignment = Support::max(_naturalStackAlignment, _callStackAlignment, _localStackAlignment);
+  }
+
+  //! Combines call stack alignment with `alignment`, updating it to the greater value.
+  //!
+  //! \note This also updates the final stack alignment.
+  inline void updateCallStackAlignment(uint32_t alignment) noexcept {
+    _callStackAlignment = uint8_t(Support::max(_callStackAlignment, alignment));
+    _finalStackAlignment = Support::max(_finalStackAlignment, _callStackAlignment);
+  }
+
+  //! Combines local stack alignment with `alignment`, updating it to the greater value.
+  //!
+  //! \note This also updates the final stack alignment.
+  inline void updateLocalStackAlignment(uint32_t alignment) noexcept {
+    _localStackAlignment = uint8_t(Support::max(_localStackAlignment, alignment));
+    _finalStackAlignment = Support::max(_finalStackAlignment, _localStackAlignment);
+  }
+
+  //! Returns call stack size.
+  inline uint32_t callStackSize() const noexcept { return _callStackSize; }
+  //! Returns local stack size.
+  inline uint32_t localStackSize() const noexcept { return _localStackSize; }
+
+  //! Sets call stack size.
+  inline void setCallStackSize(uint32_t size) noexcept { _callStackSize = size; }
+  //! Sets local stack size.
+  inline void setLocalStackSize(uint32_t size) noexcept { _localStackSize = size; }
+
+  //! Combines call stack size with `size`, updating it to the greater value.
+  inline void updateCallStackSize(uint32_t size) noexcept { _callStackSize = Support::max(_callStackSize, size); }
+  //! Combines local stack size with `size`, updating it to the greater value.
+  inline void updateLocalStackSize(uint32_t size) noexcept { _localStackSize = Support::max(_localStackSize, size); }
+
+  //! Returns final stack size (only valid after the FuncFrame is finalized).
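+  //!
+  //! A sketch of the typical flow that produces a finalized frame (illustrative
+  //! only; it assumes the emitter-side helpers `emitProlog()` and `emitEpilog()`
+  //! declared by `BaseEmitter`, and omits error handling for brevity):
+  //!
+  //!   FuncDetail detail;
+  //!   detail.init(FuncSignatureT<int, int*>(CallConv::kIdHost));
+  //!
+  //!   FuncFrame frame;
+  //!   frame.init(detail);        // Copies CallConv properties into the frame.
+  //!   frame.addDirtyRegs(...);   // Mark registers the function will clobber.
+  //!   frame.finalize();          // Calculates offsets, finalStackSize(), etc.
+  //!
+  //!   emitter.emitProlog(frame); // Emit prolog based on the finalized frame.
+  //!   // ... function body ...
+  //!   emitter.emitEpilog(frame); // Emit the matching epilog.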
+  inline uint32_t finalStackSize() const noexcept { return _finalStackSize; }
+
+  //! Returns an offset to access the local stack (non-zero only if call stack is used).
+  inline uint32_t localStackOffset() const noexcept { return _localStackOffset; }
+
+  //! Tests whether the function prolog/epilog requires a memory slot for storing unaligned SP.
+  inline bool hasDAOffset() const noexcept { return _daOffset != kTagInvalidOffset; }
+  //! Returns a memory offset used to store DA (dynamic alignment) slot (relative to SP).
+  inline uint32_t daOffset() const noexcept { return _daOffset; }
+
+  //! Returns the offset of the first stack argument relative to the register identified by `regId` (SP or SA).
+  inline uint32_t saOffset(uint32_t regId) const noexcept {
+    return regId == _spRegId ? saOffsetFromSP()
+                             : saOffsetFromSA();
+  }
+
+  //! Returns the offset of the first stack argument relative to SP.
+  inline uint32_t saOffsetFromSP() const noexcept { return _saOffsetFromSP; }
+  //! Returns the offset of the first stack argument relative to SA.
+  inline uint32_t saOffsetFromSA() const noexcept { return _saOffsetFromSA; }
+
+  //! Returns mask of registers of the given register `group` that are modified
+  //! by the function. The engine then calculates which registers must be saved
+  //! and restored by the function, using the data provided by the calling
+  //! convention.
+  inline uint32_t dirtyRegs(uint32_t group) const noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    return _dirtyRegs[group];
+  }
+
+  //! Sets which registers (as a mask) are modified by the function.
+  //!
+  //! \remarks Please note that this will completely overwrite the existing
+  //! register mask, use `addDirtyRegs()` to modify the existing register
+  //! mask.
+  inline void setDirtyRegs(uint32_t group, uint32_t regs) noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    _dirtyRegs[group] = regs;
+  }
+
+  //! Adds which registers (as a mask) are modified by the function.
+  inline void addDirtyRegs(uint32_t group, uint32_t regs) noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    _dirtyRegs[group] |= regs;
+  }
+
+  //! \overload
+  inline void addDirtyRegs(const BaseReg& reg) noexcept {
+    ASMJIT_ASSERT(reg.id() < Globals::kMaxPhysRegs);
+    addDirtyRegs(reg.group(), Support::bitMask(reg.id()));
+  }
+
+  //! \overload
+  template<typename... ArgsT>
+  ASMJIT_INLINE void addDirtyRegs(const BaseReg& reg, ArgsT&&... args) noexcept {
+    addDirtyRegs(reg);
+    addDirtyRegs(std::forward<ArgsT>(args)...);
+  }
+
+  inline void setAllDirty() noexcept {
+    _dirtyRegs[0] = 0xFFFFFFFFu;
+    _dirtyRegs[1] = 0xFFFFFFFFu;
+    _dirtyRegs[2] = 0xFFFFFFFFu;
+    _dirtyRegs[3] = 0xFFFFFFFFu;
+  }
+
+  inline void setAllDirty(uint32_t group) noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    _dirtyRegs[group] = 0xFFFFFFFFu;
+  }
+
+  //! Returns a calculated mask of registers of the given `group` that will be
+  //! saved and restored in the function's prolog and epilog, respectively. The
+  //! register mask is calculated from both `dirtyRegs` (provided by user) and
+  //! `preservedRegs` (provided by the calling convention).
+  inline uint32_t savedRegs(uint32_t group) const noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    return _dirtyRegs[group] & _preservedRegs[group];
+  }
+
+  //! Returns the mask of preserved registers of the given register `group`.
+  //!
+  //! Preserved registers are those that must survive the function call
+  //! unmodified. The function can only modify preserved registers if they
+  //! are saved and restored in the function's prolog and epilog, respectively.
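+  //!
+  //! For example, if `dirtyRegs()` of a group returns 0x0000001E and
+  //! `preservedRegs()` of the same group returns 0x0000FFF0 (hypothetical
+  //! masks used only for illustration), then `savedRegs()` yields 0x00000010 -
+  //! only the dirty registers that the calling convention requires to survive
+  //! the call get saved and restored.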
+  inline uint32_t preservedRegs(uint32_t group) const noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    return _preservedRegs[group];
+  }
+
+  inline bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; }
+  inline uint32_t saRegId() const noexcept { return _saRegId; }
+  inline void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); }
+  inline void resetSARegId() { setSARegId(BaseReg::kIdBad); }
+
+  //! Returns stack size required to save GP registers.
+  inline uint32_t gpSaveSize() const noexcept { return _gpSaveSize; }
+  //! Returns stack size required to save other than GP registers (MM, XMM|YMM|ZMM, K, VFP, etc...).
+  inline uint32_t nonGpSaveSize() const noexcept { return _nonGpSaveSize; }
+
+  //! Returns an offset to the stack where general purpose registers are saved.
+  inline uint32_t gpSaveOffset() const noexcept { return _gpSaveOffset; }
+  //! Returns an offset to the stack where other than GP registers are saved.
+  inline uint32_t nonGpSaveOffset() const noexcept { return _nonGpSaveOffset; }
+
+  //! Tests whether the function contains a stack adjustment.
+  inline bool hasStackAdjustment() const noexcept { return _stackAdjustment != 0; }
+  //! Returns the stack adjustment used in the function's prolog and epilog.
+  //!
+  //! If the returned value is zero it means that the stack is not adjusted.
+  //! This can mean that the stack is not used, or that it is only adjusted
+  //! by instructions that push/pop registers into/from the stack.
+  inline uint32_t stackAdjustment() const noexcept { return _stackAdjustment; }
+
+  //! \}
+
+  //! \name Finalization
+  //! \{
+
+  ASMJIT_API Error finalize() noexcept;
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncArgsAssignment]
+// ============================================================================
+
+//! A helper class that can be used to assign a physical register for each
+//! function argument. Use with `BaseEmitter::emitArgsAssignment()`.
+class FuncArgsAssignment {
+public:
+  //! Function detail.
+  const FuncDetail* _funcDetail;
+  //! Register that can be used to access arguments passed by stack.
+  uint8_t _saRegId;
+  //! Reserved for future use.
+  uint8_t _reserved[3];
+  //! Mapping of each function argument.
+  FuncValue _args[kFuncArgCountLoHi];
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline explicit FuncArgsAssignment(const FuncDetail* fd = nullptr) noexcept { reset(fd); }
+
+  inline FuncArgsAssignment(const FuncArgsAssignment& other) noexcept {
+    memcpy(this, &other, sizeof(*this));
+  }
+
+  inline void reset(const FuncDetail* fd = nullptr) noexcept {
+    _funcDetail = fd;
+    _saRegId = uint8_t(BaseReg::kIdBad);
+    memset(_reserved, 0, sizeof(_reserved));
+    memset(_args, 0, sizeof(_args));
+  }
+
+  //! \}
+
+  //! \name Accessors
+  //!
\{ + + inline const FuncDetail* funcDetail() const noexcept { return _funcDetail; } + inline void setFuncDetail(const FuncDetail* fd) noexcept { _funcDetail = fd; } + + inline bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; } + inline uint32_t saRegId() const noexcept { return _saRegId; } + inline void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); } + inline void resetSARegId() { _saRegId = uint8_t(BaseReg::kIdBad); } + + inline FuncValue& arg(uint32_t index) noexcept { + ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args)); + return _args[index]; + } + inline const FuncValue& arg(uint32_t index) const noexcept { + ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args)); + return _args[index]; + } + + inline bool isAssigned(uint32_t argIndex) const noexcept { + ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args)); + return _args[argIndex].isAssigned(); + } + + inline void assignReg(uint32_t argIndex, const BaseReg& reg, uint32_t typeId = Type::kIdVoid) noexcept { + ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args)); + ASMJIT_ASSERT(reg.isPhysReg()); + _args[argIndex].initReg(reg.type(), reg.id(), typeId); + } + + inline void assignReg(uint32_t argIndex, uint32_t regType, uint32_t regId, uint32_t typeId = Type::kIdVoid) noexcept { + ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args)); + _args[argIndex].initReg(regType, regId, typeId); + } + + inline void assignStack(uint32_t argIndex, int32_t offset, uint32_t typeId = Type::kIdVoid) { + ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args)); + _args[argIndex].initStack(offset, typeId); + } + + // NOTE: All `assignAll()` methods are shortcuts to assign all arguments at + // once, however, since registers are passed all at once these initializers + // don't provide any way to pass TypeId and/or to keep any argument between + // the arguments passed unassigned. + inline void _assignAllInternal(uint32_t argIndex, const BaseReg& reg) noexcept { + assignReg(argIndex, reg); + } + + template + inline void _assignAllInternal(uint32_t argIndex, const BaseReg& reg, ArgsT&&... args) noexcept { + assignReg(argIndex, reg); + _assignAllInternal(argIndex + 1, std::forward(args)...); + } + + template + inline void assignAll(ArgsT&&... args) noexcept { + _assignAllInternal(0, std::forward(args)...); + } + + //! \} + + //! \name Utilities + //! \{ + + //! Update `FuncFrame` based on function's arguments assignment. + //! + //! \note You MUST call this in orher to use `BaseEmitter::emitArgsAssignment()`, + //! otherwise the FuncFrame would not contain the information necessary to + //! assign all arguments into the registers and/or stack specified. + ASMJIT_API Error updateFuncFrame(FuncFrame& frame) const noexcept; + + //! \} +}; + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_FUNC_H + diff --git a/src/asmjit/core/globals.cpp b/src/asmjit/core/globals.cpp new file mode 100644 index 0000000..5a16de4 --- /dev/null +++ b/src/asmjit/core/globals.cpp @@ -0,0 +1,115 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
+ +#define ASMJIT_EXPORTS + +#include "../core/globals.h" +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::DebugUtils] +// ============================================================================ + +ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept { +#ifndef ASMJIT_NO_TEXT + static const char errorMessages[] = + "Ok\0" + "Out of memory\0" + "Invalid argument\0" + "Invalid state\0" + "Invalid architecture\0" + "Not initialized\0" + "Already initialized\0" + "Feature not enabled\0" + "Too many handles or file descriptors\0" + "Too large (code or memory request)\0" + "No code generated\0" + "Invalid directive\0" + "Invalid label\0" + "Too many labels\0" + "Label already bound\0" + "Label already defined\0" + "Label name too long\0" + "Invalid label name\0" + "Invalid parent label\0" + "Non-local label can't have parent\0" + "Invalid section\0" + "Too many sections\0" + "Invalid section name\0" + "Too many relocations\0" + "Invalid relocation entry\0" + "Relocation offset out of range\0" + "Invalid assignment\0" + "Invalid instruction\0" + "Invalid register type\0" + "Invalid register group\0" + "Invalid register physical id\0" + "Invalid register virtual id\0" + "Invalid prefix combination\0" + "Invalid lock prefix\0" + "Invalid xacquire prefix\0" + "Invalid xrelease prefix\0" + "Invalid rep prefix\0" + "Invalid rex prefix\0" + "Invalid {...} register \0" + "Invalid use of {k}\0" + "Invalid use of {k}{z}\0" + "Invalid broadcast {1tox}\0" + "Invalid {er} or {sae} option\0" + "Invalid address\0" + "Invalid address index\0" + "Invalid address scale\0" + "Invalid use of 64-bit address or offset\0" + "Invalid use of 64-bit address or offset that requires 32-bit zero-extension\0" + "Invalid displacement\0" + "Invalid segment\0" + "Invalid immediate value\0" + "Invalid operand size\0" + "Ambiguous operand size\0" + "Operand size mismatch\0" + "Invalid option\0" + "Option already defined\0" + "Invalid type-info\0" + "Invalid use of a low 8-bit GPB register\0" + "Invalid use of a 64-bit GPQ register in 32-bit mode\0" + "Invalid use of an 80-bit float\0" + "Not consecutive registers\0" + "No more physical registers\0" + "Overlapped registers\0" + "Overlapping register and arguments base-address register\0" + "Unbound label cannot be evaluated by expression\0" + "Arithmetic overflow during expression evaluation\0" + "Unknown error\0"; + return Support::findPackedString(errorMessages, Support::min(err, kErrorCount)); +#else + ASMJIT_UNUSED(err); + static const char noMessage[] = ""; + return noMessage; +#endif +} + +ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept { +#if defined(_WIN32) + ::OutputDebugStringA(str); +#else + ::fputs(str, stderr); +#endif +} + +ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept { + char str[1024]; + + snprintf(str, 1024, + "[asmjit] Assertion failed at %s (line %d):\n" + "[asmjit] %s\n", file, line, msg); + + debugOutput(str); + ::abort(); +} + +ASMJIT_END_NAMESPACE diff --git a/src/asmjit/core/globals.h b/src/asmjit/core/globals.h new file mode 100644 index 0000000..fff13a4 --- /dev/null +++ b/src/asmjit/core/globals.h @@ -0,0 +1,404 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
+ +#ifndef _ASMJIT_CORE_GLOBALS_H +#define _ASMJIT_CORE_GLOBALS_H + +#include "../core/build.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::Support] +// ============================================================================ + +//! \cond INTERNAL +//! \addtogroup Support +//! \{ +namespace Support { + //! Cast designed to cast between function and void* pointers. + template + static constexpr Dst ptr_cast_impl(Src p) noexcept { return (Dst)p; } +} // {Support} + +#if defined(ASMJIT_NO_STDCXX) +namespace Support { + ASMJIT_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); } + ASMJIT_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); } +} // {Support} + +#define ASMJIT_BASE_CLASS(TYPE) \ + ASMJIT_INLINE void* operator new(size_t n) noexcept { \ + return Support::operatorNew(n); \ + } \ + \ + ASMJIT_INLINE void operator delete(void* p) noexcept { \ + Support::operatorDelete(p); \ + } \ + \ + ASMJIT_INLINE void* operator new(size_t, void* p) noexcept { return p; } \ + ASMJIT_INLINE void operator delete(void*, void*) noexcept {} +#else +#define ASMJIT_BASE_CLASS(TYPE) +#endif + +//! \} +//! \endcond + +// ============================================================================ +// [asmjit::Globals] +// ============================================================================ + +//! \addtogroup asmjit_core +//! \{ + +//! Contains typedefs, constants, and variables used globally by AsmJit. +namespace Globals { + +// ============================================================================ +// [asmjit::Globals::] +// ============================================================================ + +//! Host memory allocator overhead. +constexpr uint32_t kAllocOverhead = uint32_t(sizeof(intptr_t) * 4); + +//! Host memory allocator alignment. +constexpr uint32_t kAllocAlignment = 8; + +//! Aggressive growing strategy threshold. +constexpr uint32_t kGrowThreshold = 1024 * 1024 * 16; + +//! Maximum height of RB-Tree is: +//! +//! `2 * log2(n + 1)`. +//! +//! Size of RB node is at least two pointers (without data), +//! so a theoretical architecture limit would be: +//! +//! `2 * log2(addressableMemorySize / sizeof(Node) + 1)` +//! +//! Which yields 30 on 32-bit arch and 61 on 64-bit arch. +//! The final value was adjusted by +1 for safety reasons. +constexpr uint32_t kMaxTreeHeight = (ASMJIT_ARCH_BITS == 32 ? 30 : 61) + 1; + +//! Maximum number of operands per a single instruction. +constexpr uint32_t kMaxOpCount = 6; + +// TODO: Use this one. +constexpr uint32_t kMaxFuncArgs = 16; + +//! Maximum number of physical registers AsmJit can use per register group. +constexpr uint32_t kMaxPhysRegs = 32; + +//! Maximum alignment. +constexpr uint32_t kMaxAlignment = 64; + +//! Maximum label or symbol size in bytes. +constexpr uint32_t kMaxLabelNameSize = 2048; + +//! Maximum section name size. +constexpr uint32_t kMaxSectionNameSize = 35; + +//! Maximum size of comment. +constexpr uint32_t kMaxCommentSize = 1024; + +//! Invalid identifier. +constexpr uint32_t kInvalidId = 0xFFFFFFFFu; + +//! Returned by `indexOf()` and similar when working with containers that use 32-bit index/size. +constexpr uint32_t kNotFound = 0xFFFFFFFFu; + +//! Invalid base address. 
+constexpr uint64_t kNoBaseAddress = ~uint64_t(0); + +// ============================================================================ +// [asmjit::Globals::ResetPolicy] +// ============================================================================ + +//! Reset policy used by most `reset()` functions. +enum ResetPolicy : uint32_t { + //! Soft reset, doesn't deallocate memory (default). + kResetSoft = 0, + //! Hard reset, releases all memory used, if any. + kResetHard = 1 +}; + +// ============================================================================ +// [asmjit::Globals::Link] +// ============================================================================ + +enum Link : uint32_t { + kLinkLeft = 0, + kLinkRight = 1, + + kLinkPrev = 0, + kLinkNext = 1, + + kLinkFirst = 0, + kLinkLast = 1, + + kLinkCount = 2 +}; + +struct Init_ {}; +struct NoInit_ {}; + +static const constexpr Init_ Init {}; +static const constexpr NoInit_ NoInit {}; + +} // {Globals} + +// ============================================================================ +// [asmjit::Error] +// ============================================================================ + +//! AsmJit error type (uint32_t). +typedef uint32_t Error; + +//! AsmJit error codes. +enum ErrorCode : uint32_t { + //! No error (success). + kErrorOk = 0, + + //! Out of memory. + kErrorOutOfMemory, + + //! Invalid argument. + kErrorInvalidArgument, + + //! Invalid state. + //! + //! If this error is returned it means that either you are doing something + //! wrong or AsmJit caught itself by doing something wrong. This error should + //! never be ignored. + kErrorInvalidState, + + //! Invalid or incompatible architecture. + kErrorInvalidArch, + + //! The object is not initialized. + kErrorNotInitialized, + //! The object is already initialized. + kErrorAlreadyInitialized, + + //! Built-in feature was disabled at compile time and it's not available. + kErrorFeatureNotEnabled, + + //! Too many handles (Windows) or file descriptors (Unix/Posix). + kErrorTooManyHandles, + //! Code generated is larger than allowed. + kErrorTooLarge, + + //! No code generated. + //! + //! Returned by runtime if the `CodeHolder` contains no code. + kErrorNoCodeGenerated, + + //! Invalid directive. + kErrorInvalidDirective, + //! Attempt to use uninitialized label. + kErrorInvalidLabel, + //! Label index overflow - a single `Assembler` instance can hold almost + //! 2^32 (4 billion) labels. If there is an attempt to create more labels + //! then this error is returned. + kErrorTooManyLabels, + //! Label is already bound. + kErrorLabelAlreadyBound, + //! Label is already defined (named labels). + kErrorLabelAlreadyDefined, + //! Label name is too long. + kErrorLabelNameTooLong, + //! Label must always be local if it's anonymous (without a name). + kErrorInvalidLabelName, + //! Parent id passed to `CodeHolder::newNamedLabelId()` was invalid. + kErrorInvalidParentLabel, + //! Parent id specified for a non-local (global) label. + kErrorNonLocalLabelCantHaveParent, + + //! Invalid section. + kErrorInvalidSection, + //! Too many sections (section index overflow). + kErrorTooManySections, + //! Invalid section name (most probably too long). + kErrorInvalidSectionName, + + //! Relocation index overflow (too many relocations). + kErrorTooManyRelocations, + //! Invalid relocation entry. + kErrorInvalidRelocEntry, + //! Reloc entry contains address that is out of range (unencodable). + kErrorRelocOffsetOutOfRange, + + //! 
Invalid assignment to a register, function argument, or function return value. + kErrorInvalidAssignment, + //! Invalid instruction. + kErrorInvalidInstruction, + //! Invalid register type. + kErrorInvalidRegType, + //! Invalid register group. + kErrorInvalidRegGroup, + //! Invalid register's physical id. + kErrorInvalidPhysId, + //! Invalid register's virtual id. + kErrorInvalidVirtId, + //! Invalid prefix combination. + kErrorInvalidPrefixCombination, + //! Invalid LOCK prefix. + kErrorInvalidLockPrefix, + //! Invalid XACQUIRE prefix. + kErrorInvalidXAcquirePrefix, + //! Invalid XRELEASE prefix. + kErrorInvalidXReleasePrefix, + //! Invalid REP prefix. + kErrorInvalidRepPrefix, + //! Invalid REX prefix. + kErrorInvalidRexPrefix, + //! Invalid {...} register. + kErrorInvalidExtraReg, + //! Invalid {k} use (not supported by the instruction). + kErrorInvalidKMaskUse, + //! Invalid {k}{z} use (not supported by the instruction). + kErrorInvalidKZeroUse, + //! Invalid broadcast - Currently only related to invalid use of AVX-512 {1tox}. + kErrorInvalidBroadcast, + //! Invalid 'embedded-rounding' {er} or 'suppress-all-exceptions' {sae} (AVX-512). + kErrorInvalidEROrSAE, + //! Invalid address used (not encodable). + kErrorInvalidAddress, + //! Invalid index register used in memory address (not encodable). + kErrorInvalidAddressIndex, + //! Invalid address scale (not encodable). + kErrorInvalidAddressScale, + //! Invalid use of 64-bit address. + kErrorInvalidAddress64Bit, + //! Invalid use of 64-bit address that require 32-bit zero-extension (X64). + kErrorInvalidAddress64BitZeroExtension, + //! Invalid displacement (not encodable). + kErrorInvalidDisplacement, + //! Invalid segment (X86). + kErrorInvalidSegment, + + //! Invalid immediate (out of bounds on X86 and invalid pattern on ARM). + kErrorInvalidImmediate, + + //! Invalid operand size. + kErrorInvalidOperandSize, + //! Ambiguous operand size (memory has zero size while it's required to determine the operation type. + kErrorAmbiguousOperandSize, + //! Mismatching operand size (size of multiple operands doesn't match the operation size). + kErrorOperandSizeMismatch, + + //! Invalid option. + kErrorInvalidOption, + //! Option already defined. + kErrorOptionAlreadyDefined, + + //! Invalid TypeId. + kErrorInvalidTypeId, + //! Invalid use of a 8-bit GPB-HIGH register. + kErrorInvalidUseOfGpbHi, + //! Invalid use of a 64-bit GPQ register in 32-bit mode. + kErrorInvalidUseOfGpq, + //! Invalid use of an 80-bit float (Type::kIdF80). + kErrorInvalidUseOfF80, + //! Some registers in the instruction muse be consecutive (some ARM and AVX512 neural-net instructions). + kErrorNotConsecutiveRegs, + + //! AsmJit requires a physical register, but no one is available. + kErrorNoMorePhysRegs, + //! A variable has been assigned more than once to a function argument (BaseCompiler). + kErrorOverlappedRegs, + //! Invalid register to hold stack arguments offset. + kErrorOverlappingStackRegWithRegArg, + + //! Unbound label cannot be evaluated by expression. + kErrorExpressionLabelNotBound, + //! Arithmetic overflow during expression evaluation. + kErrorExpressionOverflow, + + //! Count of AsmJit error codes. + kErrorCount +}; + +// ============================================================================ +// [asmjit::ByteOrder] +// ============================================================================ + +//! Byte order. +namespace ByteOrder { + enum : uint32_t { + kLE = 0, + kBE = 1, + kNative = ASMJIT_ARCH_LE ? kLE : kBE, + kSwapped = ASMJIT_ARCH_LE ? 
kBE : kLE + }; +} + +// ============================================================================ +// [asmjit::ptr_as_func / func_as_ptr] +// ============================================================================ + +template +static constexpr Func ptr_as_func(void* func) noexcept { return Support::ptr_cast_impl(func); } +template +static constexpr void* func_as_ptr(Func func) noexcept { return Support::ptr_cast_impl(func); } + +// ============================================================================ +// [asmjit::DebugUtils] +// ============================================================================ + +//! Debugging utilities. +namespace DebugUtils { + +//! Returns the error `err` passed. +//! +//! Provided for debugging purposes. Putting a breakpoint inside `errored` can +//! help with tracing the origin of any error reported / returned by AsmJit. +static constexpr Error errored(Error err) noexcept { return err; } + +//! Returns a printable version of `asmjit::Error` code. +ASMJIT_API const char* errorAsString(Error err) noexcept; + +//! Called to output debugging message(s). +ASMJIT_API void debugOutput(const char* str) noexcept; + +//! Called on assertion failure. +//! +//! \param file Source file name where it happened. +//! \param line Line in the source file. +//! \param msg Message to display. +//! +//! If you have problems with assertions put a breakpoint at assertionFailed() +//! function (asmjit/core/globals.cpp) and check the call stack to locate the +//! failing code. +ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept; + +#if defined(ASMJIT_BUILD_DEBUG) +#define ASMJIT_ASSERT(EXP) \ + do { \ + if (ASMJIT_LIKELY(EXP)) \ + break; \ + ::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #EXP); \ + } while (0) +#else +#define ASMJIT_ASSERT(EXP) ((void)0) +#endif + +//! Used by AsmJit to propagate a possible `Error` produced by `...` to the caller. +#define ASMJIT_PROPAGATE(...) \ + do { \ + ::asmjit::Error _err = __VA_ARGS__; \ + if (ASMJIT_UNLIKELY(_err)) \ + return _err; \ + } while (0) + +} // {DebugUtils} + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_GLOBALS_H diff --git a/src/asmjit/core/inst.cpp b/src/asmjit/core/inst.cpp new file mode 100644 index 0000000..f5a2bf9 --- /dev/null +++ b/src/asmjit/core/inst.cpp @@ -0,0 +1,124 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
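+
+// Note on the error convention used by the dispatchers in this file: every
+// failure path returns through `DebugUtils::errored()`, so a breakpoint placed
+// in that function catches the origin of any error. A caller-side sketch
+// (illustrative only):
+//
+//   Error err = InstAPI::validate(archId, inst, operands, opCount);
+//   if (err != kErrorOk)
+//     printf("validation failed: %s\n", DebugUtils::errorAsString(err));
+//
+// Inside AsmJit the same check is typically written as:
+//
+//   ASMJIT_PROPAGATE(InstAPI::validate(archId, inst, operands, opCount));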
+ +#define ASMJIT_EXPORTS + +#include "../core/build.h" +#ifdef ASMJIT_BUILD_X86 + +#include "../core/arch.h" +#include "../core/inst.h" + +#ifdef ASMJIT_BUILD_X86 + #include "../x86/x86instapi_p.h" +#endif + +#ifdef ASMJIT_BUILD_ARM + #include "../arm/arminstapi_p.h" +#endif + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::InstAPI - Text] +// ============================================================================ + +#ifndef ASMJIT_NO_TEXT +Error InstAPI::instIdToString(uint32_t archId, uint32_t instId, String& output) noexcept { +#ifdef ASMJIT_BUILD_X86 + if (ArchInfo::isX86Family(archId)) + return x86::InstInternal::instIdToString(archId, instId, output); +#endif + +#ifdef ASMJIT_BUILD_ARM + if (ArchInfo::isArmFamily(archId)) + return arm::InstInternal::instIdToString(archId, instId, output); +#endif + + return DebugUtils::errored(kErrorInvalidArch); +} + +uint32_t InstAPI::stringToInstId(uint32_t archId, const char* s, size_t len) noexcept { +#ifdef ASMJIT_BUILD_X86 + if (ArchInfo::isX86Family(archId)) + return x86::InstInternal::stringToInstId(archId, s, len); +#endif + +#ifdef ASMJIT_BUILD_ARM + if (ArchInfo::isArmFamily(archId)) + return arm::InstInternal::stringToInstId(archId, s, len); +#endif + + return 0; +} +#endif // !ASMJIT_NO_TEXT + +// ============================================================================ +// [asmjit::InstAPI - Validate] +// ============================================================================ + +#ifndef ASMJIT_NO_VALIDATION +Error InstAPI::validate(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept { +#ifdef ASMJIT_BUILD_X86 + if (ArchInfo::isX86Family(archId)) + return x86::InstInternal::validate(archId, inst, operands, opCount); +#endif + +#ifdef ASMJIT_BUILD_ARM + if (ArchInfo::isArmFamily(archId)) + return arm::InstInternal::validate(archId, inst, operands, opCount); +#endif + + return DebugUtils::errored(kErrorInvalidArch); +} +#endif // !ASMJIT_NO_VALIDATION + +// ============================================================================ +// [asmjit::InstAPI - QueryRWInfo] +// ============================================================================ + +#ifndef ASMJIT_NO_INTROSPECTION +Error InstAPI::queryRWInfo(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, InstRWInfo& out) noexcept { + if (ASMJIT_UNLIKELY(opCount > 6)) + return DebugUtils::errored(kErrorInvalidArgument); + +#ifdef ASMJIT_BUILD_X86 + if (ArchInfo::isX86Family(archId)) + return x86::InstInternal::queryRWInfo(archId, inst, operands, opCount, out); +#endif + +#ifdef ASMJIT_BUILD_ARM + if (ArchInfo::isArmFamily(archId)) + return arm::InstInternal::queryRWInfo(archId, inst, operands, opCount, out); +#endif + + return DebugUtils::errored(kErrorInvalidArch); +} +#endif // !ASMJIT_NO_INTROSPECTION + +// ============================================================================ +// [asmjit::InstAPI - QueryFeatures] +// ============================================================================ + +#ifndef ASMJIT_NO_INTROSPECTION +Error InstAPI::queryFeatures(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, BaseFeatures& out) noexcept { +#ifdef ASMJIT_BUILD_X86 + if (ArchInfo::isX86Family(archId)) + return x86::InstInternal::queryFeatures(archId, inst, operands, opCount, out); +#endif + +#ifdef ASMJIT_BUILD_ARM + if (ArchInfo::isArmFamily(archId)) + return 
arm::InstInternal::queryFeatures(archId, inst, operands, opCount, out); +#endif + + return DebugUtils::errored(kErrorInvalidArch); +} +#endif // !ASMJIT_NO_INTROSPECTION + +ASMJIT_END_NAMESPACE + +#endif // ASMJIT_BUILD_X86 diff --git a/src/asmjit/core/inst.h b/src/asmjit/core/inst.h new file mode 100644 index 0000000..9605b8b --- /dev/null +++ b/src/asmjit/core/inst.h @@ -0,0 +1,448 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_INST_H +#define _ASMJIT_CORE_INST_H + +#include "../core/cpuinfo.h" +#include "../core/operand.h" +#include "../core/string.h" +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_core +//! \{ + +// ============================================================================ +// [asmjit::InstInfo] +// ============================================================================ + +// TODO: Finalize instruction info and make more x86::InstDB methods/structs private. + +/* + +struct InstInfo { + //! Architecture agnostic attributes. + enum Attributes : uint32_t { + + + }; + + //! Instruction attributes. + uint32_t _attributes; + + inline void reset() noexcept { memset(this, 0, sizeof(*this)); } + + inline uint32_t attributes() const noexcept { return _attributes; } + inline bool hasAttribute(uint32_t attr) const noexcept { return (_attributes & attr) != 0; } +}; + +//! Gets attributes of the given instruction. +ASMJIT_API Error queryCommonInfo(uint32_t archId, uint32_t instId, InstInfo& out) noexcept; + +*/ + +// ============================================================================ +// [asmjit::InstRWInfo / OpRWInfo] +// ============================================================================ + +//! Read/Write information related to a single operand, used by `InstRWInfo`. +struct OpRWInfo { + //! Read/Write flags, see `OpRWInfo::Flags`. + uint32_t _opFlags; + //! Physical register index, if required. + uint8_t _physId; + //! Size of a possible memory operand that can replace a register operand. + uint8_t _rmSize; + //! Reserved for future use. + uint8_t _reserved[2]; + //! Read bit-mask where each bit represents one byte read from Reg/Mem. + uint64_t _readByteMask; + //! Write bit-mask where each bit represents one byte written to Reg/Mem. + uint64_t _writeByteMask; + //! Zero/Sign extend bit-mask where each bit represents one byte written to Reg/Mem. + uint64_t _extendByteMask; + + //! Flags describe how the operand is accessed and some additional information. + enum Flags : uint32_t { + //! Operand is read. + //! + //! \note This flag must be `0x00000001`. + kRead = 0x00000001u, + + //! Operand is written. + //! + //! \note This flag must be `0x00000002`. + kWrite = 0x00000002u, + + //! Operand is both read and written. + //! + //! \note This combination of flags must be `0x00000003`. + kRW = 0x00000003u, + + //! Register operand can be replaced by a memory operand. + kRegMem = 0x00000004u, + + //! The `extendByteMask()` represents a zero extension. + kZExt = 0x00000010u, + + //! Register operand must use `physId()`. + kRegPhysId = 0x00000100u, + //! Base register of a memory operand must use `physId()`. + kMemPhysId = 0x00000200u, + + //! This memory operand is only used to encode registers and doesn't access memory. + //! + //! X86 Specific + //! ------------ + //! + //! Instructions that use such feature include BNDLDX, BNDSTX, and LEA. + kMemFake = 0x000000400u, + + //! Base register of the memory operand will be read. 
+ kMemBaseRead = 0x00001000u, + //! Base register of the memory operand will be written. + kMemBaseWrite = 0x00002000u, + //! Base register of the memory operand will be read & written. + kMemBaseRW = 0x00003000u, + + //! Index register of the memory operand will be read. + kMemIndexRead = 0x00004000u, + //! Index register of the memory operand will be written. + kMemIndexWrite = 0x00008000u, + //! Index register of the memory operand will be read & written. + kMemIndexRW = 0x0000C000u, + + //! Base register of the memory operand will be modified before the operation. + kMemBasePreModify = 0x00010000u, + //! Base register of the memory operand will be modified after the operation. + kMemBasePostModify = 0x00020000u + }; + + static_assert(kRead == 0x1, "OpRWInfo::kRead flag must be 0x1"); + static_assert(kWrite == 0x2, "OpRWInfo::kWrite flag must be 0x2"); + static_assert(kRegMem == 0x4, "OpRWInfo::kRegMem flag must be 0x4"); + + //! \name Reset + //! \{ + + inline void reset() noexcept { memset(this, 0, sizeof(*this)); } + inline void reset(uint32_t opFlags, uint32_t regSize, uint32_t physId = BaseReg::kIdBad) noexcept { + _opFlags = opFlags; + _physId = uint8_t(physId); + _rmSize = uint8_t((opFlags & kRegMem) ? regSize : uint32_t(0)); + _resetReserved(); + + uint64_t mask = Support::lsbMask(regSize); + _readByteMask = opFlags & kRead ? mask : uint64_t(0); + _writeByteMask = opFlags & kWrite ? mask : uint64_t(0); + _extendByteMask = 0; + } + + inline void _resetReserved() noexcept { + memset(_reserved, 0, sizeof(_reserved)); + } + + //! \} + + //! \name Operand Flags + //! \{ + + inline uint32_t opFlags() const noexcept { return _opFlags; } + inline bool hasOpFlag(uint32_t flag) const noexcept { return (_opFlags & flag) != 0; } + + inline void addOpFlags(uint32_t flags) noexcept { _opFlags |= flags; } + inline void clearOpFlags(uint32_t flags) noexcept { _opFlags &= ~flags; } + + inline bool isRead() const noexcept { return hasOpFlag(kRead); } + inline bool isWrite() const noexcept { return hasOpFlag(kWrite); } + inline bool isReadWrite() const noexcept { return (_opFlags & kRW) == kRW; } + inline bool isReadOnly() const noexcept { return (_opFlags & kRW) == kRead; } + inline bool isWriteOnly() const noexcept { return (_opFlags & kRW) == kWrite; } + inline bool isRm() const noexcept { return hasOpFlag(kRegMem); } + inline bool isZExt() const noexcept { return hasOpFlag(kZExt); } + + //! \} + + //! \name Physical Register ID + //! \{ + + inline uint32_t physId() const noexcept { return _physId; } + inline bool hasPhysId() const noexcept { return _physId != BaseReg::kIdBad; } + inline void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); } + + //! \} + + //! \name Reg/Mem + //! \{ + + inline uint32_t rmSize() const noexcept { return _rmSize; } + inline void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); } + + //! \} + + //! \name Read & Write Masks + //! \{ + + inline uint64_t readByteMask() const noexcept { return _readByteMask; } + inline uint64_t writeByteMask() const noexcept { return _writeByteMask; } + inline uint64_t extendByteMask() const noexcept { return _extendByteMask; } + + inline void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; } + inline void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; } + inline void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; } + + //! \} +}; + +//! Read/Write information of an instruction. +struct InstRWInfo { + //! Instruction flags. 
+ uint32_t _instFlags; + //! Mask of flags read. + uint32_t _readFlags; + //! Mask of flags written. + uint32_t _writeFlags; + //! Count of operands. + uint8_t _opCount; + //! CPU feature required for replacing register operand with memory operand. + uint8_t _rmFeature; + //! Reserved for future use. + uint8_t _reserved[19]; + //! Read/Write onfo of extra register (rep{} or kz{}). + OpRWInfo _extraReg; + //! Read/Write info of instruction operands. + OpRWInfo _operands[Globals::kMaxOpCount]; + + inline void reset() noexcept { memset(this, 0, sizeof(*this)); } + + inline uint32_t instFlags() const noexcept { return _instFlags; } + inline bool hasInstFlag(uint32_t flag) const noexcept { return (_instFlags & flag) != 0; } + + inline uint32_t opCount() const noexcept { return _opCount; } + + inline uint32_t readFlags() const noexcept { return _readFlags; } + inline uint32_t writeFlags() const noexcept { return _writeFlags; } + + //! Returns the CPU feature required to replace a register operand with memory + //! operand. If the returned feature is zero (none) then this instruction + //! either doesn't provide memory operand combination or there is no extra + //! CPU feature required. + //! + //! X86 Specific + //! ------------ + //! + //! Some AVX+ instructions may require extra features for replacing registers + //! with memory operands, for example VPSLLDQ instruction only supports + //! 'reg/reg/imm' combination on AVX/AVX2 capable CPUs and requires AVX-512 for + //! 'reg/mem/imm' combination. + inline uint32_t rmFeature() const noexcept { return _rmFeature; } + + inline const OpRWInfo& extraReg() const noexcept { return _extraReg; } + inline const OpRWInfo* operands() const noexcept { return _operands; } + + inline const OpRWInfo& operand(size_t index) const noexcept { + ASMJIT_ASSERT(index < Globals::kMaxOpCount); + return _operands[index]; + } +}; + +// ============================================================================ +// [asmjit::BaseInst] +// ============================================================================ + +//! Instruction id, options, and extraReg in a single structure. This structure +//! exists mainly to simplify analysis and validation API that requires `BaseInst` +//! and `Operand[]` array. +class BaseInst { +public: + //! Instruction id. + uint32_t _id; + //! Instruction options. + uint32_t _options; + //! Extra register used by instruction (either REP register or AVX-512 selector). + RegOnly _extraReg; + + enum Id : uint32_t { + //! Invalid or uninitialized instruction id. + kIdNone = 0x00000000u, + //! Abstract instruction (BaseBuilder and BaseCompiler). + kIdAbstract = 0x80000000u + }; + + enum Options : uint32_t { + //! Used internally by emitters for handling errors and rare cases. + kOptionReserved = 0x00000001u, + + //! Used only by Assembler to mark that `_op4` and `_op5` are used (internal). + kOptionOp4Op5Used = 0x00000002u, + + //! Prevents following a jump during compilation (BaseCompiler). + kOptionUnfollow = 0x00000010u, + + //! Overwrite the destination operand(s) (BaseCompiler). + //! + //! Hint that is important for register liveness analysis. It tells the + //! compiler that the destination operand will be overwritten now or by + //! adjacent instructions. BaseCompiler knows when a register is completely + //! overwritten by a single instruction, for example you don't have to + //! mark "movaps" or "pxor x, x", however, if a pair of instructions is + //! used and the first of them doesn't completely overwrite the content + //! 
of the destination, BaseCompiler fails to mark that register as dead.
+    //!
+    //! X86 Specific
+    //! ------------
+    //!
+    //!   - All instructions that always overwrite at least the size of the
+    //!     register the virtual-register uses, for example "mov", "movq",
+    //!     "movaps" don't need the overwrite option to be used - conversion,
+    //!     shuffle, and other miscellaneous instructions included.
+    //!
+    //!   - All instructions that clear the destination register if all operands
+    //!     are the same, for example "xor x, x", "pcmpeqb x, x", etc...
+    //!
+    //!   - Consecutive instructions that partially overwrite the variable until
+    //!     there is no old content require `BaseCompiler::overwrite()` to be used.
+    //!     Some examples (not always the best use cases, though):
+    //!
+    //!       - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
+    //!       - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
+    //!       - `mov al, ?` followed by `and ax, 0xFF`
+    //!       - `mov al, ?` followed by `mov ah, al`
+    //!       - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
+    //!
+    //!   - If an allocated variable is used temporarily for scalar operations. For
+    //!     example, if you allocate a full vector like `x86::Compiler::newXmm()`
+    //!     and then use that vector for scalar operations, you should use the
+    //!     `overwrite()` directive:
+    //!
+    //!       - `sqrtss x, y` - only the LO element of `x` is changed; if you don't
+    //!         use the HI elements, use `compiler.overwrite().sqrtss(x, y)`.
+    kOptionOverwrite = 0x00000020u,
+
+    //! Emit short-form of the instruction.
+    kOptionShortForm = 0x00000040u,
+    //! Emit long-form of the instruction.
+    kOptionLongForm = 0x00000080u,
+
+    //! Conditional jump is likely to be taken.
+    kOptionTaken = 0x00000100u,
+    //! Conditional jump is unlikely to be taken.
+    kOptionNotTaken = 0x00000200u
+  };
+
+  //! Control type.
+  enum ControlType : uint32_t {
+    //! No control type (doesn't jump).
+    kControlNone = 0u,
+    //! Unconditional jump.
+    kControlJump = 1u,
+    //! Conditional jump (branch).
+    kControlBranch = 2u,
+    //! Function call.
+    kControlCall = 3u,
+    //! Function return.
+    kControlReturn = 4u
+  };
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline explicit BaseInst(uint32_t id = 0, uint32_t options = 0) noexcept
+    : _id(id),
+      _options(options),
+      _extraReg() {}
+
+  inline BaseInst(uint32_t id, uint32_t options, const RegOnly& extraReg) noexcept
+    : _id(id),
+      _options(options),
+      _extraReg(extraReg) {}
+
+  inline BaseInst(uint32_t id, uint32_t options, const BaseReg& extraReg) noexcept
+    : _id(id),
+      _options(options),
+      _extraReg { extraReg.signature(), extraReg.id() } {}
+
+  //! \}
+
+  //! \name Instruction ID
+  //! \{
+
+  inline uint32_t id() const noexcept { return _id; }
+  inline void setId(uint32_t id) noexcept { _id = id; }
+  inline void resetId() noexcept { _id = 0; }
+
+  //! \}
+
+  //! \name Instruction Options
+  //! \{
+
+  inline uint32_t options() const noexcept { return _options; }
+  inline void setOptions(uint32_t options) noexcept { _options = options; }
+  inline void addOptions(uint32_t options) noexcept { _options |= options; }
+  inline void clearOptions(uint32_t options) noexcept { _options &= ~options; }
+  inline void resetOptions() noexcept { _options = 0; }
+
+  //! \}
+
+  //! \name Extra Register
+  //!
\{ + + inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); } + inline RegOnly& extraReg() noexcept { return _extraReg; } + inline const RegOnly& extraReg() const noexcept { return _extraReg; } + inline void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); } + inline void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); } + inline void resetExtraReg() noexcept { _extraReg.reset(); } + + //! \} +}; + +// ============================================================================ +// [asmjit::InstAPI] +// ============================================================================ + +//! Instruction API. +namespace InstAPI { + +#ifndef ASMJIT_NO_TEXT +//! Appends the name of the instruction specified by `instId` and `instOptions` +//! into the `output` string. +//! +//! \note Instruction options would only affect instruction prefix & suffix, +//! other options would be ignored. If `instOptions` is zero then only raw +//! instruction name (without any additional text) will be appended. +ASMJIT_API Error instIdToString(uint32_t archId, uint32_t instId, String& output) noexcept; + +//! Parses an instruction name in the given string `s`. Length is specified +//! by `len` argument, which can be `SIZE_MAX` if `s` is known to be null +//! terminated. +//! +//! The output is stored in `instId`. +ASMJIT_API uint32_t stringToInstId(uint32_t archId, const char* s, size_t len) noexcept; +#endif // !ASMJIT_NO_TEXT + +#ifndef ASMJIT_NO_VALIDATION +//! Validates the given instruction. +ASMJIT_API Error validate(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept; +#endif // !ASMJIT_NO_VALIDATION + +#ifndef ASMJIT_NO_INTROSPECTION +//! Gets Read/Write information of the given instruction. +ASMJIT_API Error queryRWInfo(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, InstRWInfo& out) noexcept; + +//! Gets CPU features required by the given instruction. +ASMJIT_API Error queryFeatures(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, BaseFeatures& out) noexcept; +#endif // !ASMJIT_NO_INTROSPECTION + +} // {InstAPI} + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_INST_H diff --git a/src/asmjit/core/jitallocator.cpp b/src/asmjit/core/jitallocator.cpp new file mode 100644 index 0000000..76166d2 --- /dev/null +++ b/src/asmjit/core/jitallocator.cpp @@ -0,0 +1,1137 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#define ASMJIT_EXPORTS + +#include "../core/build.h" +#ifndef ASMJIT_NO_JIT + +#include "../core/arch.h" +#include "../core/jitallocator.h" +#include "../core/osutils.h" +#include "../core/support.h" +#include "../core/virtmem.h" +#include "../core/zone.h" +#include "../core/zonelist.h" +#include "../core/zonetree.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::JitAllocator - Constants] +// ============================================================================ + +enum JitAllocatorConstants : uint32_t { + //! Number of pools to use when `JitAllocator::kOptionUseMultiplePools` is set. + //! + //! Each pool increases granularity twice to make memory management more + //! efficient. Ideal number of pools appears to be 3 to 4 as it distributes + //! small and large functions properly. + kJitAllocatorMultiPoolCount = 3, + + //! Minimum granularity (and the default granularity for pool #0). 
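+  //! (Pool #N uses `granularity << N`, so with the default base granularity
+  //! and multiple pools enabled the pools use 64, 128, and 256 bytes.)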
+ kJitAllocatorBaseGranularity = 64, + + //! Maximum block size (16MB). + kJitAllocatorMaxBlockSize = 1024 * 1024 * 16 +}; + +static inline uint32_t JitAllocator_defaultFillPattern() noexcept { + // X86 and X86_64 - 4x 'int3' instruction. + if (ASMJIT_ARCH_X86) + return 0xCCCCCCCCu; + + // Unknown... + return 0u; +} + +// ============================================================================ +// [asmjit::JitAllocator - BitFlipIterator] +// ============================================================================ + +//! BitWord[] iterator used by `JitAllocator` that can flip the search pattern +//! during iteration. +template +class BitFlipIterator { +public: + ASMJIT_INLINE BitFlipIterator(const T* data, size_t numBitWords, size_t start = 0, T xorMask = 0) noexcept { + init(data, numBitWords, start, xorMask); + } + + ASMJIT_INLINE void init(const T* data, size_t numBitWords, size_t start = 0, T xorMask = 0) noexcept { + const T* ptr = data + (start / Support::bitSizeOf()); + size_t idx = Support::alignDown(start, Support::bitSizeOf()); + size_t end = numBitWords * Support::bitSizeOf(); + + T bitWord = T(0); + if (idx < end) { + bitWord = (*ptr++ ^ xorMask) & (Support::allOnes() << (start % Support::bitSizeOf())); + while (!bitWord && (idx += Support::bitSizeOf()) < end) + bitWord = *ptr++ ^ xorMask; + } + + _ptr = ptr; + _idx = idx; + _end = end; + _current = bitWord; + _xorMask = xorMask; + } + + ASMJIT_INLINE bool hasNext() const noexcept { + return _current != T(0); + } + + ASMJIT_INLINE size_t next() noexcept { + T bitWord = _current; + ASMJIT_ASSERT(bitWord != T(0)); + + uint32_t bit = Support::ctz(bitWord); + bitWord ^= T(1u) << bit; + + size_t n = _idx + bit; + while (!bitWord && (_idx += Support::bitSizeOf()) < _end) + bitWord = *_ptr++ ^ _xorMask; + + _current = bitWord; + return n; + } + + ASMJIT_INLINE size_t nextAndFlip() noexcept { + T bitWord = _current; + ASMJIT_ASSERT(bitWord != T(0)); + + uint32_t bit = Support::ctz(bitWord); + bitWord ^= Support::allOnes() << bit; + _xorMask ^= Support::allOnes(); + + size_t n = _idx + bit; + while (!bitWord && (_idx += Support::bitSizeOf()) < _end) + bitWord = *_ptr++ ^ _xorMask; + + _current = bitWord; + return n; + } + + ASMJIT_INLINE size_t peekNext() const noexcept { + ASMJIT_ASSERT(_current != T(0)); + return _idx + Support::ctz(_current); + } + + const T* _ptr; + size_t _idx; + size_t _end; + T _current; + T _xorMask; +}; + +// ============================================================================ +// [asmjit::JitAllocator - Pool] +// ============================================================================ + +class JitAllocatorBlock; + +class JitAllocatorPool { +public: + ASMJIT_NONCOPYABLE(JitAllocatorPool) + + inline JitAllocatorPool(uint32_t granularity) noexcept + : blocks(), + cursor(nullptr), + blockCount(0), + granularity(uint16_t(granularity)), + granularityLog2(uint8_t(Support::ctz(granularity))), + emptyBlockCount(0), + totalAreaSize(0), + totalAreaUsed(0), + totalOverheadBytes(0) {} + + inline void reset() noexcept { + blocks.reset(); + cursor = nullptr; + blockCount = 0; + totalAreaSize = 0; + totalAreaUsed = 0; + totalOverheadBytes = 0; + } + + inline size_t byteSizeFromAreaSize(uint32_t areaSize) const noexcept { return size_t(areaSize) * granularity; } + inline uint32_t areaSizeFromByteSize(size_t size) const noexcept { return uint32_t((size + granularity - 1) >> granularityLog2); } + + inline size_t bitWordCountFromAreaSize(uint32_t areaSize) const noexcept { + using namespace Support; + return 
alignUp(areaSize, kBitWordSizeInBits) / kBitWordSizeInBits; + } + + //! Double linked list of blocks. + ZoneList blocks; + //! Where to start looking first. + JitAllocatorBlock* cursor; + + //! Count of blocks. + uint32_t blockCount; + //! Allocation granularity. + uint16_t granularity; + //! Log2(granularity). + uint8_t granularityLog2; + //! Count of empty blocks (either 0 or 1 as we won't keep more blocks empty). + uint8_t emptyBlockCount; + + //! Number of bits reserved across all blocks. + size_t totalAreaSize; + //! Number of bits used across all blocks. + size_t totalAreaUsed; + //! Overhead of all blocks (in bytes). + size_t totalOverheadBytes; +}; + +// ============================================================================ +// [asmjit::JitAllocator - Block] +// ============================================================================ + +class JitAllocatorBlock : public ZoneTreeNodeT, + public ZoneListNode { +public: + ASMJIT_NONCOPYABLE(JitAllocatorBlock) + + enum Flags : uint32_t { + //! Block is empty. + kFlagEmpty = 0x00000001u, + //! Block is dirty (largestUnusedArea, searchStart, searchEnd). + kFlagDirty = 0x00000002u, + //! Block is dual-mapped. + kFlagDualMapped = 0x00000004u + }; + + inline JitAllocatorBlock( + JitAllocatorPool* pool, + VirtMem::DualMapping mapping, + size_t blockSize, + uint32_t blockFlags, + Support::BitWord* usedBitVector, + Support::BitWord* stopBitVector, + uint32_t areaSize) noexcept + : ZoneTreeNodeT(), + pool(pool), + mapping(mapping), + blockSize(blockSize), + flags(blockFlags), + areaSize(areaSize), + areaUsed(0), + largestUnusedArea(areaSize), + searchStart(0), + searchEnd(areaSize), + usedBitVector(usedBitVector), + stopBitVector(stopBitVector) {} + + inline uint8_t* roPtr() const noexcept { return static_cast(mapping.ro); } + inline uint8_t* rwPtr() const noexcept { return static_cast(mapping.rw); } + + inline bool hasFlag(uint32_t f) const noexcept { return (flags & f) != 0; } + inline void addFlags(uint32_t f) noexcept { flags |= f; } + inline void clearFlags(uint32_t f) noexcept { flags &= ~f; } + + inline uint32_t areaAvailable() const noexcept { return areaSize - areaUsed; } + + inline void increaseUsedArea(uint32_t value) noexcept { + areaUsed += value; + pool->totalAreaUsed += value; + } + + inline void decreaseUsedArea(uint32_t value) noexcept { + areaUsed -= value; + pool->totalAreaUsed -= value; + } + + // RBTree default CMP uses '<' and '>' operators. + inline bool operator<(const JitAllocatorBlock& other) const noexcept { return roPtr() < other.roPtr(); } + inline bool operator>(const JitAllocatorBlock& other) const noexcept { return roPtr() > other.roPtr(); } + + // Special implementation for querying blocks by `key`, which must be in `[BlockPtr, BlockPtr + BlockSize)` range. + inline bool operator<(const uint8_t* key) const noexcept { return roPtr() + blockSize <= key; } + inline bool operator>(const uint8_t* key) const noexcept { return roPtr() > key; } + + //! Link to the pool that owns this block. + JitAllocatorPool* pool; + //! Virtual memory mapping - either single mapping (both pointers equal) or + //! dual mapping, where one pointer is Read+Execute and the second Read+Write. + VirtMem::DualMapping mapping; + //! Virtual memory size (block size) [bytes]. + size_t blockSize; + + //! Block flags. + uint32_t flags; + //! Size of the whole block area (bit-vector size). + uint32_t areaSize; + //! Used area (number of bits in bit-vector used). + uint32_t areaUsed; + //! 
The largest unused continuous area in the bit-vector (or `areaSize` to initiate rescan). + uint32_t largestUnusedArea; + //! Start of a search range (for unused bits). + uint32_t searchStart; + //! End of a search range (for unused bits). + uint32_t searchEnd; + + //! Used bit-vector (0 = unused, 1 = used). + Support::BitWord* usedBitVector; + //! Stop bit-vector (0 = don't care, 1 = stop). + Support::BitWord* stopBitVector; +}; + +// ============================================================================ +// [asmjit::JitAllocator - PrivateImpl] +// ============================================================================ + +class JitAllocatorPrivateImpl : public JitAllocator::Impl { +public: + inline JitAllocatorPrivateImpl(JitAllocatorPool* pools, size_t poolCount) noexcept + : JitAllocator::Impl {}, + pools(pools), + poolCount(poolCount) {} + inline ~JitAllocatorPrivateImpl() noexcept {} + + //! Lock for thread safety. + mutable Lock lock; + //! System page size (also a minimum block size). + uint32_t pageSize; + + //! Blocks from all pools in RBTree. + ZoneTree tree; + //! Allocator pools. + JitAllocatorPool* pools; + //! Number of allocator pools. + size_t poolCount; +}; + +static const JitAllocator::Impl JitAllocatorImpl_none {}; +static const JitAllocator::CreateParams JitAllocatorParams_none {}; + +// ============================================================================ +// [asmjit::JitAllocator - Utilities] +// ============================================================================ + +static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::CreateParams* params) noexcept { + VirtMem::Info vmInfo = VirtMem::info(); + + if (!params) + params = &JitAllocatorParams_none; + + uint32_t options = params->options; + uint32_t blockSize = params->blockSize; + uint32_t granularity = params->granularity; + uint32_t fillPattern = params->fillPattern; + + // Setup pool count to [1..3]. + size_t poolCount = 1; + if (options & JitAllocator::kOptionUseMultiplePools) + poolCount = kJitAllocatorMultiPoolCount;; + + // Setup block size [64kB..256MB]. + if (blockSize < 64 * 1024 || blockSize > 256 * 1024 * 1024 || !Support::isPowerOf2(blockSize)) + blockSize = vmInfo.pageGranularity; + + // Setup granularity [64..256]. + if (granularity < 64 || granularity > 256 || !Support::isPowerOf2(granularity)) + granularity = kJitAllocatorBaseGranularity; + + // Setup fill-pattern. 
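+  // Without kOptionCustomFillPattern the architecture default is used instead
+  // (4x 'int3' == 0xCCCCCCCC on X86/X86_64, see JitAllocator_defaultFillPattern).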
+ if (!(options & JitAllocator::kOptionCustomFillPattern)) + fillPattern = JitAllocator_defaultFillPattern(); + + size_t size = sizeof(JitAllocatorPrivateImpl) + sizeof(JitAllocatorPool) * poolCount; + void* p = ::malloc(size); + if (ASMJIT_UNLIKELY(!p)) + return nullptr; + + JitAllocatorPool* pools = reinterpret_cast((uint8_t*)p + sizeof(JitAllocatorPrivateImpl)); + JitAllocatorPrivateImpl* impl = new(p) JitAllocatorPrivateImpl(pools, poolCount); + + impl->options = options; + impl->blockSize = blockSize; + impl->granularity = granularity; + impl->fillPattern = fillPattern; + impl->pageSize = vmInfo.pageSize; + + for (size_t poolId = 0; poolId < poolCount; poolId++) + new(&pools[poolId]) JitAllocatorPool(granularity << poolId); + + return impl; +} + +static inline void JitAllocatorImpl_destroy(JitAllocatorPrivateImpl* impl) noexcept { + impl->~JitAllocatorPrivateImpl(); + ::free(impl); +} + +static inline size_t JitAllocatorImpl_sizeToPoolId(const JitAllocatorPrivateImpl* impl, size_t size) noexcept { + size_t poolId = impl->poolCount - 1; + size_t granularity = size_t(impl->granularity) << poolId; + + while (poolId) { + if (Support::alignUp(size, granularity) == size) + break; + poolId--; + granularity >>= 1; + } + + return poolId; +} + +static inline size_t JitAllocatorImpl_bitVectorSizeToByteSize(uint32_t areaSize) noexcept { + using Support::kBitWordSizeInBits; + return ((areaSize + kBitWordSizeInBits - 1u) / kBitWordSizeInBits) * sizeof(Support::BitWord); +} + +static inline size_t JitAllocatorImpl_calculateIdealBlockSize(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t allocationSize) noexcept { + JitAllocatorBlock* last = pool->blocks.last(); + size_t blockSize = last ? last->blockSize : size_t(impl->blockSize); + + if (blockSize < kJitAllocatorMaxBlockSize) + blockSize *= 2u; + + if (allocationSize > blockSize) { + blockSize = Support::alignUp(allocationSize, impl->blockSize); + if (ASMJIT_UNLIKELY(blockSize < allocationSize)) + return 0; // Overflown. + } + + return blockSize; +} + +ASMJIT_FAVOR_SPEED static void JitAllocatorImpl_fillPattern(void* mem, uint32_t pattern, size_t sizeInBytes) noexcept { + size_t n = sizeInBytes / 4u; + uint32_t* p = static_cast(mem); + + for (size_t i = 0; i < n; i++) + p[i] = pattern; +} + +// Allocate a new `JitAllocatorBlock` for the given `blockSize`. +// +// NOTE: The block doesn't have `kFlagEmpty` flag set, because the new block +// is only allocated when it's actually needed, so it would be cleared anyway. 
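+//
+// Rough layout of what a block consists of (illustrative sketch only):
+//
+//   ::malloc  -> [JitAllocatorBlock]             bookkeeping node (RB-tree + list)
+//   ::malloc  -> [usedBitVector|stopBitVector]   both bit-vectors in one allocation
+//   VirtMem   -> [blockSize bytes, RX/RW]        the memory handed out by alloc()
+//
+// Each bit in the bit-vectors covers `pool->granularity` bytes of the mapping.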
+static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t blockSize) noexcept { + using Support::BitWord; + using Support::kBitWordSizeInBits; + + uint32_t areaSize = uint32_t((blockSize + pool->granularity - 1) >> pool->granularityLog2); + uint32_t numBitWords = (areaSize + kBitWordSizeInBits - 1u) / kBitWordSizeInBits; + + JitAllocatorBlock* block = static_cast(::malloc(sizeof(JitAllocatorBlock))); + BitWord* bitWords = nullptr; + VirtMem::DualMapping virtMem {}; + Error err = kErrorOutOfMemory; + + if (block != nullptr) + bitWords = static_cast(::malloc(size_t(numBitWords) * 2 * sizeof(BitWord))); + + uint32_t blockFlags = 0; + if (bitWords != nullptr) { + if (impl->options & JitAllocator::kOptionUseDualMapping) { + err = VirtMem::allocDualMapping(&virtMem, blockSize, VirtMem::kAccessReadWrite | VirtMem::kAccessExecute); + blockFlags |= JitAllocatorBlock::kFlagDualMapped; + } + else { + err = VirtMem::alloc(&virtMem.ro, blockSize, VirtMem::kAccessReadWrite | VirtMem::kAccessExecute); + virtMem.rw = virtMem.ro; + } + } + + // Out of memory. + if (ASMJIT_UNLIKELY(!block || !bitWords || err != kErrorOk)) { + if (bitWords) ::free(bitWords); + if (block) ::free(block); + return nullptr; + } + + // Fill the memory if the secure mode is enabled. + if (impl->options & JitAllocator::kOptionFillUnusedMemory) + JitAllocatorImpl_fillPattern(virtMem.rw, impl->fillPattern, blockSize); + + memset(bitWords, 0, size_t(numBitWords) * 2 * sizeof(BitWord)); + return new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize); +} + +static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { + ASMJIT_UNUSED(impl); + + if (block->flags & JitAllocatorBlock::kFlagDualMapped) + VirtMem::releaseDualMapping(&block->mapping, block->blockSize); + else + VirtMem::release(block->mapping.ro, block->blockSize); + + ::free(block->usedBitVector); + ::free(block); +} + +static void JitAllocatorImpl_insertBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { + JitAllocatorPool* pool = block->pool; + + if (!pool->cursor) + pool->cursor = block; + + // Add to RBTree and List. + impl->tree.insert(block); + pool->blocks.append(block); + + // Update statistics. + pool->blockCount++; + pool->totalAreaSize += block->areaSize; + pool->totalOverheadBytes += sizeof(JitAllocatorBlock) + JitAllocatorImpl_bitVectorSizeToByteSize(block->areaSize) * 2u; +} + +static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { + JitAllocatorPool* pool = block->pool; + + // Remove from RBTree and List. + if (pool->cursor == block) + pool->cursor = block->hasPrev() ? block->prev() : block->next(); + + impl->tree.remove(block); + pool->blocks.unlink(block); + + // Update statistics. 
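+  // (Mirrors JitAllocatorImpl_insertBlock() - the per-block overhead is the
+  // block struct itself plus both bit-vectors.)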
+ pool->blockCount--; + pool->totalAreaSize -= block->areaSize; + pool->totalOverheadBytes -= sizeof(JitAllocatorBlock) + JitAllocatorImpl_bitVectorSizeToByteSize(block->areaSize) * 2u; +} + +static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { + JitAllocatorPool* pool = block->pool; + + if (block->hasFlag(JitAllocatorBlock::kFlagEmpty)) + return; + + uint32_t areaSize = block->areaSize; + uint32_t granularity = pool->granularity; + size_t numBitWords = pool->bitWordCountFromAreaSize(areaSize); + + if (impl->options & JitAllocator::kOptionFillUnusedMemory) { + BitFlipIterator it(block->usedBitVector, numBitWords); + + while (it.hasNext()) { + uint32_t start = uint32_t(it.nextAndFlip()); + uint32_t end = areaSize; + + if (it.hasNext()) + end = uint32_t(it.nextAndFlip()); + + JitAllocatorImpl_fillPattern(block->rwPtr() + start * granularity, impl->fillPattern, (end - start) * granularity); + } + } + + memset(block->usedBitVector, 0, size_t(numBitWords) * sizeof(Support::BitWord)); + memset(block->stopBitVector, 0, size_t(numBitWords) * sizeof(Support::BitWord)); + + block->areaUsed = 0; + block->largestUnusedArea = areaSize; + block->searchStart = 0; + block->searchEnd = areaSize; + block->addFlags(JitAllocatorBlock::kFlagEmpty); + block->clearFlags(JitAllocatorBlock::kFlagDirty); +} + +// ============================================================================ +// [asmjit::JitAllocator - Construction / Destruction] +// ============================================================================ + +JitAllocator::JitAllocator(const CreateParams* params) noexcept { + _impl = JitAllocatorImpl_new(params); + if (ASMJIT_UNLIKELY(!_impl)) + _impl = const_cast(&JitAllocatorImpl_none); +} + +JitAllocator::~JitAllocator() noexcept { + if (_impl == &JitAllocatorImpl_none) + return; + + reset(Globals::kResetHard); + JitAllocatorImpl_destroy(static_cast(_impl)); +} + +// ============================================================================ +// [asmjit::JitAllocator - Reset] +// ============================================================================ + +void JitAllocator::reset(uint32_t resetPolicy) noexcept { + if (_impl == &JitAllocatorImpl_none) + return; + + JitAllocatorPrivateImpl* impl = static_cast(_impl); + impl->tree.reset(); + size_t poolCount = impl->poolCount; + + for (size_t poolId = 0; poolId < poolCount; poolId++) { + JitAllocatorPool& pool = impl->pools[poolId]; + JitAllocatorBlock* block = pool.blocks.first(); + + JitAllocatorBlock* blockToKeep = nullptr; + if (resetPolicy != Globals::kResetHard && !(impl->options & kOptionImmediateRelease)) { + blockToKeep = block; + block = block->next(); + } + + while (block) { + JitAllocatorBlock* next = block->next(); + JitAllocatorImpl_deleteBlock(impl, block); + block = next; + } + + pool.reset(); + + if (blockToKeep) { + blockToKeep->_listNodes[0] = nullptr; + blockToKeep->_listNodes[1] = nullptr; + JitAllocatorImpl_wipeOutBlock(impl, blockToKeep); + JitAllocatorImpl_insertBlock(impl, blockToKeep); + pool.emptyBlockCount = 1; + } + } +} + +// ============================================================================ +// [asmjit::JitAllocator - Statistics] +// ============================================================================ + +JitAllocator::Statistics JitAllocator::statistics() const noexcept { + Statistics statistics; + statistics.reset(); + + if (ASMJIT_LIKELY(_impl != &JitAllocatorImpl_none)) { + JitAllocatorPrivateImpl* impl = static_cast(_impl); + ScopedLock 
locked(impl->lock); + + size_t poolCount = impl->poolCount; + for (size_t poolId = 0; poolId < poolCount; poolId++) { + const JitAllocatorPool& pool = impl->pools[poolId]; + statistics._blockCount += size_t(pool.blockCount); + statistics._reservedSize += size_t(pool.totalAreaSize) * pool.granularity; + statistics._usedSize += size_t(pool.totalAreaUsed) * pool.granularity; + statistics._overheadSize += size_t(pool.totalOverheadBytes); + } + } + + return statistics; +} + +// ============================================================================ +// [asmjit::JitAllocator - Alloc / Release] +// ============================================================================ + +Error JitAllocator::alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcept { + if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) + return DebugUtils::errored(kErrorNotInitialized); + + JitAllocatorPrivateImpl* impl = static_cast(_impl); + constexpr uint32_t kNoIndex = std::numeric_limits::max(); + + *roPtrOut = nullptr; + *rwPtrOut = nullptr; + + // Align to the minimum granularity by default. + size = Support::alignUp(size, impl->granularity); + if (ASMJIT_UNLIKELY(size == 0)) + return DebugUtils::errored(kErrorInvalidArgument); + + if (ASMJIT_UNLIKELY(size > std::numeric_limits::max() / 2)) + return DebugUtils::errored(kErrorTooLarge); + + ScopedLock locked(impl->lock); + JitAllocatorPool* pool = &impl->pools[JitAllocatorImpl_sizeToPoolId(impl, size)]; + + uint32_t areaIndex = kNoIndex; + uint32_t areaSize = uint32_t(pool->areaSizeFromByteSize(size)); + + // Try to find the requested memory area in existing blocks. + JitAllocatorBlock* block = pool->blocks.first(); + if (block) { + JitAllocatorBlock* initial = block; + do { + JitAllocatorBlock* next = block->hasNext() ? block->next() : pool->blocks.first(); + if (block->areaAvailable() >= areaSize) { + if (block->hasFlag(JitAllocatorBlock::kFlagDirty) || block->largestUnusedArea >= areaSize) { + uint32_t blockAreaSize = block->areaSize; + uint32_t searchStart = block->searchStart; + uint32_t searchEnd = block->searchEnd; + + BitFlipIterator it( + block->usedBitVector, + pool->bitWordCountFromAreaSize(searchEnd), + searchStart, + Support::allOnes()); + + // If there is unused area available then there has to be at least one match. + ASMJIT_ASSERT(it.hasNext()); + + uint32_t bestArea = blockAreaSize; + uint32_t largestArea = 0; + uint32_t holeIndex = uint32_t(it.peekNext()); + uint32_t holeEnd = holeIndex; + + searchStart = holeIndex; + do { + holeIndex = uint32_t(it.nextAndFlip()); + if (holeIndex >= searchEnd) break; + + holeEnd = it.hasNext() ? Support::min(searchEnd, uint32_t(it.nextAndFlip())) : searchEnd; + uint32_t holeSize = holeEnd - holeIndex; + + if (holeSize >= areaSize && bestArea >= holeSize) { + largestArea = Support::max(largestArea, bestArea); + bestArea = holeSize; + areaIndex = holeIndex; + } + else { + largestArea = Support::max(largestArea, holeSize); + } + } while (it.hasNext()); + searchEnd = holeEnd; + + // Because we have traversed the entire block, we can now mark the + // largest unused area that can be used to cache the next traversal. 
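+          // (`largestUnusedArea` acts as a conservative cache - on the next
+          // allocation a block that is not dirty and whose largest hole is
+          // known to be too small is skipped without rescanning its bit-vector.)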
+ block->searchStart = searchStart; + block->searchEnd = searchEnd; + block->largestUnusedArea = largestArea; + block->clearFlags(JitAllocatorBlock::kFlagDirty); + + if (areaIndex != kNoIndex) { + if (searchStart == areaIndex) + block->searchStart += areaSize; + break; + } + } + } + + block = next; + } while (block != initial); + } + + // Allocate a new block if there is no region of a required width. + if (areaIndex == kNoIndex) { + size_t blockSize = JitAllocatorImpl_calculateIdealBlockSize(impl, pool, size); + if (ASMJIT_UNLIKELY(!blockSize)) + return DebugUtils::errored(kErrorOutOfMemory); + + block = JitAllocatorImpl_newBlock(impl, pool, blockSize); + + if (ASMJIT_UNLIKELY(!block)) + return DebugUtils::errored(kErrorOutOfMemory); + + JitAllocatorImpl_insertBlock(impl, block); + areaIndex = 0; + block->searchStart = areaSize; + block->largestUnusedArea = block->areaSize - areaSize; + } + + // Update statistics. + block->increaseUsedArea(areaSize); + + // Handle special cases. + if (block->hasFlag(JitAllocatorBlock::kFlagEmpty)) { + pool->emptyBlockCount--; + block->clearFlags(JitAllocatorBlock::kFlagEmpty); + } + + if (block->areaAvailable() == 0) { + // The whole block is filled. + block->searchStart = block->areaSize; + block->searchEnd = 0; + block->largestUnusedArea = 0; + block->clearFlags(JitAllocatorBlock::kFlagDirty); + } + + // Mark the newly allocated space as occupied and also the sentinel. + Support::bitVectorFill(block->usedBitVector, areaIndex, areaSize); + Support::bitVectorSetBit(block->stopBitVector, areaIndex + areaSize - 1, true); + + // Return a pointer to the allocated memory. + size_t offset = pool->byteSizeFromAreaSize(areaIndex); + ASMJIT_ASSERT(offset <= block->blockSize - size); + + *roPtrOut = block->roPtr() + offset; + *rwPtrOut = block->rwPtr() + offset; + return kErrorOk; +} + +Error JitAllocator::release(void* ro) noexcept { + if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) + return DebugUtils::errored(kErrorNotInitialized); + + if (ASMJIT_UNLIKELY(!ro)) + return DebugUtils::errored(kErrorInvalidArgument); + + JitAllocatorPrivateImpl* impl = static_cast(_impl); + ScopedLock locked(impl->lock); + + JitAllocatorBlock* block = impl->tree.get(static_cast(ro)); + if (ASMJIT_UNLIKELY(!block)) + return DebugUtils::errored(kErrorInvalidState); + + // Offset relative to the start of the block. + JitAllocatorPool* pool = block->pool; + size_t offset = (size_t)((uint8_t*)ro - block->roPtr()); + + // The first bit representing the allocated area and its size. + uint32_t areaIndex = uint32_t(offset >> pool->granularityLog2); + uint32_t areaLast = uint32_t(Support::bitVectorIndexOf(block->stopBitVector, areaIndex, true)); + uint32_t areaSize = areaLast - areaIndex + 1; + + // Update the search region and statistics. + block->searchStart = Support::min(block->searchStart, areaIndex); + block->searchEnd = Support::max(block->searchEnd, areaLast + 1); + block->addFlags(JitAllocatorBlock::kFlagDirty); + block->decreaseUsedArea(areaSize); + + // Clear all occupied bits and also the sentinel. + Support::bitVectorClear(block->usedBitVector, areaIndex, areaSize); + Support::bitVectorSetBit(block->stopBitVector, areaLast, false); + + // Fill the released memory if the secure mode is enabled. + if (impl->options & kOptionFillUnusedMemory) + JitAllocatorImpl_fillPattern(block->rwPtr() + areaIndex * pool->granularity, impl->fillPattern, areaSize * pool->granularity); + + // Release the whole block if it became empty. 
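+  // Policy: keep at most one empty block per pool so that an alloc/release
+  // ping-pong doesn't repeatedly map and unmap virtual memory; with
+  // kOptionImmediateRelease the empty block is returned to the OS right away.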
+ if (block->areaUsed == 0) { + if (pool->emptyBlockCount || (impl->options & kOptionImmediateRelease)) { + JitAllocatorImpl_removeBlock(impl, block); + JitAllocatorImpl_deleteBlock(impl, block); + } + else { + pool->emptyBlockCount++; + block->largestUnusedArea = areaSize; + block->searchStart = 0; + block->searchEnd = areaSize; + block->addFlags(JitAllocatorBlock::kFlagEmpty); + block->clearFlags(JitAllocatorBlock::kFlagDirty); + } + } + + return kErrorOk; +} + +Error JitAllocator::shrink(void* ro, size_t newSize) noexcept { + if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) + return DebugUtils::errored(kErrorNotInitialized); + + if (ASMJIT_UNLIKELY(!ro)) + return DebugUtils::errored(kErrorInvalidArgument); + + if (ASMJIT_UNLIKELY(newSize == 0)) + return release(ro); + + JitAllocatorPrivateImpl* impl = static_cast(_impl); + ScopedLock locked(impl->lock); + JitAllocatorBlock* block = impl->tree.get(static_cast(ro)); + + if (ASMJIT_UNLIKELY(!block)) + return DebugUtils::errored(kErrorInvalidArgument); + + // Offset relative to the start of the block. + JitAllocatorPool* pool = block->pool; + size_t offset = (size_t)((uint8_t*)ro - block->roPtr()); + + // The first bit representing the allocated area and its size. + uint32_t areaIndex = uint32_t(offset >> pool->granularityLog2); + uint32_t areaOldSize = uint32_t(Support::bitVectorIndexOf(block->stopBitVector, areaIndex, true)) + 1 - areaIndex; + uint32_t areaNewSize = pool->areaSizeFromByteSize(newSize); + + if (ASMJIT_UNLIKELY(areaNewSize > areaOldSize)) + return DebugUtils::errored(kErrorInvalidState); + + uint32_t areaDiff = areaOldSize - areaNewSize; + if (!areaDiff) + return kErrorOk; + + // Update the search region and statistics. + block->searchStart = Support::min(block->searchStart, areaIndex + areaNewSize); + block->searchEnd = Support::max(block->searchEnd, areaIndex + areaOldSize); + block->addFlags(JitAllocatorBlock::kFlagDirty); + block->decreaseUsedArea(areaDiff); + + // Unmark the released space and move the sentinel. + Support::bitVectorClear(block->usedBitVector, areaIndex + areaNewSize, areaDiff); + Support::bitVectorSetBit(block->stopBitVector, areaIndex + areaOldSize - 1, false); + Support::bitVectorSetBit(block->stopBitVector, areaIndex + areaNewSize - 1, true); + + // Fill released memory if the secure mode is enabled. + if (impl->options & kOptionFillUnusedMemory) + JitAllocatorImpl_fillPattern( + block->rwPtr() + (areaIndex + areaOldSize) * pool->granularity, + fillPattern(), + areaDiff * pool->granularity); + + return kErrorOk; +} + +// ============================================================================ +// [asmjit::JitAllocator - Unit] +// ============================================================================ + +#if defined(ASMJIT_TEST) +// A pseudo random number generator based on a paper by Sebastiano Vigna: +// http://vigna.di.unimi.it/ftp/papers/xorshiftplus.pdf +class Random { +public: + // Constants suggested as `23/18/5`. + enum Steps : uint32_t { + kStep1_SHL = 23, + kStep2_SHR = 18, + kStep3_SHR = 5 + }; + + inline explicit Random(uint64_t seed = 0) noexcept { reset(seed); } + inline Random(const Random& other) noexcept = default; + + inline void reset(uint64_t seed = 0) noexcept { + // The number is arbitrary, it means nothing. + constexpr uint64_t kZeroSeed = 0x1F0A2BE71D163FA0u; + + // Generate the state data by using splitmix64. 
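+    // (splitmix64 is the seeding scheme recommended for xorshift-family
+    // generators; a zero word is remapped to kZeroSeed because an all-zero
+    // state would make xorshift128+ produce only zeros.)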
+ for (uint32_t i = 0; i < 2; i++) { + seed += 0x9E3779B97F4A7C15u; + uint64_t x = seed; + x = (x ^ (x >> 30)) * 0xBF58476D1CE4E5B9u; + x = (x ^ (x >> 27)) * 0x94D049BB133111EBu; + x = (x ^ (x >> 31)); + _state[i] = x != 0 ? x : kZeroSeed; + } + } + + inline uint32_t nextUInt32() noexcept { + return uint32_t(nextUInt64() >> 32); + } + + inline uint64_t nextUInt64() noexcept { + uint64_t x = _state[0]; + uint64_t y = _state[1]; + + x ^= x << kStep1_SHL; + y ^= y >> kStep3_SHR; + x ^= x >> kStep2_SHR; + x ^= y; + + _state[0] = y; + _state[1] = x; + return x + y; + } + + uint64_t _state[2]; +}; + +// Helper class to verify that JitAllocator doesn't return addresses that overlap. +class JitAllocatorWrapper { +public: + explicit inline JitAllocatorWrapper(const JitAllocator::CreateParams* params) noexcept + : _zone(1024 * 1024), + _heap(&_zone), + _allocator(params) {} + + // Address to a memory region of a given size. + class Range { + public: + inline Range(uint8_t* addr, size_t size) noexcept + : addr(addr), + size(size) {} + uint8_t* addr; + size_t size; + }; + + // Based on JitAllocator::Block, serves our purpose well... + class Record : public ZoneTreeNodeT, + public Range { + public: + inline Record(uint8_t* addr, size_t size) + : ZoneTreeNodeT(), + Range(addr, size) {} + + inline bool operator<(const Record& other) const noexcept { return addr < other.addr; } + inline bool operator>(const Record& other) const noexcept { return addr > other.addr; } + + inline bool operator<(const uint8_t* key) const noexcept { return addr + size <= key; } + inline bool operator>(const uint8_t* key) const noexcept { return addr > key; } + }; + + void _insert(void* p_, size_t size) noexcept { + uint8_t* p = static_cast(p_); + uint8_t* pEnd = p + size - 1; + + Record* record; + + record = _records.get(p); + if (record) + EXPECT(record == nullptr, + "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size); + + record = _records.get(pEnd); + if (record) + EXPECT(record == nullptr, + "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size); + + record = _heap.newT(p, size); + EXPECT(record != nullptr, + "Out of memory, cannot allocate 'Record'"); + + _records.insert(record); + } + + void _remove(void* p) noexcept { + Record* record = _records.get(static_cast(p)); + EXPECT(record != nullptr, + "Address [%p] doesn't exist\n", p); + + _records.remove(record); + _heap.release(record, sizeof(Record)); + } + + void* alloc(size_t size) noexcept { + void* roPtr; + void* rwPtr; + + Error err = _allocator.alloc(&roPtr, &rwPtr, size); + EXPECT(err == kErrorOk, + "JitAllocator failed to allocate '%u' bytes\n", unsigned(size)); + + _insert(roPtr, size); + return roPtr; + } + + void release(void* p) noexcept { + _remove(p); + EXPECT(_allocator.release(p) == kErrorOk, + "JitAllocator failed to release '%p'\n", p); + } + + Zone _zone; + ZoneAllocator _heap; + ZoneTree _records; + JitAllocator _allocator; +}; + +static void JitAllocatorTest_shuffle(void** ptrArray, size_t count, Random& prng) noexcept { + for (size_t i = 0; i < count; ++i) + std::swap(ptrArray[i], ptrArray[size_t(prng.nextUInt32() % count)]); +} + +static void JitAllocatorTest_usage(JitAllocator& allocator) noexcept { + JitAllocator::Statistics stats = allocator.statistics(); + INFO(" Block Count : %9llu [Blocks]" , (unsigned long long)(stats.blockCount())); + INFO(" Reserved (VirtMem): %9llu [Bytes]" , (unsigned long 
long)(stats.reservedSize())); + INFO(" Used (VirtMem): %9llu [Bytes] (%.1f%%)", (unsigned long long)(stats.usedSize()), stats.usedSizeAsPercent()); + INFO(" Overhead (HeapMem): %9llu [Bytes] (%.1f%%)", (unsigned long long)(stats.overheadSize()), stats.overheadSizeAsPercent()); +} + +UNIT(asmjit_jit_allocator) { + size_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 100000; + + struct TestParams { + const char* name; + uint32_t options; + uint32_t blockSize; + uint32_t granularity; + }; + + #define OPT(OPTION) JitAllocator::OPTION + static TestParams testParams[] = { + { "Default", 0, 0, 0 }, + { "16MB blocks", 0, 16 * 1024 * 1024, 0 }, + { "256B granularity", 0, 0, 256 }, + { "kOptionUseDualMapping", OPT(kOptionUseDualMapping), 0, 0 }, + { "kOptionUseMultiplePools", OPT(kOptionUseMultiplePools), 0, 0 }, + { "kOptionFillUnusedMemory", OPT(kOptionFillUnusedMemory), 0, 0 }, + { "kOptionImmediateRelease", OPT(kOptionImmediateRelease), 0, 0 }, + { "kOptionUseDualMapping | kOptionFillUnusedMemory", OPT(kOptionUseDualMapping) | OPT(kOptionFillUnusedMemory), 0, 0 } + }; + #undef OPT + + INFO("BitFlipIterator"); + { + static const uint32_t bits[] = { 0x80000000u, 0x80000000u, 0x00000000u, 0x80000000u }; + BitFlipIterator it(bits, ASMJIT_ARRAY_SIZE(bits)); + + EXPECT(it.hasNext()); + EXPECT(it.nextAndFlip() == 31); + EXPECT(it.hasNext()); + EXPECT(it.nextAndFlip() == 32); + EXPECT(it.hasNext()); + EXPECT(it.nextAndFlip() == 63); + EXPECT(it.hasNext()); + EXPECT(it.nextAndFlip() == 64); + EXPECT(it.hasNext()); + EXPECT(it.nextAndFlip() == 127); + EXPECT(!it.hasNext()); + } + + INFO("BitFlipIterator"); + { + static const uint64_t bits[] = { 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFFFFFFFFFF, 0, 0 }; + BitFlipIterator it(bits, ASMJIT_ARRAY_SIZE(bits)); + + EXPECT(it.hasNext()); + EXPECT(it.nextAndFlip() == 0); + EXPECT(it.hasNext()); + EXPECT(it.nextAndFlip() == 128); + EXPECT(!it.hasNext()); + } + + for (uint32_t testId = 0; testId < ASMJIT_ARRAY_SIZE(testParams); testId++) { + INFO("Testing JitAllocator: %s", testParams[testId].name); + + JitAllocator::CreateParams params {}; + params.options = testParams[testId].options; + params.blockSize = testParams[testId].blockSize; + params.granularity = testParams[testId].granularity; + + JitAllocatorWrapper wrapper(¶ms); + Random prng(100); + + size_t i; + + INFO(" Memory alloc/release test - %d allocations", kCount); + + void** ptrArray = (void**)::malloc(sizeof(void*) * size_t(kCount)); + EXPECT(ptrArray != nullptr, + "Couldn't allocate '%u' bytes for pointer-array", unsigned(sizeof(void*) * size_t(kCount))); + + INFO(" Allocating virtual memory..."); + for (i = 0; i < kCount; i++) + ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8); + JitAllocatorTest_usage(wrapper._allocator); + + INFO(" Releasing virtual memory..."); + for (i = 0; i < kCount; i++) + wrapper.release(ptrArray[i]); + JitAllocatorTest_usage(wrapper._allocator); + + INFO(" Allocating virtual memory...", kCount); + for (i = 0; i < kCount; i++) + ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8); + JitAllocatorTest_usage(wrapper._allocator); + + INFO(" Shuffling..."); + JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng); + + INFO(" Releasing 50%% blocks..."); + for (i = 0; i < kCount / 2; i++) + wrapper.release(ptrArray[i]); + JitAllocatorTest_usage(wrapper._allocator); + + INFO(" Allocating 50%% blocks again..."); + for (i = 0; i < kCount / 2; i++) + ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8); + JitAllocatorTest_usage(wrapper._allocator); + + INFO(" 
Releasing virtual memory..."); + for (i = 0; i < kCount; i++) + wrapper.release(ptrArray[i]); + JitAllocatorTest_usage(wrapper._allocator); + + ::free(ptrArray); + } +} +#endif + +ASMJIT_END_NAMESPACE + +#endif diff --git a/src/asmjit/core/jitallocator.h b/src/asmjit/core/jitallocator.h new file mode 100644 index 0000000..e5531f8 --- /dev/null +++ b/src/asmjit/core/jitallocator.h @@ -0,0 +1,261 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_JITALLOCATOR_H +#define _ASMJIT_CORE_JITALLOCATOR_H + +#include "../core/build.h" +#ifndef ASMJIT_NO_JIT + +#include "../core/globals.h" +#include "../core/virtmem.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_jit +//! \{ + +// ============================================================================ +// [asmjit::JitAllocator] +// ============================================================================ + +//! A simple implementation of memory manager that uses `asmjit::VirtMem` +//! functions to manage virtual memory for JIT compiled code. +//! +//! Implementation notes: +//! +//! - Granularity of allocated blocks is different than granularity for a typical +//! C malloc. In addition, the allocator can use several memory pools having a +//! different granularity to minimize the maintenance overhead. Multiple pools +//! feature requires `kFlagUseMultiplePools` flag to be set. +//! +//! - The allocator doesn't store any information in executable memory, instead, +//! the implementation uses two bit-vectors to manage allocated memory of each +//! allocator-block. The first bit-vector called 'used' is used to track used +//! memory (where each bit represents memory size defined by granularity) and +//! the second bit vector called 'stop' is used as a sentinel to mark where +//! the allocated area ends. +//! +//! - Internally, the allocator also uses RB tree to keep track of all blocks +//! across all pools. Each inserted block is added to the tree so it can be +//! matched fast during `release()` and `shrink()`. +class JitAllocator { +public: + ASMJIT_NONCOPYABLE(JitAllocator) + + struct Impl { + //! Allocator options, see \ref JitAllocator::Options. + uint32_t options; + //! Base block size (0 if the allocator is not initialized). + uint32_t blockSize; + //! Base granularity (0 if the allocator is not initialized). + uint32_t granularity; + //! A pattern that is used to fill unused memory if secure mode is enabled. + uint32_t fillPattern; + }; + + //! Allocator implementation (private). + Impl* _impl; + + enum Options : uint32_t { + //! Enables the use of an anonymous memory-mapped memory that is mapped into + //! two buffers having a different pointer. The first buffer has read and + //! execute permissions and the second buffer has read+write permissions. + //! + //! See \ref VirtMem::allocDualMapping() for more details about this feature. + kOptionUseDualMapping = 0x00000001u, + + //! Enables the use of multiple pools with increasing granularity instead of + //! a single pool. This flag would enable 3 internal pools in total having + //! 64, 128, and 256 bytes granularity. + //! + //! This feature is only recommended for users that generate a lot of code + //! and would like to minimize the overhead of `JitAllocator` itself by + //! having blocks of different allocation granularities. Using this feature + //! only for few allocations won't pay off as the allocator may need to + //! 
create more blocks initially before it can take advantage of
+    //! variable block granularity.
+    kOptionUseMultiplePools = 0x00000002u,
+
+    //! Always fill reserved memory with a fill-pattern.
+    //!
+    //! Causes a new block to be cleared by the fill pattern and freshly
+    //! released memory to be cleared before making it ready for another use.
+    kOptionFillUnusedMemory = 0x00000004u,
+
+    //! When this flag is set the allocator would immediately release unused
+    //! blocks during `release()` or `reset()`. When this flag is not set the
+    //! allocator would keep one empty block in each pool to prevent excessive
+    //! virtual memory allocations and deallocations in border cases, which
+    //! involve constantly allocating and deallocating a single block caused
+    //! by repetitively calling `alloc()` and `release()` when the allocator has
+    //! either no blocks or all blocks fully occupied.
+    kOptionImmediateRelease = 0x00000008u,
+
+    //! Use a custom fill pattern; must be combined with `kOptionFillUnusedMemory`.
+    kOptionCustomFillPattern = 0x10000000u
+  };
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Parameters that can be passed to `JitAllocator` constructor.
+  //!
+  //! Use it like this:
+  //!
+  //! ```
+  //! // Zero initialize (zero means the default value) and change what you need.
+  //! JitAllocator::CreateParams params {};
+  //! params.blockSize = 1024 * 1024;
+  //!
+  //! // Create the allocator.
+  //! JitAllocator allocator(&params);
+  //! ```
+  struct CreateParams {
+    // Reset the content of `CreateParams`.
+    inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+    //! Allocator options, see \ref JitAllocator::Options.
+    //!
+    //! No options are used by default.
+    uint32_t options;
+
+    //! Base size of a single block in bytes (default 64kB).
+    //!
+    //! \remarks Block size must be equal to or greater than the page size and
+    //! must be a power of 2. If the input is not valid then the default block
+    //! size will be used instead.
+    uint32_t blockSize;
+
+    //! Base granularity (and also natural alignment) of allocations in bytes
+    //! (default 64).
+    //!
+    //! Since the `JitAllocator` uses bit-arrays to mark used memory the
+    //! granularity also specifies how many bytes correspond to a single bit in
+    //! such bit-array. Higher granularity means more waste of virtual memory
+    //! (as it increases the natural alignment), but smaller bit-arrays, as
+    //! fewer bits are required per block.
+    uint32_t granularity;
+
+    //! Pattern used to fill unused memory.
+    //!
+    //! Only used if \ref kOptionCustomFillPattern is set.
+    uint32_t fillPattern;
+  };
+
+  //! Creates a `JitAllocator` instance.
+  explicit ASMJIT_API JitAllocator(const CreateParams* params = nullptr) noexcept;
+  //! Destroys the `JitAllocator` instance and releases all blocks it holds.
+  ASMJIT_API ~JitAllocator() noexcept;
+
+  //! Tests whether the allocator is initialized (not backed by the null implementation).
+  inline bool isInitialized() const noexcept { return _impl->blockSize != 0; }
+
+  //! Free all allocated memory - makes all pointers returned by `alloc()` invalid.
+  //!
+  //! \remarks This function is not thread-safe as it's designed to be used when
+  //! nobody else is using the allocator. The reason is that there is no point
+  //! in calling `reset()` when the allocator is still in use.
+  ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns allocator options, see `Options`.
+  inline uint32_t options() const noexcept { return _impl->options; }
+  //! Tests whether the allocator has the given `option` set.
+ inline bool hasOption(uint32_t option) const noexcept { return (_impl->options & option) != 0; } + + //! Returns a base block size (a minimum size of block that the allocator would allocate). + inline uint32_t blockSize() const noexcept { return _impl->blockSize; } + //! Returns granularity of the allocator. + inline uint32_t granularity() const noexcept { return _impl->granularity; } + //! Returns pattern that is used to fill unused memory if `kFlagUseFillPattern` is set. + inline uint32_t fillPattern() const noexcept { return _impl->fillPattern; } + + //! \} + + //! \name Alloc & Release + //! \{ + + //! Allocate `size` bytes of virtual memory. + //! + //! \remarks This function is thread-safe. + ASMJIT_API Error alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcept; + + //! Release a memory returned by `alloc()`. + //! + //! \remarks This function is thread-safe. + ASMJIT_API Error release(void* ro) noexcept; + + //! Free extra memory allocated with `p` by restricting it to `newSize` size. + //! + //! \remarks This function is thread-safe. + ASMJIT_API Error shrink(void* ro, size_t newSize) noexcept; + + //! \} + + //! \name Statistics + //! \{ + + //! Statistics about `JitAllocator`. + struct Statistics { + inline void reset() noexcept { + _blockCount = 0; + _usedSize = 0; + _reservedSize = 0; + _overheadSize = 0; + } + + //! Returns count of blocks managed by `JitAllocator` at the moment. + inline size_t blockCount() const noexcept { return _blockCount; } + + //! Returns how many bytes are currently used. + inline size_t usedSize() const noexcept { return _usedSize; } + //! Returns the number of bytes unused by the allocator at the moment. + inline size_t unusedSize() const noexcept { return _reservedSize - _usedSize; } + //! Returns the total number of bytes bytes reserved by the allocator (sum of sizes of all blocks). + inline size_t reservedSize() const noexcept { return _reservedSize; } + //! Returns the number of bytes the allocator needs to manage the allocated memory. + inline size_t overheadSize() const noexcept { return _overheadSize; } + + inline double usedSizeAsPercent() const noexcept { + return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0; + } + + inline double unusedSizeAsPercent() const noexcept { + return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0; + } + + inline double overheadSizeAsPercent() const noexcept { + return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0; + } + + //! Number of blocks `JitAllocator` maintains. + size_t _blockCount; + //! How many bytes are currently used / allocated. + size_t _usedSize; + //! How many bytes are currently reserved by the allocator. + size_t _reservedSize; + //! Allocation overhead (in bytes) required to maintain all blocks. + size_t _overheadSize; + }; + + //! Returns JIT allocator statistics. + //! + //! \remarks This function is thread-safe. + ASMJIT_API Statistics statistics() const noexcept; + + //! \} +}; + +//! \} + +ASMJIT_END_NAMESPACE + +#endif +#endif diff --git a/src/asmjit/core/jitruntime.cpp b/src/asmjit/core/jitruntime.cpp new file mode 100644 index 0000000..f8d23c9 --- /dev/null +++ b/src/asmjit/core/jitruntime.cpp @@ -0,0 +1,142 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
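+
+// JitRuntime ties CodeHolder and JitAllocator together. A minimal usage
+// sketch (illustrative only, error handling omitted; see jitruntime.h and
+// the documentation for the authoritative examples):
+//
+//   JitRuntime rt;                      // Owns a JitAllocator internally.
+//   CodeHolder code;
+//   code.init(rt.codeInfo());           // Match the runtime's environment.
+//   x86::Assembler a(&code);
+//   a.mov(x86::eax, 1);
+//   a.ret();
+//
+//   typedef int (*Func)(void);
+//   Func fn;
+//   rt.add(&fn, &code);                 // Copy + relocate into executable memory.
+//   int result = fn();                  // result == 1
+//   rt.release(fn);                     // Return the memory to the allocator.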
+ +#define ASMJIT_EXPORTS + +#include "../core/build.h" +#ifndef ASMJIT_NO_JIT + +#include "../core/cpuinfo.h" +#include "../core/jitruntime.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::JitRuntime - Utilities] +// ============================================================================ + +// Only useful on non-x86 architectures. +static inline void JitRuntime_flushInstructionCache(const void* p, size_t size) noexcept { +#if defined(_WIN32) && !ASMJIT_ARCH_X86 + // Windows has a built-in support in `kernel32.dll`. + ::FlushInstructionCache(::GetCurrentProcess(), p, size); +#else + ASMJIT_UNUSED(p); + ASMJIT_UNUSED(size); +#endif +} + +// X86 Target +// ---------- +// +// - 32-bit - Linux, OSX, BSD, and apparently also Haiku guarantee 16-byte +// stack alignment. Other operating systems are assumed to have +// 4-byte alignment by default for safety reasons. +// - 64-bit - stack must be aligned to 16 bytes. +// +// ARM Target +// ---------- +// +// - 32-bit - Stack must be aligned to 8 bytes. +// - 64-bit - Stack must be aligned to 16 bytes (hardware requirement). +static inline uint32_t JitRuntime_detectNaturalStackAlignment() noexcept { +#if ASMJIT_ARCH_BITS == 64 || \ + defined(__APPLE__ ) || \ + defined(__DragonFly__) || \ + defined(__HAIKU__ ) || \ + defined(__FreeBSD__ ) || \ + defined(__NetBSD__ ) || \ + defined(__OpenBSD__ ) || \ + defined(__bsdi__ ) || \ + defined(__linux__ ) + return 16; +#elif ASMJIT_ARCH_ARM + return 8; +#else + return uint32_t(sizeof(uintptr_t)); +#endif +} + +// ============================================================================ +// [asmjit::JitRuntime - Construction / Destruction] +// ============================================================================ + +JitRuntime::JitRuntime(const JitAllocator::CreateParams* params) noexcept + : _allocator(params) { + + // Setup target properties. + _targetType = kTargetJit; + _codeInfo._archInfo = CpuInfo::host().archInfo(); + _codeInfo._stackAlignment = uint8_t(JitRuntime_detectNaturalStackAlignment()); + _codeInfo._cdeclCallConv = CallConv::kIdHostCDecl; + _codeInfo._stdCallConv = CallConv::kIdHostStdCall; + _codeInfo._fastCallConv = CallConv::kIdHostFastCall; +} +JitRuntime::~JitRuntime() noexcept {} + +// ============================================================================ +// [asmjit::JitRuntime - Interface] +// ============================================================================ + +Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept { + *dst = nullptr; + + ASMJIT_PROPAGATE(code->flatten()); + ASMJIT_PROPAGATE(code->resolveUnresolvedLinks()); + + size_t estimatedCodeSize = code->codeSize(); + if (ASMJIT_UNLIKELY(estimatedCodeSize == 0)) + return DebugUtils::errored(kErrorNoCodeGenerated); + + uint8_t* ro; + uint8_t* rw; + ASMJIT_PROPAGATE(_allocator.alloc((void**)&ro, (void**)&rw, estimatedCodeSize)); + + // Relocate the code. + Error err = code->relocateToBase(uintptr_t((void*)ro)); + if (ASMJIT_UNLIKELY(err)) { + _allocator.release(ro); + return err; + } + + // Recalculate the final code size and shrink the memory we allocated for it + // in case that some relocations didn't require records in an address table. 
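+  // (relocateToBase() may have trimmed unused address-table entries, so the
+  // final codeSize() can be smaller than `estimatedCodeSize` computed above.)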
+  size_t codeSize = code->codeSize();
+
+  for (Section* section : code->_sections) {
+    size_t offset = size_t(section->offset());
+    size_t bufferSize = size_t(section->bufferSize());
+    size_t virtualSize = size_t(section->virtualSize());
+
+    ASMJIT_ASSERT(offset + bufferSize <= codeSize);
+    memcpy(rw + offset, section->data(), bufferSize);
+
+    if (virtualSize > bufferSize) {
+      ASMJIT_ASSERT(offset + virtualSize <= codeSize);
+      memset(rw + offset + bufferSize, 0, virtualSize - bufferSize);
+    }
+  }
+
+  if (codeSize < estimatedCodeSize)
+    _allocator.shrink(ro, codeSize);
+
+  flush(ro, codeSize);
+  *dst = ro;
+
+  return kErrorOk;
+}
+
+Error JitRuntime::_release(void* p) noexcept {
+  return _allocator.release(p);
+}
+
+void JitRuntime::flush(const void* p, size_t size) noexcept {
+  JitRuntime_flushInstructionCache(p, size);
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif
diff --git a/src/asmjit/core/jitruntime.h b/src/asmjit/core/jitruntime.h
new file mode 100644
index 0000000..242d1bb
--- /dev/null
+++ b/src/asmjit/core/jitruntime.h
@@ -0,0 +1,109 @@
+// [AsmJit]
+// Machine Code Generation for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+#ifndef _ASMJIT_CORE_JITRUNTIME_H
+#define _ASMJIT_CORE_JITRUNTIME_H
+
+#include "../core/build.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/codeholder.h"
+#include "../core/jitallocator.h"
+#include "../core/target.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+class CodeHolder;
+
+//! \addtogroup asmjit_jit
+//! \{
+
+// ============================================================================
+// [asmjit::JitRuntime]
+// ============================================================================
+
+//! JIT execution runtime is a special `Target` that is designed to store and
+//! execute the generated code.
+class ASMJIT_VIRTAPI JitRuntime : public Target {
+public:
+  ASMJIT_NONCOPYABLE(JitRuntime)
+
+  //! Virtual memory allocator.
+  JitAllocator _allocator;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates a `JitRuntime` instance.
+  explicit ASMJIT_API JitRuntime(const JitAllocator::CreateParams* params = nullptr) noexcept;
+  //! Destroys the `JitRuntime` instance.
+  ASMJIT_API virtual ~JitRuntime() noexcept;
+
+  inline void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept {
+    _allocator.reset(resetPolicy);
+  }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the associated `JitAllocator`.
+  inline JitAllocator* allocator() const noexcept { return const_cast<JitAllocator*>(&_allocator); }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  // NOTE: To allow passing function pointers to `add()` and `release()`, the
+  // virtual methods are prefixed with `_` and called from templates instead.
+
+  //! Allocates memory needed for the code stored in `CodeHolder` and relocates
+  //! it to the allocated memory.
+  //!
+  //! The beginning of the allocated memory is returned in `dst`. If the call
+  //! fails, an `Error` code is returned and `dst` is explicitly set to `nullptr`,
+  //! which means that you don't have to reset it before calling `add()`.
+  template<typename Func>
+  inline Error add(Func* dst, CodeHolder* code) noexcept {
+    return _add(Support::ptr_cast_impl<void**, Func*>(dst), code);
+  }
+
+  //! Releases `p` which was obtained by calling `add()`.
+  template<typename Func>
+  inline Error release(Func p) noexcept {
+    return _release(Support::ptr_cast_impl<void*, Func>(p));
+  }
+
+  //! Type-unsafe version of `add()`.
+  ASMJIT_API virtual Error _add(void** dst, CodeHolder* code) noexcept;
+
+  //! Type-unsafe version of `release()`.
+ ASMJIT_API virtual Error _release(void* p) noexcept; + + //! Flushes an instruction cache. + //! + //! This member function is called after the code has been copied to the + //! destination buffer. It is only useful for JIT code generation as it + //! causes a flush of the processor's cache. + //! + //! Flushing is basically a NOP under X86, but is needed by architectures + //! that do not have a transparent instruction cache like ARM. + //! + //! This function can also be overridden to improve compatibility with tools + //! such as Valgrind, however, it's not an official part of AsmJit. + ASMJIT_API virtual void flush(const void* p, size_t size) noexcept; + + //! \} +}; + +//! \} + +ASMJIT_END_NAMESPACE + +#endif +#endif diff --git a/src/asmjit/core/logging.cpp b/src/asmjit/core/logging.cpp new file mode 100644 index 0000000..6039a47 --- /dev/null +++ b/src/asmjit/core/logging.cpp @@ -0,0 +1,515 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#define ASMJIT_EXPORTS + +#include "../core/build.h" +#ifndef ASMJIT_NO_LOGGING + +#include "../core/builder.h" +#include "../core/codeholder.h" +#include "../core/compiler.h" +#include "../core/emitter.h" +#include "../core/logging.h" +#include "../core/string.h" +#include "../core/support.h" +#include "../core/type.h" + +#ifdef ASMJIT_BUILD_X86 + #include "../x86/x86logging_p.h" +#endif + +#ifdef ASMJIT_BUILD_ARM + #include "../arm/armlogging_p.h" +#endif + +ASMJIT_BEGIN_NAMESPACE + +#if defined(ASMJIT_NO_COMPILER) +class VirtReg; +#endif + +// ============================================================================ +// [asmjit::Logger - Construction / Destruction] +// ============================================================================ + +Logger::Logger() noexcept + : _options() {} +Logger::~Logger() noexcept {} + +// ============================================================================ +// [asmjit::Logger - Logging] +// ============================================================================ + +Error Logger::logf(const char* fmt, ...) 
noexcept { + Error err; + va_list ap; + + va_start(ap, fmt); + err = logv(fmt, ap); + va_end(ap); + + return err; +} + +Error Logger::logv(const char* fmt, va_list ap) noexcept { + StringTmp<2048> sb; + ASMJIT_PROPAGATE(sb.appendVFormat(fmt, ap)); + return log(sb); +} + +Error Logger::logBinary(const void* data, size_t size) noexcept { + static const char prefix[] = "db "; + + StringTmp<256> sb; + sb.appendString(prefix, ASMJIT_ARRAY_SIZE(prefix) - 1); + + size_t i = size; + const uint8_t* s = static_cast(data); + + while (i) { + uint32_t n = uint32_t(Support::min(i, 16)); + sb.truncate(ASMJIT_ARRAY_SIZE(prefix) - 1); + sb.appendHex(s, n); + sb.appendChar('\n'); + ASMJIT_PROPAGATE(log(sb)); + s += n; + i -= n; + } + + return kErrorOk; +} + +// ============================================================================ +// [asmjit::FileLogger - Construction / Destruction] +// ============================================================================ + +FileLogger::FileLogger(FILE* file) noexcept + : _file(nullptr) { setFile(file); } +FileLogger::~FileLogger() noexcept {} + +// ============================================================================ +// [asmjit::FileLogger - Logging] +// ============================================================================ + +Error FileLogger::_log(const char* data, size_t size) noexcept { + if (!_file) + return kErrorOk; + + if (size == SIZE_MAX) + size = strlen(data); + + fwrite(data, 1, size, _file); + return kErrorOk; +} + +// ============================================================================ +// [asmjit::StringLogger - Construction / Destruction] +// ============================================================================ + +StringLogger::StringLogger() noexcept {} +StringLogger::~StringLogger() noexcept {} + +// ============================================================================ +// [asmjit::StringLogger - Logging] +// ============================================================================ + +Error StringLogger::_log(const char* data, size_t size) noexcept { + return _content.appendString(data, size); +} + +// ============================================================================ +// [asmjit::Logging] +// ============================================================================ + +Error Logging::formatLabel( + String& sb, + uint32_t flags, + const BaseEmitter* emitter, + uint32_t labelId) noexcept { + + ASMJIT_UNUSED(flags); + + const LabelEntry* le = emitter->code()->labelEntry(labelId); + if (ASMJIT_UNLIKELY(!le)) + return sb.appendFormat("InvalidLabel[Id=%u]", labelId); + + if (le->hasName()) { + if (le->hasParent()) { + uint32_t parentId = le->parentId(); + const LabelEntry* pe = emitter->code()->labelEntry(parentId); + + if (ASMJIT_UNLIKELY(!pe)) + ASMJIT_PROPAGATE(sb.appendFormat("InvalidLabel[Id=%u]", labelId)); + else if (ASMJIT_UNLIKELY(!pe->hasName())) + ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId)); + else + ASMJIT_PROPAGATE(sb.appendString(pe->name())); + + ASMJIT_PROPAGATE(sb.appendChar('.')); + } + return sb.appendString(le->name()); + } + else { + return sb.appendFormat("L%u", labelId); + } +} + +Error Logging::formatRegister( + String& sb, + uint32_t flags, + const BaseEmitter* emitter, + uint32_t archId, + uint32_t regType, + uint32_t regId) noexcept { + + #ifdef ASMJIT_BUILD_X86 + if (ArchInfo::isX86Family(archId)) + return x86::LoggingInternal::formatRegister(sb, flags, emitter, archId, regType, regId); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (ArchInfo::isArmFamily(archId)) + 
return arm::LoggingInternal::formatRegister(sb, flags, emitter, archId, regType, regId); + #endif + + return kErrorInvalidArch; +} + +Error Logging::formatOperand( + String& sb, + uint32_t flags, + const BaseEmitter* emitter, + uint32_t archId, + const Operand_& op) noexcept { + + #ifdef ASMJIT_BUILD_X86 + if (ArchInfo::isX86Family(archId)) + return x86::LoggingInternal::formatOperand(sb, flags, emitter, archId, op); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (ArchInfo::isArmFamily(archId)) + return arm::LoggingInternal::formatOperand(sb, flags, emitter, archId, op); + #endif + + return kErrorInvalidArch; +} + +Error Logging::formatInstruction( + String& sb, + uint32_t flags, + const BaseEmitter* emitter, + uint32_t archId, + const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept { + + #ifdef ASMJIT_BUILD_X86 + if (ArchInfo::isX86Family(archId)) + return x86::LoggingInternal::formatInstruction(sb, flags, emitter, archId, inst, operands, opCount); + #endif + + #ifdef ASMJIT_BUILD_ARM + if (ArchInfo::isArmFamily(archId)) + return arm::LoggingInternal::formatInstruction(sb, flags, emitter, archId, inst, operands, opCount); + #endif + + return kErrorInvalidArch; +} + +Error Logging::formatTypeId(String& sb, uint32_t typeId) noexcept { + if (typeId == Type::kIdVoid) + return sb.appendString("void"); + + if (!Type::isValid(typeId)) + return sb.appendString("unknown"); + + const char* typeName = "unknown"; + uint32_t typeSize = Type::sizeOf(typeId); + + uint32_t baseId = Type::baseOf(typeId); + switch (baseId) { + case Type::kIdIntPtr : typeName = "iptr" ; break; + case Type::kIdUIntPtr: typeName = "uptr" ; break; + case Type::kIdI8 : typeName = "i8" ; break; + case Type::kIdU8 : typeName = "u8" ; break; + case Type::kIdI16 : typeName = "i16" ; break; + case Type::kIdU16 : typeName = "u16" ; break; + case Type::kIdI32 : typeName = "i32" ; break; + case Type::kIdU32 : typeName = "u32" ; break; + case Type::kIdI64 : typeName = "i64" ; break; + case Type::kIdU64 : typeName = "u64" ; break; + case Type::kIdF32 : typeName = "f32" ; break; + case Type::kIdF64 : typeName = "f64" ; break; + case Type::kIdF80 : typeName = "f80" ; break; + case Type::kIdMask8 : typeName = "mask8" ; break; + case Type::kIdMask16 : typeName = "mask16"; break; + case Type::kIdMask32 : typeName = "mask32"; break; + case Type::kIdMask64 : typeName = "mask64"; break; + case Type::kIdMmx32 : typeName = "mmx32" ; break; + case Type::kIdMmx64 : typeName = "mmx64" ; break; + } + + uint32_t baseSize = Type::sizeOf(baseId); + if (typeSize > baseSize) { + uint32_t count = typeSize / baseSize; + return sb.appendFormat("%sx%u", typeName, unsigned(count)); + } + else { + return sb.appendString(typeName); + } + +} + +#ifndef ASMJIT_NO_BUILDER +static Error formatFuncValue(String& sb, uint32_t flags, const BaseEmitter* emitter, FuncValue value) noexcept { + uint32_t typeId = value.typeId(); + ASMJIT_PROPAGATE(Logging::formatTypeId(sb, typeId)); + + if (value.isReg()) { + ASMJIT_PROPAGATE(sb.appendChar('@')); + ASMJIT_PROPAGATE(Logging::formatRegister(sb, flags, emitter, emitter->archId(), value.regType(), value.regId())); + } + + if (value.isStack()) { + ASMJIT_PROPAGATE(sb.appendFormat("@[%d]", int(value.stackOffset()))); + } + + return kErrorOk; +} + +static Error formatFuncRets( + String& sb, + uint32_t flags, + const BaseEmitter* emitter, + const FuncDetail& fd, + VirtReg* const* vRegs) noexcept { + + if (!fd.hasRet()) + return sb.appendString("void"); + + for (uint32_t i = 0; i < fd.retCount(); i++) { + if (i) 
ASMJIT_PROPAGATE(sb.appendString(", ")); + ASMJIT_PROPAGATE(formatFuncValue(sb, flags, emitter, fd.ret(i))); + + #ifndef ASMJIT_NO_COMPILER + if (vRegs) { + static const char nullRet[] = ""; + ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[i] ? vRegs[i]->name() : nullRet)); + } + #endif + } + + return kErrorOk; +} + +static Error formatFuncArgs( + String& sb, + uint32_t flags, + const BaseEmitter* emitter, + const FuncDetail& fd, + VirtReg* const* vRegs) noexcept { + + uint32_t count = fd.argCount(); + if (!count) + return sb.appendString("void"); + + for (uint32_t i = 0; i < count; i++) { + if (i) ASMJIT_PROPAGATE(sb.appendString(", ")); + ASMJIT_PROPAGATE(formatFuncValue(sb, flags, emitter, fd.arg(i))); + + #ifndef ASMJIT_NO_COMPILER + if (vRegs) { + static const char nullArg[] = ""; + ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[i] ? vRegs[i]->name() : nullArg)); + } + #endif + } + + return kErrorOk; +} + +Error Logging::formatNode( + String& sb, + uint32_t flags, + const BaseBuilder* cb, + const BaseNode* node_) noexcept { + + if (node_->hasPosition() && (flags & FormatOptions::kFlagPositions) != 0) + ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node_->position())); + + switch (node_->type()) { + case BaseNode::kNodeInst: { + const InstNode* node = node_->as(); + ASMJIT_PROPAGATE( + Logging::formatInstruction(sb, flags, cb, + cb->archId(), + node->baseInst(), node->operands(), node->opCount())); + break; + } + + case BaseNode::kNodeSection: { + const SectionNode* node = node_->as(); + if (cb->_code->isSectionValid(node->id())) { + const Section* section = cb->_code->sectionById(node->id()); + ASMJIT_PROPAGATE(sb.appendFormat(".section %s", section->name())); + } + break; + } + + case BaseNode::kNodeLabel: { + const LabelNode* node = node_->as(); + ASMJIT_PROPAGATE(formatLabel(sb, flags, cb, node->id())); + ASMJIT_PROPAGATE(sb.appendString(":")); + break; + } + + case BaseNode::kNodeAlign: { + const AlignNode* node = node_->as(); + ASMJIT_PROPAGATE( + sb.appendFormat(".align %u (%s)", + node->alignment(), + node->alignMode() == kAlignCode ? 
"code" : "data")); + break; + } + + case BaseNode::kNodeEmbedData: { + const EmbedDataNode* node = node_->as(); + ASMJIT_PROPAGATE(sb.appendFormat(".embed (%u bytes)", node->size())); + break; + } + + case BaseNode::kNodeEmbedLabel: { + const EmbedLabelNode* node = node_->as(); + ASMJIT_PROPAGATE(sb.appendString(".label ")); + ASMJIT_PROPAGATE(formatLabel(sb, flags, cb, node->id())); + break; + } + + case BaseNode::kNodeEmbedLabelDelta: { + const EmbedLabelDeltaNode* node = node_->as(); + ASMJIT_PROPAGATE(sb.appendString(".label (")); + ASMJIT_PROPAGATE(formatLabel(sb, flags, cb, node->id())); + ASMJIT_PROPAGATE(sb.appendString(" - ")); + ASMJIT_PROPAGATE(formatLabel(sb, flags, cb, node->baseId())); + ASMJIT_PROPAGATE(sb.appendString(")")); + break; + } + + case BaseNode::kNodeComment: { + const CommentNode* node = node_->as(); + ASMJIT_PROPAGATE(sb.appendFormat("; %s", node->inlineComment())); + break; + } + + case BaseNode::kNodeSentinel: { + const SentinelNode* node = node_->as(); + const char* sentinelName = nullptr; + + switch (node->sentinelType()) { + case SentinelNode::kSentinelFuncEnd: + sentinelName = "[FuncEnd]"; + break; + + default: + sentinelName = "[Sentinel]"; + break; + } + + ASMJIT_PROPAGATE(sb.appendString(sentinelName)); + break; + } + + #ifndef ASMJIT_NO_COMPILER + case BaseNode::kNodeFunc: { + const FuncNode* node = node_->as(); + + ASMJIT_PROPAGATE(formatLabel(sb, flags, cb, node->id())); + ASMJIT_PROPAGATE(sb.appendString(": ")); + + ASMJIT_PROPAGATE(formatFuncRets(sb, flags, cb, node->detail(), nullptr)); + ASMJIT_PROPAGATE(sb.appendString(" Func(")); + ASMJIT_PROPAGATE(formatFuncArgs(sb, flags, cb, node->detail(), node->args())); + ASMJIT_PROPAGATE(sb.appendString(")")); + break; + } + + case BaseNode::kNodeFuncRet: { + const FuncRetNode* node = node_->as(); + ASMJIT_PROPAGATE(sb.appendString("[FuncRet]")); + + for (uint32_t i = 0; i < 2; i++) { + const Operand_& op = node->_opArray[i]; + if (!op.isNone()) { + ASMJIT_PROPAGATE(sb.appendString(i == 0 ? " " : ", ")); + ASMJIT_PROPAGATE(formatOperand(sb, flags, cb, cb->archId(), op)); + } + } + break; + } + + case BaseNode::kNodeFuncCall: { + const FuncCallNode* node = node_->as(); + ASMJIT_PROPAGATE( + Logging::formatInstruction(sb, flags, cb, + cb->archId(), + node->baseInst(), node->operands(), node->opCount())); + break; + } + #endif + + default: { + ASMJIT_PROPAGATE(sb.appendFormat("[User:%u]", node_->type())); + break; + } + } + + return kErrorOk; +} +#endif + +Error Logging::formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept { + size_t currentSize = sb.size(); + size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0; + + ASMJIT_ASSERT(binSize >= dispSize); + const size_t kNoBinSize = std::numeric_limits::max(); + + if ((binSize != 0 && binSize != kNoBinSize) || commentSize) { + size_t align = kMaxInstLineSize; + char sep = ';'; + + for (size_t i = (binSize == kNoBinSize); i < 2; i++) { + size_t begin = sb.size(); + ASMJIT_PROPAGATE(sb.padEnd(align)); + + if (sep) { + ASMJIT_PROPAGATE(sb.appendChar(sep)); + ASMJIT_PROPAGATE(sb.appendChar(' ')); + } + + // Append binary data or comment. 
+ if (i == 0) { + ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - dispSize - immSize)); + ASMJIT_PROPAGATE(sb.appendChars('.', dispSize * 2)); + ASMJIT_PROPAGATE(sb.appendHex(binData + binSize - immSize, immSize)); + if (commentSize == 0) break; + } + else { + ASMJIT_PROPAGATE(sb.appendString(comment, commentSize)); + } + + currentSize += sb.size() - begin; + align += kMaxBinarySize; + sep = '|'; + } + } + + return sb.appendChar('\n'); +} + +ASMJIT_END_NAMESPACE + +#endif diff --git a/src/asmjit/core/logging.h b/src/asmjit/core/logging.h new file mode 100644 index 0000000..16e8b97 --- /dev/null +++ b/src/asmjit/core/logging.h @@ -0,0 +1,338 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_LOGGING_H +#define _ASMJIT_CORE_LOGGING_H + +#include "../core/inst.h" +#include "../core/string.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_core +//! \{ + +#ifndef ASMJIT_NO_LOGGING + +// ============================================================================ +// [Forward Declarations] +// ============================================================================ + +class BaseEmitter; +class BaseReg; +class Logger; +struct Operand_; + +#ifndef ASMJIT_NO_BUILDER +class BaseBuilder; +class BaseNode; +#endif + +// ============================================================================ +// [asmjit::FormatOptions] +// ============================================================================ + +class FormatOptions { +public: + uint32_t _flags; + uint8_t _indentation[4]; + + enum Flags : uint32_t { + //!< Show also binary form of each logged instruction (assembler). + kFlagMachineCode = 0x00000001u, + //!< Show a text explanation of some immediate values. + kFlagExplainImms = 0x00000002u, + //!< Use hexadecimal notation of immediate values. + kFlagHexImms = 0x00000004u, + //!< Use hexadecimal notation of address offsets. + kFlagHexOffsets = 0x00000008u, + //!< Show casts between virtual register types (compiler). + kFlagRegCasts = 0x00000010u, + //!< Show positions associated with nodes (compiler). + kFlagPositions = 0x00000020u, + //!< Annotate nodes that are lowered by passes. + kFlagAnnotations = 0x00000040u, + + // TODO: These must go, keep this only for formatting. + //!< Show an additional output from passes. + kFlagDebugPasses = 0x00000080u, + //!< Show an additional output from RA. + kFlagDebugRA = 0x00000100u + }; + + enum IndentationType : uint32_t { + //! Indentation used for instructions and directives. + kIndentationCode = 0u, + //! Indentation used for labels and function nodes. + kIndentationLabel = 1u, + //! Indentation used for comments (not inline comments). + kIndentationComment = 2u, + kIndentationReserved = 3u + }; + + //! \name Construction & Destruction + //! \{ + + constexpr FormatOptions() noexcept + : _flags(0), + _indentation { 0, 0, 0, 0 } {} + + constexpr FormatOptions(const FormatOptions& other) noexcept = default; + inline FormatOptions& operator=(const FormatOptions& other) noexcept = default; + + inline void reset() noexcept { + _flags = 0; + _indentation[0] = 0; + _indentation[1] = 0; + _indentation[2] = 0; + _indentation[3] = 0; + } + + //! \} + + //! \name Accessors + //! 
\{ + + constexpr uint32_t flags() const noexcept { return _flags; } + constexpr bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; } + inline void setFlags(uint32_t flags) noexcept { _flags = flags; } + inline void addFlags(uint32_t flags) noexcept { _flags |= flags; } + inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; } + + constexpr uint8_t indentation(uint32_t type) const noexcept { return _indentation[type]; } + inline void setIndentation(uint32_t type, uint32_t n) noexcept { _indentation[type] = uint8_t(n); } + inline void resetIndentation(uint32_t type) noexcept { _indentation[type] = uint8_t(0); } + + //! \} +}; + +// ============================================================================ +// [asmjit::Logger] +// ============================================================================ + +//! Abstract logging interface and helpers. +//! +//! This class can be inherited and reimplemented to fit into your logging +//! subsystem. When reimplementing use `Logger::_log()` method to log into +//! a custom stream. +//! +//! There are two `Logger` implementations offered by AsmJit: +//! - `FileLogger` - allows to log into `FILE*`. +//! - `StringLogger` - logs into a `String`. +class ASMJIT_VIRTAPI Logger { +public: + ASMJIT_BASE_CLASS(Logger) + ASMJIT_NONCOPYABLE(Logger) + + //! Format options. + FormatOptions _options; + + //! \name Construction & Destruction + //! \{ + + //! Creates a `Logger` instance. + ASMJIT_API Logger() noexcept; + //! Destroys the `Logger` instance. + ASMJIT_API virtual ~Logger() noexcept; + + //! \} + + //! \name Format Options + //! \{ + + inline FormatOptions& options() noexcept { return _options; } + inline const FormatOptions& options() const noexcept { return _options; } + + inline uint32_t flags() const noexcept { return _options.flags(); } + inline bool hasFlag(uint32_t flag) const noexcept { return _options.hasFlag(flag); } + inline void setFlags(uint32_t flags) noexcept { _options.setFlags(flags); } + inline void addFlags(uint32_t flags) noexcept { _options.addFlags(flags); } + inline void clearFlags(uint32_t flags) noexcept { _options.clearFlags(flags); } + + inline uint32_t indentation(uint32_t type) const noexcept { return _options.indentation(type); } + inline void setIndentation(uint32_t type, uint32_t n) noexcept { _options.setIndentation(type, n); } + inline void resetIndentation(uint32_t type) noexcept { _options.resetIndentation(type); } + + //! \} + + //! \name Logging Interface + //! \{ + + //! Logs `str` - must be reimplemented. + virtual Error _log(const char* data, size_t size) noexcept = 0; + + //! Logs string `str`, which is either null terminated or having size `size`. + inline Error log(const char* data, size_t size = SIZE_MAX) noexcept { return _log(data, size); } + //! Logs content of a string `str`. + inline Error log(const String& str) noexcept { return _log(str.data(), str.size()); } + + //! Formats the message by using `snprintf()` and then sends the result + //! to `log()`. + ASMJIT_API Error logf(const char* fmt, ...) noexcept; + + //! Formats the message by using `vsnprintf()` and then sends the result + //! to `log()`. + ASMJIT_API Error logv(const char* fmt, va_list ap) noexcept; + + //! Logs binary data. + ASMJIT_API Error logBinary(const void* data, size_t size) noexcept; + + //! 
\} +}; + +// ============================================================================ +// [asmjit::FileLogger] +// ============================================================================ + +//! Logger that can log to a `FILE*`. +class ASMJIT_VIRTAPI FileLogger : public Logger { +public: + ASMJIT_NONCOPYABLE(FileLogger) + + FILE* _file; + + //! \name Construction & Destruction + //! \{ + + //! Creates a new `FileLogger` that logs to `FILE*`. + ASMJIT_API FileLogger(FILE* file = nullptr) noexcept; + //! Destroys the `FileLogger`. + ASMJIT_API virtual ~FileLogger() noexcept; + + //! \} + + //! \name Accessors + //! \{ + + //! Returns the logging output stream or null if the logger has no output + //! stream. + inline FILE* file() const noexcept { return _file; } + + //! Sets the logging output stream to `stream` or null. + //! + //! \note If the `file` is null the logging will be disabled. When a logger + //! is attached to `CodeHolder` or any emitter the logging API will always + //! be called regardless of the output file. This means that if you really + //! want to disable logging at emitter level you must not attach a logger + //! to it. + inline void setFile(FILE* file) noexcept { _file = file; } + + //! \} + + ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override; +}; + +// ============================================================================ +// [asmjit::StringLogger] +// ============================================================================ + +//! Logger that stores everything in an internal string buffer. +class ASMJIT_VIRTAPI StringLogger : public Logger { +public: + ASMJIT_NONCOPYABLE(StringLogger) + + //! Logger data as string. + String _content; + + //! \name Construction & Destruction + //! \{ + + //! Create new `StringLogger`. + ASMJIT_API StringLogger() noexcept; + //! Destroys the `StringLogger`. + ASMJIT_API virtual ~StringLogger() noexcept; + + //! \} + + //! \name Logger Data Accessors + //! \{ + + //! Returns aggregated logger data as `char*` pointer. + //! + //! The pointer is owned by `StringLogger`, it can't be modified or freed. + inline const char* data() const noexcept { return _content.data(); } + //! Returns size of the data returned by `data()`. + inline size_t dataSize() const noexcept { return _content.size(); } + + //! \} + + //! \name Logger Data Manipulation + //! \{ + + //! Clears the accumulated logger data. + inline void clear() noexcept { _content.clear(); } + + //! 
\} + + ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override; +}; + +// ============================================================================ +// [asmjit::Logging] +// ============================================================================ + +struct Logging { + ASMJIT_API static Error formatRegister( + String& sb, + uint32_t flags, + const BaseEmitter* emitter, + uint32_t archId, + uint32_t regType, + uint32_t regId) noexcept; + + ASMJIT_API static Error formatLabel( + String& sb, + uint32_t flags, + const BaseEmitter* emitter, + uint32_t labelId) noexcept; + + ASMJIT_API static Error formatOperand( + String& sb, + uint32_t flags, + const BaseEmitter* emitter, + uint32_t archId, + const Operand_& op) noexcept; + + ASMJIT_API static Error formatInstruction( + String& sb, + uint32_t flags, + const BaseEmitter* emitter, + uint32_t archId, + const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept; + + ASMJIT_API static Error formatTypeId( + String& sb, + uint32_t typeId) noexcept; + + #ifndef ASMJIT_NO_BUILDER + ASMJIT_API static Error formatNode( + String& sb, + uint32_t flags, + const BaseBuilder* cb, + const BaseNode* node_) noexcept; + #endif + + // Only used by AsmJit internals, not available to users. + #if defined(ASMJIT_EXPORTS) + enum { + // Has to be big to be able to hold all metadata compiler can assign to a + // single instruction. + kMaxInstLineSize = 44, + kMaxBinarySize = 26 + }; + + static Error formatLine( + String& sb, + const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept; + #endif +}; +#endif + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_LOGGER_H diff --git a/src/asmjit/core/misc_p.h b/src/asmjit/core/misc_p.h new file mode 100644 index 0000000..f8548f1 --- /dev/null +++ b/src/asmjit/core/misc_p.h @@ -0,0 +1,32 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_MISC_P_H +#define _ASMJIT_CORE_MISC_P_H + +#include "../core/build.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \cond INTERNAL +//! \addtogroup asmjit_support +//! \{ + +#define ASMJIT_LOOKUP_TABLE_8(T, I) T((I)), T((I+1)), T((I+2)), T((I+3)), T((I+4)), T((I+5)), T((I+6)), T((I+7)) +#define ASMJIT_LOOKUP_TABLE_16(T, I) ASMJIT_LOOKUP_TABLE_8(T, I), ASMJIT_LOOKUP_TABLE_8(T, I + 8) +#define ASMJIT_LOOKUP_TABLE_32(T, I) ASMJIT_LOOKUP_TABLE_16(T, I), ASMJIT_LOOKUP_TABLE_16(T, I + 16) +#define ASMJIT_LOOKUP_TABLE_64(T, I) ASMJIT_LOOKUP_TABLE_32(T, I), ASMJIT_LOOKUP_TABLE_32(T, I + 32) +#define ASMJIT_LOOKUP_TABLE_128(T, I) ASMJIT_LOOKUP_TABLE_64(T, I), ASMJIT_LOOKUP_TABLE_64(T, I + 64) +#define ASMJIT_LOOKUP_TABLE_256(T, I) ASMJIT_LOOKUP_TABLE_128(T, I), ASMJIT_LOOKUP_TABLE_128(T, I + 128) +#define ASMJIT_LOOKUP_TABLE_512(T, I) ASMJIT_LOOKUP_TABLE_256(T, I), ASMJIT_LOOKUP_TABLE_256(T, I + 256) +#define ASMJIT_LOOKUP_TABLE_1024(T, I) ASMJIT_LOOKUP_TABLE_512(T, I), ASMJIT_LOOKUP_TABLE_512(T, I + 512) + +//! \} +//! \endcond + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_MISC_P_H diff --git a/src/asmjit/core/operand.cpp b/src/asmjit/core/operand.cpp new file mode 100644 index 0000000..6156d6f --- /dev/null +++ b/src/asmjit/core/operand.cpp @@ -0,0 +1,113 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
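The logging classes declared above (`Logger`, `FileLogger`, `StringLogger`) receive the text that the `Logging` helpers format, and `FormatOptions` controls how it looks. A minimal sketch of wiring them up, assuming `CodeHolder::setLogger()` is the attach point (that setter is declared elsewhere in this patch, not in the excerpt above); illustrative only:

```
#include <asmjit/asmjit.h>
#include <cstdio>

using namespace asmjit;

void attachLogging(CodeHolder& code) {
  // Log through a FILE* and show encoded bytes plus hexadecimal immediates.
  static FileLogger fileLogger(stdout);
  fileLogger.addFlags(FormatOptions::kFlagMachineCode | FormatOptions::kFlagHexImms);
  code.setLogger(&fileLogger);  // Assumed CodeHolder API (declared elsewhere in this patch).

  // A StringLogger accumulates everything into an internal String instead,
  // which can be read back through data() / dataSize().
  StringLogger stringLogger;
  stringLogger.logf("; generated by %s\n", "my-tool");
  std::printf("%.*s", int(stringLogger.dataSize()), stringLogger.data());
}
```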
+ +#define ASMJIT_EXPORTS + +#include "../core/operand.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [asmjit::Operand - Unit] +// ============================================================================ + +#if defined(ASMJIT_TEST) +UNIT(asmjit_core_operand) { + INFO("Checking operand sizes"); + EXPECT(sizeof(Operand) == 16); + EXPECT(sizeof(BaseReg) == 16); + EXPECT(sizeof(BaseMem) == 16); + EXPECT(sizeof(Imm) == 16); + EXPECT(sizeof(Label) == 16); + + INFO("Checking basic functionality of Operand"); + Operand a, b; + Operand dummy; + + EXPECT(a.isNone() == true); + EXPECT(a.isReg() == false); + EXPECT(a.isMem() == false); + EXPECT(a.isImm() == false); + EXPECT(a.isLabel() == false); + EXPECT(a == b); + EXPECT(a._data64 == 0); + + INFO("Checking basic functionality of Label"); + Label label; + EXPECT(label.isValid() == false); + EXPECT(label.id() == Globals::kInvalidId); + + INFO("Checking basic functionality of BaseReg"); + EXPECT(BaseReg().isReg() == true); + EXPECT(BaseReg().isValid() == false); + EXPECT(BaseReg()._data64 == 0); + EXPECT(dummy.as().isValid() == false); + + // Create some register (not specific to any architecture). + uint32_t rSig = Operand::kOpReg | (1 << Operand::kSignatureRegTypeShift ) | + (2 << Operand::kSignatureRegGroupShift) | + (8 << Operand::kSignatureSizeShift ) ; + BaseReg r1(rSig, 5); + + EXPECT(r1.isValid() == true); + EXPECT(r1.isReg() == true); + EXPECT(r1.isReg(1) == true); + EXPECT(r1.isPhysReg() == true); + EXPECT(r1.isVirtReg() == false); + EXPECT(r1.signature() == rSig); + EXPECT(r1.type() == 1); + EXPECT(r1.group() == 2); + EXPECT(r1.size() == 8); + EXPECT(r1.id() == 5); + EXPECT(r1.isReg(1, 5) == true); // RegType and Id. + EXPECT(r1._data64 == 0); + + // The same type of register having different id. + BaseReg r2(r1, 6); + EXPECT(r2.isValid() == true); + EXPECT(r2.isReg() == true); + EXPECT(r2.isReg(1) == true); + EXPECT(r2.isPhysReg() == true); + EXPECT(r2.isVirtReg() == false); + EXPECT(r2.signature() == rSig); + EXPECT(r2.type() == r1.type()); + EXPECT(r2.group() == r1.group()); + EXPECT(r2.size() == r1.size()); + EXPECT(r2.id() == 6); + EXPECT(r2.isReg(1, 6) == true); + + r1.reset(); + EXPECT(!r1.isReg()); + EXPECT(!r1.isValid()); + + INFO("Checking basic functionality of BaseMem"); + BaseMem m; + EXPECT(m.isMem()); + EXPECT(m == BaseMem()); + EXPECT(m.hasBase() == false); + EXPECT(m.hasIndex() == false); + EXPECT(m.hasOffset() == false); + EXPECT(m.isOffset64Bit() == true); + EXPECT(m.offset() == 0); + + m.setOffset(-1); + EXPECT(m.offsetLo32() == -1); + EXPECT(m.offset() == -1); + + int64_t x = int64_t(0xFF00FF0000000001u); + int32_t xHi = int32_t(0xFF00FF00u); + + m.setOffset(x); + EXPECT(m.offset() == x); + EXPECT(m.offsetLo32() == 1); + EXPECT(m.offsetHi32() == xHi); + + INFO("Checking basic functionality of Imm"); + EXPECT(Imm(-1).i64() == int64_t(-1)); +} +#endif + +ASMJIT_END_NAMESPACE diff --git a/src/asmjit/core/operand.h b/src/asmjit/core/operand.h new file mode 100644 index 0000000..27610dc --- /dev/null +++ b/src/asmjit/core/operand.h @@ -0,0 +1,1316 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
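The unit test above builds a `BaseReg` from a raw packed signature; the same signature fields drive generic operand inspection. A short sketch of that pattern, using only the `Operand_` and `BaseReg` accessors declared in the header that follows (purely illustrative, not part of the patch):

```
#include <asmjit/asmjit.h>
#include <cstdio>

using namespace asmjit;

// Classifies each operand in `ops` by inspecting the packed signature first
// and only then casting to the concrete type via as<T>().
static void describeOperands(const Operand_* ops, uint32_t count) {
  for (uint32_t i = 0; i < count; i++) {
    const Operand_& op = ops[i];
    switch (op.opType()) {
      case Operand::kOpReg: {
        const BaseReg& reg = op.as<BaseReg>();
        std::printf("#%u: reg(type=%u group=%u id=%u)\n", i, reg.type(), reg.group(), reg.id());
        break;
      }
      case Operand::kOpMem:
        std::printf("#%u: mem(size=%u)\n", i, op.size());
        break;
      case Operand::kOpImm:
        std::printf("#%u: imm\n", i);
        break;
      case Operand::kOpLabel:
        std::printf("#%u: label(id=%u)\n", i, op.id());
        break;
      default:
        std::printf("#%u: none\n", i);
        break;
    }
  }
}
```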
+ +#ifndef _ASMJIT_CORE_OPERAND_H +#define _ASMJIT_CORE_OPERAND_H + +#include "../core/support.h" + +ASMJIT_BEGIN_NAMESPACE + +// ============================================================================ +// [Macros] +// ============================================================================ + +//! Adds a template specialization for `REG_TYPE` into the local `RegTraits`. +#define ASMJIT_DEFINE_REG_TRAITS(REG, REG_TYPE, GROUP, SIZE, COUNT, TYPE_ID) \ +template<> \ +struct RegTraits { \ + typedef REG RegT; \ + \ + static constexpr uint32_t kValid = 1; \ + static constexpr uint32_t kCount = COUNT; \ + static constexpr uint32_t kTypeId = TYPE_ID; \ + \ + static constexpr uint32_t kType = REG_TYPE; \ + static constexpr uint32_t kGroup = GROUP; \ + static constexpr uint32_t kSize = SIZE; \ + \ + static constexpr uint32_t kSignature = \ + (Operand::kOpReg << Operand::kSignatureOpShift ) | \ + (kType << Operand::kSignatureRegTypeShift ) | \ + (kGroup << Operand::kSignatureRegGroupShift) | \ + (kSize << Operand::kSignatureSizeShift ) ; \ +} + +//! Adds constructors and member functions to a class that implements abstract +//! register. Abstract register is register that doesn't have type or signature +//! yet, it's a base class like `x86::Reg` or `arm::Reg`. +#define ASMJIT_DEFINE_ABSTRACT_REG(REG, BASE) \ +public: \ + /*! Default constructor that only setups basics. */ \ + constexpr REG() noexcept \ + : BASE(kSignature, kIdBad) {} \ + \ + /*! Makes a copy of the `other` register operand. */ \ + constexpr REG(const REG& other) noexcept \ + : BASE(other) {} \ + \ + /*! Makes a copy of the `other` register having id set to `rId` */ \ + constexpr REG(const BaseReg& other, uint32_t rId) noexcept \ + : BASE(other, rId) {} \ + \ + /*! Creates a register based on `signature` and `rId`. */ \ + constexpr REG(uint32_t signature, uint32_t rId) noexcept \ + : BASE(signature, rId) {} \ + \ + /*! Creates a completely uninitialized REG register operand (garbage). */ \ + inline explicit REG(Globals::NoInit_) noexcept \ + : BASE(Globals::NoInit) {} \ + \ + /*! Creates a new register from register type and id. */ \ + static inline REG fromTypeAndId(uint32_t rType, uint32_t rId) noexcept { \ + return REG(signatureOf(rType), rId); \ + } \ + \ + /*! Clones the register operand. */ \ + constexpr REG clone() const noexcept { return REG(*this); } \ + \ + inline REG& operator=(const REG& other) noexcept = default; + +//! Adds constructors and member functions to a class that implements final +//! register. Final registers MUST HAVE a valid signature. +#define ASMJIT_DEFINE_FINAL_REG(REG, BASE, TRAITS) \ +public: \ + static constexpr uint32_t kThisType = TRAITS::kType; \ + static constexpr uint32_t kThisGroup = TRAITS::kGroup; \ + static constexpr uint32_t kThisSize = TRAITS::kSize; \ + static constexpr uint32_t kSignature = TRAITS::kSignature; \ + \ + ASMJIT_DEFINE_ABSTRACT_REG(REG, BASE) \ + \ + /*! Creates a register operand having its id set to `rId`. */ \ + constexpr explicit REG(uint32_t rId) noexcept \ + : BASE(kSignature, rId) {} + +//! \addtogroup asmjit_core +//! \{ + +// ============================================================================ +// [asmjit::Operand_] +// ============================================================================ + +//! Constructor-less `Operand`. +//! +//! Contains no initialization code and can be used safely to define an array +//! of operands that won't be initialized. This is an `Operand` compatible +//! 
data structure designed to be statically initialized, static const, or to +//! be used by the user to define an array of operands without having them +//! default initialized. +//! +//! The key difference between `Operand` and `Operand_`: +//! +//! ``` +//! Operand_ xArray[10]; // Not initialized, contains garbage. +//! Operand yArray[10]; // All operands initialized to none. +//! ``` +struct Operand_ { + //! Operand's signature that provides operand type and additional information. + uint32_t _signature; + //! Either base id as used by memory operand or any id as used by others. + uint32_t _baseId; + + //! Memory operand data. + struct MemData { + //! Index register id. + uint32_t indexId; + //! Low part of 64-bit offset (or 32-bit offset). + uint32_t offsetLo32; + }; + + //! Additional data used by some operands. + union { + //! 32-bit data (used either by immediate or as a 32-bit view). + uint32_t _data32[2]; + //! 64-bit data (used either by immediate or as a 64-bit view). + uint64_t _data64; + + //! Memory address data. + MemData _mem; + }; + + //! Operand types that can be encoded in `Operand`. + enum OpType : uint32_t { + //! Not an operand or not initialized. + kOpNone = 0, + //! Operand is a register. + kOpReg = 1, + //! Operand is a memory. + kOpMem = 2, + //! Operand is an immediate value. + kOpImm = 3, + //! Operand is a label. + kOpLabel = 4 + }; + static_assert(kOpMem == kOpReg + 1, "asmjit::Operand requires `kOpMem` to be `kOpReg+1`."); + + // \cond INTERNAL + enum SignatureBits : uint32_t { + // Operand type (3 least significant bits). + // |........|........|........|.....XXX| + kSignatureOpShift = 0, + kSignatureOpMask = 0x07u << kSignatureOpShift, + + // Register type (5 bits). + // |........|........|........|XXXXX...| + kSignatureRegTypeShift = 3, + kSignatureRegTypeMask = 0x1Fu << kSignatureRegTypeShift, + + // Register group (4 bits). + // |........|........|....XXXX|........| + kSignatureRegGroupShift = 8, + kSignatureRegGroupMask = 0x0Fu << kSignatureRegGroupShift, + + // Memory base type (5 bits). + // |........|........|........|XXXXX...| + kSignatureMemBaseTypeShift = 3, + kSignatureMemBaseTypeMask = 0x1Fu << kSignatureMemBaseTypeShift, + + // Memory index type (5 bits). + // |........|........|...XXXXX|........| + kSignatureMemIndexTypeShift = 8, + kSignatureMemIndexTypeMask = 0x1Fu << kSignatureMemIndexTypeShift, + + // Memory base+index combined (10 bits). + // |........|........|...XXXXX|XXXXX...| + kSignatureMemBaseIndexShift = 3, + kSignatureMemBaseIndexMask = 0x3FFu << kSignatureMemBaseIndexShift, + + // Memory address type (2 bits). + // |........|........|.XX.....|........| + kSignatureMemAddrTypeShift = 13, + kSignatureMemAddrTypeMask = 0x03u << kSignatureMemAddrTypeShift, + + // This memory operand represents a home-slot or stack (BaseCompiler). + // |........|........|X.......|........| + kSignatureMemRegHomeShift = 15, + kSignatureMemRegHomeFlag = 0x01u << kSignatureMemRegHomeShift, + + // Operand size (8 most significant bits). + // |XXXXXXXX|........|........|........| + kSignatureSizeShift = 24, + kSignatureSizeMask = 0xFFu << kSignatureSizeShift + }; + //! \endcond + + //! \cond INTERNAL + //! Constants useful for VirtId <-> Index translation. + enum VirtIdConstants : uint32_t { + //! Minimum valid packed-id. + kVirtIdMin = 256, + //! Maximum valid packed-id, excludes Globals::kInvalidId. + kVirtIdMax = Globals::kInvalidId - 1, + //! Count of valid packed-ids. + kVirtIdCount = uint32_t(kVirtIdMax - kVirtIdMin + 1) + }; + + //! 
Tests whether the given `id` is a valid virtual register id. Since AsmJit + //! supports both physical and virtual registers it must be able to distinguish + //! between these two. The idea is that physical registers are always limited + //! in size, so virtual identifiers start from `kVirtIdMin` and end at + //! `kVirtIdMax`. + static ASMJIT_INLINE bool isVirtId(uint32_t id) noexcept { return id - kVirtIdMin < uint32_t(kVirtIdCount); } + //! Converts a real-id into a packed-id that can be stored in Operand. + static ASMJIT_INLINE uint32_t indexToVirtId(uint32_t id) noexcept { return id + kVirtIdMin; } + //! Converts a packed-id back to real-id. + static ASMJIT_INLINE uint32_t virtIdToIndex(uint32_t id) noexcept { return id - kVirtIdMin; } + //! \endcond + + //! \name Construction & Destruction + //! \{ + + //! \cond INTERNAL + //! Initializes a `BaseReg` operand from `signature` and register `id`. + inline void _initReg(uint32_t signature, uint32_t id) noexcept { + _signature = signature; + _baseId = id; + _data64 = 0; + } + + //! Initializes the operand from `other` (used by operator overloads). + inline void copyFrom(const Operand_& other) noexcept { memcpy(this, &other, sizeof(Operand_)); } + //! \endcond + + //! Resets the `Operand` to none. + //! + //! None operand is defined the following way: + //! - Its signature is zero (kOpNone, and the rest zero as well). + //! - Its id is `0`. + //! - The reserved8_4 field is set to `0`. + //! - The reserved12_4 field is set to zero. + //! + //! In other words, reset operands have all members set to zero. Reset operand + //! must match the Operand state right after its construction. Alternatively, + //! if you have an array of operands, you can simply use `memset()`. + //! + //! ``` + //! using namespace asmjit; + //! + //! Operand a; + //! Operand b; + //! assert(a == b); + //! + //! b = x86::eax; + //! assert(a != b); + //! + //! b.reset(); + //! assert(a == b); + //! + //! memset(&b, 0, sizeof(Operand)); + //! assert(a == b); + //! ``` + inline void reset() noexcept { + _signature = 0; + _baseId = 0; + _data64 = 0; + } + + //! \} + + //! \name Operator Overloads + //! \{ + + constexpr bool operator==(const Operand_& other) const noexcept { return isEqual(other); } + constexpr bool operator!=(const Operand_& other) const noexcept { return !isEqual(other); } + + //! \} + + //! \name Cast + //! \{ + + //! Casts this operand to `T` type. + template + inline T& as() noexcept { return static_cast(*this); } + + //! Casts this operand to `T` type (const). + template + inline const T& as() const noexcept { return static_cast(*this); } + + //! \} + + //! \name Accessors + //! \{ + + //! Tests whether the operand matches the given signature `sign`. + constexpr bool hasSignature(uint32_t signature) const noexcept { return _signature == signature; } + //! Tests whether the operand matches the signature of the `other` operand. + constexpr bool hasSignature(const Operand_& other) const noexcept { return _signature == other.signature(); } + + //! Returns operand signature as unsigned 32-bit integer. + //! + //! Signature is first 4 bytes of the operand data. It's used mostly for + //! operand checking as it's much faster to check 4 bytes at once than having + //! to check these bytes individually. + constexpr uint32_t signature() const noexcept { return _signature; } + + //! Sets the operand signature, see `signature()`. + //! + //! \note Improper use of `setSignature()` can lead to hard-to-debug errors. 
+ inline void setSignature(uint32_t signature) noexcept { _signature = signature; } + + //! \cond INTERNAL + template + constexpr bool _hasSignaturePart() const noexcept { + return (_signature & mask) != 0; + } + + template + constexpr uint32_t _getSignaturePart() const noexcept { + return (_signature >> Support::constCtz(mask)) & (mask >> Support::constCtz(mask)); + } + + template + inline void _setSignaturePart(uint32_t value) noexcept { + ASMJIT_ASSERT((value & ~(mask >> Support::constCtz(mask))) == 0); + _signature = (_signature & ~mask) | (value << Support::constCtz(mask)); + } + //! \endcond + + //! Returns the type of the operand, see `OpType`. + constexpr uint32_t opType() const noexcept { return _getSignaturePart(); } + //! Tests whether the operand is none (`kOpNone`). + constexpr bool isNone() const noexcept { return _signature == 0; } + //! Tests whether the operand is a register (`kOpReg`). + constexpr bool isReg() const noexcept { return opType() == kOpReg; } + //! Tests whether the operand is a memory location (`kOpMem`). + constexpr bool isMem() const noexcept { return opType() == kOpMem; } + //! Tests whether the operand is an immediate (`kOpImm`). + constexpr bool isImm() const noexcept { return opType() == kOpImm; } + //! Tests whether the operand is a label (`kOpLabel`). + constexpr bool isLabel() const noexcept { return opType() == kOpLabel; } + + //! Tests whether the operand is a physical register. + constexpr bool isPhysReg() const noexcept { return isReg() && _baseId < 0xFFu; } + //! Tests whether the operand is a virtual register. + constexpr bool isVirtReg() const noexcept { return isReg() && _baseId > 0xFFu; } + + //! Tests whether the operand specifies a size (i.e. the size is not zero). + constexpr bool hasSize() const noexcept { return _hasSignaturePart(); } + //! Tests whether the size of the operand matches `size`. + constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; } + + //! Returns the size of the operand in bytes. + //! + //! The value returned depends on the operand type: + //! * None - Should always return zero size. + //! * Reg - Should always return the size of the register. If the register + //! size depends on architecture (like `x86::CReg` and `x86::DReg`) + //! the size returned should be the greatest possible (so it should + //! return 64-bit size in such case). + //! * Mem - Size is optional and will be in most cases zero. + //! * Imm - Should always return zero size. + //! * Label - Should always return zero size. + constexpr uint32_t size() const noexcept { return _getSignaturePart(); } + + //! Returns the operand id. + //! + //! The value returned should be interpreted accordingly to the operand type: + //! * None - Should be `0`. + //! * Reg - Physical or virtual register id. + //! * Mem - Multiple meanings - BASE address (register or label id), or + //! high value of a 64-bit absolute address. + //! * Imm - Should be `0`. + //! * Label - Label id if it was created by using `newLabel()` or + //! `Globals::kInvalidId` if the label is invalid or not + //! initialized. + constexpr uint32_t id() const noexcept { return _baseId; } + + //! Tests whether the operand is 100% equal to `other`. + constexpr bool isEqual(const Operand_& other) const noexcept { + return (_signature == other._signature) & + (_baseId == other._baseId ) & + (_data64 == other._data64 ) ; + } + + //! Tests whether the operand is a register matching `rType`. 
+ constexpr bool isReg(uint32_t rType) const noexcept { + return (_signature & (kSignatureOpMask | kSignatureRegTypeMask)) == + ((kOpReg << kSignatureOpShift) | (rType << kSignatureRegTypeShift)); + } + + //! Tests whether the operand is register and of `rType` and `rId`. + constexpr bool isReg(uint32_t rType, uint32_t rId) const noexcept { + return isReg(rType) && id() == rId; + } + + //! Tests whether the operand is a register or memory. + constexpr bool isRegOrMem() const noexcept { + return Support::isBetween(opType(), kOpReg, kOpMem); + } + + //! \} +}; + +// ============================================================================ +// [asmjit::Operand] +// ============================================================================ + +//! Operand can contain register, memory location, immediate, or label. +class Operand : public Operand_ { +public: + //! \name Construction & Destruction + //! \{ + + //! Creates `kOpNone` operand having all members initialized to zero. + constexpr Operand() noexcept + : Operand_{ kOpNone, 0u, {{ 0u, 0u }}} {} + + //! Creates a cloned `other` operand. + constexpr Operand(const Operand& other) noexcept = default; + + //! Creates a cloned `other` operand. + constexpr explicit Operand(const Operand_& other) + : Operand_(other) {} + + //! Creates an operand initialized to raw `[u0, u1, u2, u3]` values. + constexpr Operand(Globals::Init_, uint32_t u0, uint32_t u1, uint32_t u2, uint32_t u3) noexcept + : Operand_{ u0, u1, {{ u2, u3 }}} {} + + //! Creates an uninitialized operand (dangerous). + inline explicit Operand(Globals::NoInit_) noexcept {} + + //! \} + + //! \name Operator Overloads + //! \{ + + inline Operand& operator=(const Operand& other) noexcept = default; + inline Operand& operator=(const Operand_& other) noexcept { return operator=(static_cast(other)); } + + //! \} + + //! \name Utilities + //! \{ + + //! Clones this operand and returns its copy. + constexpr Operand clone() const noexcept { return Operand(*this); } + + //! \} +}; + +static_assert(sizeof(Operand) == 16, "asmjit::Operand must be exactly 16 bytes long"); + +namespace Globals { + //! A default-constructed operand of `Operand_::kOpNone` type. + static constexpr const Operand none; +} + +// ============================================================================ +// [asmjit::Label] +// ============================================================================ + +//! Label (jump target or data location). +//! +//! Label represents a location in code typically used as a jump target, but +//! may be also a reference to some data or a static variable. Label has to be +//! explicitly created by BaseEmitter. +//! +//! Example of using labels: +//! +//! ``` +//! // Create some emitter (for example x86::Assembler). +//! x86::Assembler a; +//! +//! // Create Label instance. +//! Label L1 = a.newLabel(); +//! +//! // ... your code ... +//! +//! // Using label. +//! a.jump(L1); +//! +//! // ... your code ... +//! +//! // Bind label to the current position, see `BaseEmitter::bind()`. +//! a.bind(L1); +//! ``` +class Label : public Operand { +public: + //! Type of the Label. + enum LabelType : uint32_t { + //! Anonymous (unnamed) label. + kTypeAnonymous = 0, + //! Local label (always has parentId). + kTypeLocal = 1, + //! Global label (never has parentId). + kTypeGlobal = 2, + //! Number of label types. + kTypeCount = 3 + }; + + // TODO: Find a better place, find a better name. + enum { + //! Label tag is used as a sub-type, forming a unique signature across all + //! 
operand types as 0x1 is never associated with any register (reg-type). + //! This means that a memory operand's BASE register can be constructed + //! from virtually any operand (register vs. label) by just assigning its + //! type (reg type or label-tag) and operand id. + kLabelTag = 0x1 + }; + + //! \name Construction & Destruction + //! \{ + + //! Creates a label operand without ID (you must set the ID to make it valid). + constexpr Label() noexcept + : Operand(Globals::Init, kOpLabel, Globals::kInvalidId, 0, 0) {} + + //! Creates a cloned label operand of `other` . + constexpr Label(const Label& other) noexcept + : Operand(other) {} + + //! Creates a label operand of the given `id`. + constexpr explicit Label(uint32_t id) noexcept + : Operand(Globals::Init, kOpLabel, id, 0, 0) {} + + inline explicit Label(Globals::NoInit_) noexcept + : Operand(Globals::NoInit) {} + + //! Resets the label, will reset all properties and set its ID to `Globals::kInvalidId`. + inline void reset() noexcept { + _signature = kOpLabel; + _baseId = Globals::kInvalidId; + _data64 = 0; + } + + //! \} + + //! \name Overloaded Operators + //! \{ + + inline Label& operator=(const Label& other) noexcept = default; + + //! \} + + //! \name Accessors + //! \{ + + //! Tests whether the label was created by CodeHolder and/or an attached emitter. + constexpr bool isValid() const noexcept { return _baseId != Globals::kInvalidId; } + //! Sets the label `id`. + inline void setId(uint32_t id) noexcept { _baseId = id; } + + //! \} +}; + +// ============================================================================ +// [asmjit::BaseRegTraits] +// ============================================================================ + +//! \cond INTERNAL +//! Default register traits. +struct BaseRegTraits { + //! RegType is not valid by default. + static constexpr uint32_t kValid = 0; + //! Count of registers (0 if none). + static constexpr uint32_t kCount = 0; + //! Everything is void by default. + static constexpr uint32_t kTypeId = 0; + + //! Zero type by default. + static constexpr uint32_t kType = 0; + //! Zero group by default. + static constexpr uint32_t kGroup = 0; + //! No size by default. + static constexpr uint32_t kSize = 0; + + //! Empty signature by default. + static constexpr uint32_t kSignature = Operand::kOpReg; +}; +//! \endcond + +// ============================================================================ +// [asmjit::BaseReg] +// ============================================================================ + +//! Structure that allows to extract a register information based on the signature. +//! +//! This information is compatible with operand's signature (32-bit integer) +//! and `RegInfo` just provides easy way to access it. +struct RegInfo { + inline void reset() noexcept { _signature = 0; } + inline void setSignature(uint32_t signature) noexcept { _signature = signature; } + + template + constexpr uint32_t _getSignaturePart() const noexcept { + return (_signature >> Support::constCtz(mask)) & (mask >> Support::constCtz(mask)); + } + + constexpr bool isValid() const noexcept { return _signature != 0; } + constexpr uint32_t signature() const noexcept { return _signature; } + constexpr uint32_t opType() const noexcept { return _getSignaturePart(); } + constexpr uint32_t group() const noexcept { return _getSignaturePart(); } + constexpr uint32_t type() const noexcept { return _getSignaturePart(); } + constexpr uint32_t size() const noexcept { return _getSignaturePart(); } + + uint32_t _signature; +}; + +//! 
Physical/Virtual register operand. +class BaseReg : public Operand { +public: + //! Architecture neutral register types. + //! + //! These must be reused by any platform that contains that types. All GP + //! and VEC registers are also allowed by design to be part of a BASE|INDEX + //! of a memory operand. + enum RegType : uint32_t { + //! No register - unused, invalid, multiple meanings. + kTypeNone = 0, + + // (1 is used as a LabelTag) + + //! 8-bit low general purpose register (X86). + kTypeGp8Lo = 2, + //! 8-bit high general purpose register (X86). + kTypeGp8Hi = 3, + //! 16-bit general purpose register (X86). + kTypeGp16 = 4, + //! 32-bit general purpose register (X86|ARM). + kTypeGp32 = 5, + //! 64-bit general purpose register (X86|ARM). + kTypeGp64 = 6, + //! 32-bit view of a vector register (ARM). + kTypeVec32 = 7, + //! 64-bit view of a vector register (ARM). + kTypeVec64 = 8, + //! 128-bit view of a vector register (X86|ARM). + kTypeVec128 = 9, + //! 256-bit view of a vector register (X86). + kTypeVec256 = 10, + //! 512-bit view of a vector register (X86). + kTypeVec512 = 11, + //! 1024-bit view of a vector register (future). + kTypeVec1024 = 12, + //! Other0 register, should match `kOther0` group. + kTypeOther0 = 13, + //! Other1 register, should match `kOther1` group. + kTypeOther1 = 14, + //! Universal id of IP/PC register (if separate). + kTypeIP = 15, + //! Start of platform dependent register types (must be honored). + kTypeCustom = 16, + //! Maximum possible register id of all architectures. + kTypeMax = 31 + }; + + //! Register group (architecture neutral), and some limits. + enum RegGroup : uint32_t { + //! General purpose register group compatible with all backends. + kGroupGp = 0, + //! Vector register group compatible with all backends. + kGroupVec = 1, + //! Group that is architecture dependent. + kGroupOther0 = 2, + //! Group that is architecture dependent. + kGroupOther1 = 3, + //! Count of register groups used by virtual registers. + kGroupVirt = 4, + //! Count of register groups used by physical registers. + kGroupCount = 16 + }; + + enum Id : uint32_t { + //! None or any register (mostly internal). + kIdBad = 0xFFu + }; + + static constexpr uint32_t kSignature = kOpReg; + + //! \name Construction & Destruction + //! \{ + + //! Creates a dummy register operand. + constexpr BaseReg() noexcept + : Operand(Globals::Init, kSignature, kIdBad, 0, 0) {} + + //! Creates a new register operand which is the same as `other` . + constexpr BaseReg(const BaseReg& other) noexcept + : Operand(other) {} + + //! Creates a new register operand compatible with `other`, but with a different `rId`. + constexpr BaseReg(const BaseReg& other, uint32_t rId) noexcept + : Operand(Globals::Init, other._signature, rId, 0, 0) {} + + //! Creates a register initialized to `signature` and `rId`. + constexpr BaseReg(uint32_t signature, uint32_t rId) noexcept + : Operand(Globals::Init, signature, rId, 0, 0) {} + + inline explicit BaseReg(Globals::NoInit_) noexcept + : Operand(Globals::NoInit) {} + + //! \} + + //! \name Overloaded Operators + //! \{ + + inline BaseReg& operator=(const BaseReg& other) noexcept = default; + + //! \} + + //! \name Accessors + //! \{ + + //! Tests whether this register is the same as `other`. + //! + //! This is just an optimization. Registers by default only use the first + //! 8 bytes of the Operand, so this method takes advantage of this knowledge + //! and only compares these 8 bytes. If both operands were created correctly + //! 
then `isEqual()` and `isSame()` should give the same answer, however, if
+  //! one of the two operands contains garbage or other metadata in the
+  //! upper 8 bytes then `isSame()` may return `true` in cases where `isEqual()`
+  //! returns false.
+  constexpr bool isSame(const BaseReg& other) const noexcept {
+    return (_signature == other._signature) &
+           (_baseId    == other._baseId   ) ;
+  }
+
+  //! Tests whether the register is valid (either virtual or physical).
+  constexpr bool isValid() const noexcept { return (_signature != 0) & (_baseId != kIdBad); }
+
+  //! Tests whether this is a physical register.
+  constexpr bool isPhysReg() const noexcept { return _baseId < kIdBad; }
+  //! Tests whether this is a virtual register.
+  constexpr bool isVirtReg() const noexcept { return _baseId > kIdBad; }
+
+  //! Tests whether the register type matches `type` - same as `isReg(type)`, provided for convenience.
+  constexpr bool isType(uint32_t type) const noexcept { return (_signature & kSignatureRegTypeMask) == (type << kSignatureRegTypeShift); }
+  //! Tests whether the register group matches `group`.
+  constexpr bool isGroup(uint32_t group) const noexcept { return (_signature & kSignatureRegGroupMask) == (group << kSignatureRegGroupShift); }
+
+  //! Tests whether the register is a general purpose register (any size).
+  constexpr bool isGp() const noexcept { return isGroup(kGroupGp); }
+  //! Tests whether the register is a vector register.
+  constexpr bool isVec() const noexcept { return isGroup(kGroupVec); }
+
+  using Operand_::isReg;
+
+  //! Same as `isType()`, provided for convenience.
+  constexpr bool isReg(uint32_t rType) const noexcept { return isType(rType); }
+  //! Tests whether the register type matches `type` and register id matches `rId`.
+  constexpr bool isReg(uint32_t rType, uint32_t rId) const noexcept { return isType(rType) && id() == rId; }
+
+  //! Returns the type of the register.
+  constexpr uint32_t type() const noexcept { return _getSignaturePart<kSignatureRegTypeMask>(); }
+  //! Returns the register group.
+  constexpr uint32_t group() const noexcept { return _getSignaturePart<kSignatureRegGroupMask>(); }
+
+  //! Clones the register operand.
+  constexpr BaseReg clone() const noexcept { return BaseReg(*this); }
+
+  //! Casts this register to `RegT` by also changing its signature.
+  //!
+  //! \note Improper use of `cloneAs()` can lead to hard-to-debug errors.
+  template<typename RegT>
+  constexpr RegT cloneAs() const noexcept { return RegT(RegT::kSignature, id()); }
+
+  //! Casts this register to `other` by also changing its signature.
+  //!
+  //! \note Improper use of `cloneAs()` can lead to hard-to-debug errors.
+  template<typename RegT>
+  constexpr RegT cloneAs(const RegT& other) const noexcept { return RegT(other.signature(), id()); }
+
+  //! Sets the register id to `rId`.
+  inline void setId(uint32_t rId) noexcept { _baseId = rId; }
+
+  //! Sets a 32-bit operand signature based on traits of `RegT`.
+  template<typename RegT>
+  inline void setSignatureT() noexcept { _signature = RegT::kSignature; }
+
+  //! Sets the register `signature` and `rId`.
+  inline void setSignatureAndId(uint32_t signature, uint32_t rId) noexcept {
+    _signature = signature;
+    _baseId = rId;
+  }
+
+  //! \}
+
+  //! \name Static Functions
+  //! \{
+
+  static inline bool isGp(const Operand_& op) noexcept {
+    // Check operand type and register group. Not interested in register type and size.
+    const uint32_t kSgn = (kOpReg   << kSignatureOpShift      ) |
+                          (kGroupGp << kSignatureRegGroupShift) ;
+    return (op.signature() & (kSignatureOpMask | kSignatureRegGroupMask)) == kSgn;
+  }
+
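Editor's note: a short usage sketch for `cloneAs()` (illustrative only, not part of the patch). It assumes the x86 backend types introduced elsewhere in this patch (`x86::Gpd`, `x86::Gpq`, `x86::eax`); the point is only that cloning re-types a register while keeping its id:

  #include <asmjit/x86.h>
  #include <cassert>
  using namespace asmjit;

  void cloneAsExample() {
    x86::Gpd r32 = x86::eax;                 // 32-bit view of GP register id 0.
    x86::Gpq r64 = r32.cloneAs<x86::Gpq>();  // Same id, signature of a 64-bit GP register (rax).
    x86::Gpd back = r64.cloneAs(r32);        // Re-type using another register's signature.

    // All three views refer to the same register id; only the signatures differ.
    assert(r32.id() == r64.id() && r64.id() == back.id());
  }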
+  //! Tests whether the `op` operand is a vector register.
+  static inline bool isVec(const Operand_& op) noexcept {
+    // Check operand type and register group. Not interested in register type and size.
+    const uint32_t kSgn = (kOpReg    << kSignatureOpShift      ) |
+                          (kGroupVec << kSignatureRegGroupShift) ;
+    return (op.signature() & (kSignatureOpMask | kSignatureRegGroupMask)) == kSgn;
+  }
+
+  static inline bool isGp(const Operand_& op, uint32_t rId) noexcept { return isGp(op) & (op.id() == rId); }
+  static inline bool isVec(const Operand_& op, uint32_t rId) noexcept { return isVec(op) & (op.id() == rId); }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RegOnly]
+// ============================================================================
+
+//! RegOnly is an 8-byte version of `BaseReg` that allows storing either a
+//! register or nothing.
+//!
+//! This class was designed to decrease the space consumed by each extra "operand"
+//! in `BaseEmitter` and `InstNode` classes.
+struct RegOnly {
+  //! Type of the operand, either `kOpNone` or `kOpReg`.
+  uint32_t _signature;
+  //! Physical or virtual register id.
+  uint32_t _id;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Initializes the `RegOnly` instance to hold register `signature` and `id`.
+  inline void init(uint32_t signature, uint32_t id) noexcept {
+    _signature = signature;
+    _id = id;
+  }
+
+  inline void init(const BaseReg& reg) noexcept { init(reg.signature(), reg.id()); }
+  inline void init(const RegOnly& reg) noexcept { init(reg.signature(), reg.id()); }
+
+  //! Resets the `RegOnly` members to zeros (none).
+  inline void reset() noexcept { init(0, 0); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Tests whether this ExtraReg is none (same as calling `Operand_::isNone()`).
+  constexpr bool isNone() const noexcept { return _signature == 0; }
+  //! Tests whether the register is valid (either virtual or physical).
+  constexpr bool isReg() const noexcept { return _signature != 0; }
+
+  //! Tests whether this is a physical register.
+  constexpr bool isPhysReg() const noexcept { return _id < BaseReg::kIdBad; }
+  //! Tests whether this is a virtual register (used by `BaseCompiler`).
+  constexpr bool isVirtReg() const noexcept { return _id > BaseReg::kIdBad; }
+
+  //! Returns the register signature or 0 if no register is assigned.
+  constexpr uint32_t signature() const noexcept { return _signature; }
+  //! Returns the register id.
+  //!
+  //! \note Always check whether the register is assigned before using the
+  //! returned identifier, as a non-assigned `RegOnly` instance would return
+  //! a zero id, which is still a valid register id.
+  constexpr uint32_t id() const noexcept { return _id; }
+
+  //! Sets the register id.
+  inline void setId(uint32_t id) noexcept { _id = id; }
+
+  //! \cond INTERNAL
+  //!
+  //! Extracts information from operand's signature.
+  template<uint32_t mask>
+  constexpr uint32_t _getSignaturePart() const noexcept {
+    return (_signature >> Support::constCtz(mask)) & (mask >> Support::constCtz(mask));
+  }
+  //! \endcond
+
+  //! Returns the type of the register.
+  constexpr uint32_t type() const noexcept { return _getSignaturePart<Operand::kSignatureRegTypeMask>(); }
+  //! Returns the register group.
+  constexpr uint32_t group() const noexcept { return _getSignaturePart<Operand::kSignatureRegGroupMask>(); }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Converts this ExtraReg to a real `RegT` operand.
+  template<typename RegT>
+  constexpr RegT toReg() const noexcept { return RegT(_signature, _id); }
+
+  //!
\} +}; + +// ============================================================================ +// [asmjit::BaseMem] +// ============================================================================ + +//! Base class for all memory operands. +//! +//! \note It's tricky to pack all possible cases that define a memory operand +//! into just 16 bytes. The `BaseMem` splits data into the following parts: +//! +//! BASE - Base register or label - requires 36 bits total. 4 bits are used to +//! encode the type of the BASE operand (label vs. register type) and +//! the remaining 32 bits define the BASE id, which can be a physical or +//! virtual register index. If BASE type is zero, which is never used as +//! a register-type and label doesn't use it as well then BASE field +//! contains a high DWORD of a possible 64-bit absolute address, which is +//! possible on X64. +//! +//! INDEX - Index register (or theoretically Label, which doesn't make sense). +//! Encoding is similar to BASE - it also requires 36 bits and splits +//! the encoding to INDEX type (4 bits defining the register type) and +//! id (32-bits). +//! +//! OFFSET - A relative offset of the address. Basically if BASE is specified +//! the relative displacement adjusts BASE and an optional INDEX. if +//! BASE is not specified then the OFFSET should be considered as ABSOLUTE +//! address (at least on X86). In that case its low 32 bits are stored in +//! DISPLACEMENT field and the remaining high 32 bits are stored in BASE. +//! +//! OTHER - There is rest 8 bits that can be used for whatever purpose. The +//! x86::Mem operand uses these bits to store segment override prefix and +//! index shift (scale). +class BaseMem : public Operand { +public: + enum AddrType : uint32_t { + kAddrTypeDefault = 0, + kAddrTypeAbs = 1, + kAddrTypeRel = 2 + }; + + // Shortcuts. + enum SignatureMem : uint32_t { + kSignatureMemAbs = kAddrTypeAbs << kSignatureMemAddrTypeShift, + kSignatureMemRel = kAddrTypeRel << kSignatureMemAddrTypeShift + }; + + //! \cond INTERNAL + //! Used internally to construct `BaseMem` operand from decomposed data. + struct Decomposed { + uint32_t baseType; + uint32_t baseId; + uint32_t indexType; + uint32_t indexId; + int32_t offset; + uint32_t size; + uint32_t flags; + }; + //! \endcond + + //! \name Construction & Destruction + //! \{ + + //! Creates a default `BaseMem` operand, that points to [0]. + constexpr BaseMem() noexcept + : Operand(Globals::Init, kOpMem, 0, 0, 0) {} + + //! Creates a `BaseMem` operand that is a clone of `other`. + constexpr BaseMem(const BaseMem& other) noexcept + : Operand(other) {} + + //! \cond INTERNAL + + //! Creates a `BaseMem` operand from 4 integers as used by `Operand_` struct. + constexpr BaseMem(Globals::Init_, uint32_t u0, uint32_t u1, uint32_t u2, uint32_t u3) noexcept + : Operand(Globals::Init, u0, u1, u2, u3) {} + + constexpr BaseMem(const Decomposed& d) noexcept + : Operand(Globals::Init, + kOpMem | (d.baseType << kSignatureMemBaseTypeShift ) + | (d.indexType << kSignatureMemIndexTypeShift) + | (d.size << kSignatureSizeShift ) + | d.flags, + d.baseId, + d.indexId, + uint32_t(d.offset)) {} + + //! \endcond + + //! Creates a completely uninitialized `BaseMem` operand. + inline explicit BaseMem(Globals::NoInit_) noexcept + : Operand(Globals::NoInit) {} + + //! Resets the memory operand - after the reset the memory points to [0]. + inline void reset() noexcept { + _signature = kOpMem; + _baseId = 0; + _data64 = 0; + } + + //! \} + + //! \name Overloaded Operators + //! 
\{
+
+  inline BaseMem& operator=(const BaseMem& other) noexcept { copyFrom(other); return *this; }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Clones the memory operand.
+  constexpr BaseMem clone() const noexcept { return BaseMem(*this); }
+
+  constexpr uint32_t addrType() const noexcept { return _getSignaturePart<kSignatureMemAddrTypeMask>(); }
+  inline void setAddrType(uint32_t addrType) noexcept { _setSignaturePart<kSignatureMemAddrTypeMask>(addrType); }
+  inline void resetAddrType() noexcept { _setSignaturePart<kSignatureMemAddrTypeMask>(0); }
+
+  constexpr bool isAbs() const noexcept { return addrType() == kAddrTypeAbs; }
+  inline void setAbs() noexcept { setAddrType(kAddrTypeAbs); }
+
+  constexpr bool isRel() const noexcept { return addrType() == kAddrTypeRel; }
+  inline void setRel() noexcept { setAddrType(kAddrTypeRel); }
+
+  constexpr bool isRegHome() const noexcept { return _hasSignaturePart<kSignatureMemRegHomeFlag>(); }
+  inline void setRegHome() noexcept { _signature |= kSignatureMemRegHomeFlag; }
+  inline void clearRegHome() noexcept { _signature &= ~kSignatureMemRegHomeFlag; }
+
+  //! Tests whether the memory operand has a BASE register or label specified.
+  constexpr bool hasBase() const noexcept { return (_signature & kSignatureMemBaseTypeMask) != 0; }
+  //! Tests whether the memory operand has an INDEX register specified.
+  constexpr bool hasIndex() const noexcept { return (_signature & kSignatureMemIndexTypeMask) != 0; }
+  //! Tests whether the memory operand has a BASE register/label or an INDEX register.
+  constexpr bool hasBaseOrIndex() const noexcept { return (_signature & kSignatureMemBaseIndexMask) != 0; }
+  //! Tests whether the memory operand has both BASE and INDEX registers.
+  constexpr bool hasBaseAndIndex() const noexcept { return (_signature & kSignatureMemBaseTypeMask) != 0 && (_signature & kSignatureMemIndexTypeMask) != 0; }
+
+  //! Tests whether the BASE operand is a register (registers start after `kLabelTag`).
+  constexpr bool hasBaseReg() const noexcept { return (_signature & kSignatureMemBaseTypeMask) > (Label::kLabelTag << kSignatureMemBaseTypeShift); }
+  //! Tests whether the BASE operand is a label.
+  constexpr bool hasBaseLabel() const noexcept { return (_signature & kSignatureMemBaseTypeMask) == (Label::kLabelTag << kSignatureMemBaseTypeShift); }
+  //! Tests whether the INDEX operand is a register (registers start after `kLabelTag`).
+  constexpr bool hasIndexReg() const noexcept { return (_signature & kSignatureMemIndexTypeMask) > (Label::kLabelTag << kSignatureMemIndexTypeShift); }
+
+  //! Returns the type of the BASE register (0 if this memory operand doesn't
+  //! use the BASE register).
+  //!
+  //! \note If the returned type is one (a value never associated with any
+  //! register type) the BASE is not a register, but a label; one equals
+  //! `kLabelTag`. You should always check `hasBaseLabel()` before using the
+  //! `baseId()` result.
+  constexpr uint32_t baseType() const noexcept { return _getSignaturePart<kSignatureMemBaseTypeMask>(); }
+
+  //! Returns the type of an INDEX register (0 if this memory operand doesn't
+  //! use the INDEX register).
+  constexpr uint32_t indexType() const noexcept { return _getSignaturePart<kSignatureMemIndexTypeMask>(); }
+
+  //! Returns both BASE (4:0 bits) and INDEX (9:5 bits) types combined into a
+  //! single value; this is used internally for BASE+INDEX validation.
+  constexpr uint32_t baseAndIndexTypes() const noexcept { return _getSignaturePart<kSignatureMemBaseIndexMask>(); }
+
+  //! Returns the id of the BASE register or label (if the BASE was specified
+  //! as a label).
+  constexpr uint32_t baseId() const noexcept { return _baseId; }
+
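Editor's note: an illustrative, standalone sketch (not part of the patch) of the offset packing the comments above describe: when no BASE register is present (`baseType() == 0`), the 32-bit `_baseId` field doubles as the high DWORD of a 64-bit absolute address, while the low DWORD lives in the displacement field:

  #include <cstdint>

  struct PackedAddress {
    uint32_t baseId;     // High 32 bits of the address when there is no BASE register.
    uint32_t offsetLo32; // Low 32 bits of the displacement / address.

    void setAbs64(uint64_t address) noexcept {
      offsetLo32 = uint32_t(address & 0xFFFFFFFFu);
      baseId     = uint32_t(address >> 32);
    }

    int64_t offset(bool hasBaseReg) const noexcept {
      // With a BASE register only a sign-extended 32-bit displacement exists;
      // without one the two halves form the full 64-bit value.
      return hasBaseReg ? int64_t(int32_t(offsetLo32))
                        : int64_t((uint64_t(baseId) << 32) | offsetLo32);
    }
  };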
+  //! Returns the id of the INDEX register.
+  constexpr uint32_t indexId() const noexcept { return _mem.indexId; }
+
+  //! Sets the id of the BASE register (without modifying its type).
+  inline void setBaseId(uint32_t rId) noexcept { _baseId = rId; }
+  //! Sets the id of the INDEX register (without modifying its type).
+  inline void setIndexId(uint32_t rId) noexcept { _mem.indexId = rId; }
+
+  //! Sets the base register to type and id of the given `base` operand.
+  inline void setBase(const BaseReg& base) noexcept { return _setBase(base.type(), base.id()); }
+  //! Sets the index register to type and id of the given `index` operand.
+  inline void setIndex(const BaseReg& index) noexcept { return _setIndex(index.type(), index.id()); }
+
+  inline void _setBase(uint32_t rType, uint32_t rId) noexcept {
+    _setSignaturePart<kSignatureMemBaseTypeMask>(rType);
+    _baseId = rId;
+  }
+
+  inline void _setIndex(uint32_t rType, uint32_t rId) noexcept {
+    _setSignaturePart<kSignatureMemIndexTypeMask>(rType);
+    _mem.indexId = rId;
+  }
+
+  //! Resets the memory operand's BASE register or label.
+  inline void resetBase() noexcept { _setBase(0, 0); }
+  //! Resets the memory operand's INDEX register.
+  inline void resetIndex() noexcept { _setIndex(0, 0); }
+
+  //! Sets the memory operand size (in bytes).
+  inline void setSize(uint32_t size) noexcept { _setSignaturePart<kSignatureSizeMask>(size); }
+
+  //! Tests whether the memory operand has a 64-bit offset or absolute address.
+  //!
+  //! If this is true then `hasBase()` must always report false.
+  constexpr bool isOffset64Bit() const noexcept { return baseType() == 0; }
+
+  //! Tests whether the memory operand has a non-zero offset or absolute address.
+  constexpr bool hasOffset() const noexcept {
+    return (_mem.offsetLo32 | uint32_t(_baseId & Support::bitMaskFromBool(isOffset64Bit()))) != 0;
+  }
+
+  //! Returns either relative offset or absolute address as 64-bit integer.
+  constexpr int64_t offset() const noexcept {
+    return isOffset64Bit() ? int64_t(uint64_t(_mem.offsetLo32) | (uint64_t(_baseId) << 32))
+                           : int64_t(int32_t(_mem.offsetLo32)); // Sign extend 32-bit offset.
+  }
+
+  //! Returns a 32-bit low part of a 64-bit offset or absolute address.
+  constexpr int32_t offsetLo32() const noexcept { return int32_t(_mem.offsetLo32); }
+  //! Returns a 32-bit high part of a 64-bit offset or absolute address.
+  //!
+  //! \note This function is UNSAFE and returns garbage if `isOffset64Bit()`
+  //! returns false. Never use it blindly without checking it first.
+  constexpr int32_t offsetHi32() const noexcept { return int32_t(_baseId); }
+
+  //! Sets a 64-bit offset or an absolute address to `offset`.
+  //!
+  //! \note This function attempts to set both high and low parts of a 64-bit
+  //! offset, however, if the operand has a BASE register it will store only the
+  //! low 32 bits of the offset / address as there is no way to store both BASE
+  //! and 64-bit offset, and there is currently no architecture that has such
+  //! capability targeted by AsmJit.
+  inline void setOffset(int64_t offset) noexcept {
+    uint32_t lo = uint32_t(uint64_t(offset) & 0xFFFFFFFFu);
+    uint32_t hi = uint32_t(uint64_t(offset) >> 32);
+    uint32_t hiMsk = Support::bitMaskFromBool(isOffset64Bit());
+
+    _mem.offsetLo32 = lo;
+    _baseId = (hi & hiMsk) | (_baseId & ~hiMsk);
+  }
+  //! Sets a low 32-bit offset to `offset` (don't use without knowing how BaseMem works).
+  inline void setOffsetLo32(int32_t offset) noexcept { _mem.offsetLo32 = uint32_t(offset); }
+
+  //! Adjusts the offset by `offset`.
+  //!
+  //! \note This is a fast function that doesn't use the HI 32-bits of a
+  //! 64-bit offset.
Use it only if you know that there is a BASE register + //! and the offset is only 32 bits anyway. + + //! Adjusts the offset by a 64-bit `offset`. + inline void addOffset(int64_t offset) noexcept { + if (isOffset64Bit()) { + int64_t result = offset + int64_t(uint64_t(_mem.offsetLo32) | (uint64_t(_baseId) << 32)); + _mem.offsetLo32 = uint32_t(uint64_t(result) & 0xFFFFFFFFu); + _baseId = uint32_t(uint64_t(result) >> 32); + } + else { + _mem.offsetLo32 += uint32_t(uint64_t(offset) & 0xFFFFFFFFu); + } + } + + //! Adds `offset` to a low 32-bit offset part (don't use without knowing how + //! BaseMem works). + inline void addOffsetLo32(int32_t offset) noexcept { _mem.offsetLo32 += uint32_t(offset); } + + //! Resets the memory offset to zero. + inline void resetOffset() noexcept { setOffset(0); } + + //! Resets the lo part of the memory offset to zero (don't use without knowing + //! how BaseMem works). + inline void resetOffsetLo32() noexcept { setOffsetLo32(0); } + + //! \} +}; + +// ============================================================================ +// [asmjit::Imm] +// ============================================================================ + +//! Immediate operand. +//! +//! Immediate operand is usually part of instruction itself. It's inlined after +//! or before the instruction opcode. Immediates can be only signed or unsigned +//! integers. +//! +//! To create an immediate operand use `asmjit::imm()` helper, which can be used +//! with any type, not just the default 64-bit int. +class Imm : public Operand { +public: + //! \name Construction & Destruction + //! \{ + + //! Creates a new immediate value (initial value is 0). + constexpr Imm() noexcept + : Operand(Globals::Init, kOpImm, 0, 0, 0) {} + + //! Creates a new immediate value from `other`. + constexpr Imm(const Imm& other) noexcept + : Operand(other) {} + + //! Creates a new signed immediate value, assigning the value to `val`. + constexpr explicit Imm(int64_t val) noexcept + : Operand(Globals::Init, kOpImm, 0, Support::unpackU32At0(val), Support::unpackU32At1(val)) {} + + inline explicit Imm(Globals::NoInit_) noexcept + : Operand(Globals::NoInit) {} + + //! \} + + //! \name Overloaded Operators + //! \{ + + //! Assigns the value of the `other` operand to this immediate. + inline Imm& operator=(const Imm& other) noexcept { copyFrom(other); return *this; } + + //! \} + + //! \name Accessors + //! \{ + + //! Tests whether the immediate can be casted to 8-bit signed integer. + constexpr bool isInt8() const noexcept { return Support::isInt8(int64_t(_data64)); } + //! Tests whether the immediate can be casted to 8-bit unsigned integer. + constexpr bool isUInt8() const noexcept { return Support::isUInt8(int64_t(_data64)); } + //! Tests whether the immediate can be casted to 16-bit signed integer. + constexpr bool isInt16() const noexcept { return Support::isInt16(int64_t(_data64)); } + //! Tests whether the immediate can be casted to 16-bit unsigned integer. + constexpr bool isUInt16() const noexcept { return Support::isUInt16(int64_t(_data64)); } + //! Tests whether the immediate can be casted to 32-bit signed integer. + constexpr bool isInt32() const noexcept { return Support::isInt32(int64_t(_data64)); } + //! Tests whether the immediate can be casted to 32-bit unsigned integer. + constexpr bool isUInt32() const noexcept { return Support::isUInt32(int64_t(_data64)); } + + //! Returns immediate value as 8-bit signed integer, possibly cropped. + constexpr int8_t i8() const noexcept { return int8_t(_data64 & 0xFFu); } + //! 
Returns immediate value as 8-bit unsigned integer, possibly cropped.
+  constexpr uint8_t u8() const noexcept { return uint8_t(_data64 & 0xFFu); }
+  //! Returns immediate value as 16-bit signed integer, possibly cropped.
+  constexpr int16_t i16() const noexcept { return int16_t(_data64 & 0xFFFFu);}
+  //! Returns immediate value as 16-bit unsigned integer, possibly cropped.
+  constexpr uint16_t u16() const noexcept { return uint16_t(_data64 & 0xFFFFu);}
+  //! Returns immediate value as 32-bit signed integer, possibly cropped.
+  constexpr int32_t i32() const noexcept { return int32_t(_data64 & 0xFFFFFFFFu); }
+  //! Returns low 32-bit signed integer.
+  constexpr int32_t i32Lo() const noexcept { return int32_t(_data64 & 0xFFFFFFFFu); }
+  //! Returns high 32-bit signed integer.
+  constexpr int32_t i32Hi() const noexcept { return int32_t(_data64 >> 32); }
+  //! Returns immediate value as 32-bit unsigned integer, possibly cropped.
+  constexpr uint32_t u32() const noexcept { return uint32_t(_data64 & 0xFFFFFFFFu); }
+  //! Returns low 32-bit unsigned integer.
+  constexpr uint32_t u32Lo() const noexcept { return uint32_t(_data64 & 0xFFFFFFFFu); }
+  //! Returns high 32-bit unsigned integer.
+  constexpr uint32_t u32Hi() const noexcept { return uint32_t(_data64 >> 32); }
+  //! Returns immediate value as 64-bit signed integer.
+  constexpr int64_t i64() const noexcept { return int64_t(_data64); }
+  //! Returns immediate value as 64-bit unsigned integer.
+  constexpr uint64_t u64() const noexcept { return _data64; }
+  //! Returns immediate value as `intptr_t`, possibly cropped if size of `intptr_t` is 32 bits.
+  constexpr intptr_t iptr() const noexcept { return (sizeof(intptr_t) == sizeof(int64_t)) ? intptr_t(_data64) : intptr_t(i32()); }
+  //! Returns immediate value as `uintptr_t`, possibly cropped if size of `uintptr_t` is 32 bits.
+  constexpr uintptr_t uptr() const noexcept { return (sizeof(uintptr_t) == sizeof(uint64_t)) ? uintptr_t(_data64) : uintptr_t(u32()); }
+
+  //! Sets immediate value to 8-bit signed integer `val`.
+  inline void setI8(int8_t val) noexcept { _data64 = uint64_t(int64_t(val)); }
+  //! Sets immediate value to 8-bit unsigned integer `val`.
+  inline void setU8(uint8_t val) noexcept { _data64 = uint64_t(val); }
+  //! Sets immediate value to 16-bit signed integer `val`.
+  inline void setI16(int16_t val) noexcept { _data64 = uint64_t(int64_t(val)); }
+  //! Sets immediate value to 16-bit unsigned integer `val`.
+  inline void setU16(uint16_t val) noexcept { _data64 = uint64_t(val); }
+  //! Sets immediate value to 32-bit signed integer `val`.
+  inline void setI32(int32_t val) noexcept { _data64 = uint64_t(int64_t(val)); }
+  //! Sets immediate value to 32-bit unsigned integer `val`.
+  inline void setU32(uint32_t val) noexcept { _data64 = uint64_t(val); }
+  //! Sets immediate value to 64-bit signed integer `val`.
+  inline void setI64(int64_t val) noexcept { _data64 = uint64_t(val); }
+  //! Sets immediate value to 64-bit unsigned integer `val`.
+  inline void setU64(uint64_t val) noexcept { _data64 = val; }
+  //! Sets immediate value to intptr_t `val`.
+  inline void setIPtr(intptr_t val) noexcept { _data64 = uint64_t(int64_t(val)); }
+  //! Sets immediate value to uintptr_t `val`.
+  inline void setUPtr(uintptr_t val) noexcept { _data64 = uint64_t(val); }
+
+  //! Sets immediate value to `val`.
+  template<typename T>
+  inline void setValue(T val) noexcept { setI64(int64_t(Support::asNormalized(val))); }
+
+  inline void setDouble(double d) noexcept {
+    _data64 = Support::bitCast<uint64_t>(d);
+  }
+
+  //! \}
+
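Editor's note: a brief usage sketch (not part of the patch) for the accessors above together with the `imm()` helper defined just below; the values are arbitrary:

  #include <asmjit/core.h>
  #include <cassert>
  using namespace asmjit;

  void immExample() {
    Imm a = imm(-1);          // imm() accepts any integer or pointer type.
    assert(a.isInt8());       // -1 fits into a signed 8-bit immediate.
    assert(a.u8() == 0xFFu);  // Cropped unsigned view of the same bits.

    Imm b = imm(0x1234u);
    b.zeroExtend8Bits();      // Keep only the low 8 bits.
    assert(b.u64() == 0x34u);
  }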
+  //! \name Utilities
+  //! \{
+
+  //! Clones the immediate operand.
+  constexpr Imm clone() const noexcept { return Imm(*this); }
+
+  inline void signExtend8Bits() noexcept { _data64 = uint64_t(int64_t(i8())); }
+  inline void signExtend16Bits() noexcept { _data64 = uint64_t(int64_t(i16())); }
+  inline void signExtend32Bits() noexcept { _data64 = uint64_t(int64_t(i32())); }
+
+  inline void zeroExtend8Bits() noexcept { _data64 &= 0x000000FFu; }
+  inline void zeroExtend16Bits() noexcept { _data64 &= 0x0000FFFFu; }
+  inline void zeroExtend32Bits() noexcept { _data64 &= 0xFFFFFFFFu; }
+
+  //! \}
+};
+
+//! Creates a new immediate operand.
+//!
+//! Using `imm(x)` is much nicer than using `Imm(x)` as this is a template
+//! which can accept any integer including pointers and function pointers.
+template<typename T>
+static constexpr Imm imm(T val) noexcept {
+  return Imm(std::is_signed<T>::value ? int64_t(val) : int64_t(uint64_t(val)));
+}
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // _ASMJIT_CORE_OPERAND_H
diff --git a/src/asmjit/core/osutils.cpp b/src/asmjit/core/osutils.cpp
new file mode 100644
index 0000000..3115101
--- /dev/null
+++ b/src/asmjit/core/osutils.cpp
@@ -0,0 +1,90 @@
+// [AsmJit]
+// Machine Code Generation for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+#define ASMJIT_EXPORTS
+
+#include "../core/osutils.h"
+#include "../core/support.h"
+
+#if defined(_WIN32)
+  #include <windows.h>
+#elif defined(__APPLE__)
+  #include <mach/mach_time.h>
+#else
+  #include <time.h>
+  #include <unistd.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::OSUtils - GetTickCount]
+// ============================================================================
+
+uint32_t OSUtils::getTickCount() noexcept {
+#if defined(_WIN32)
+  enum HiResStatus : uint32_t {
+    kHiResUnknown      = 0,
+    kHiResAvailable    = 1,
+    kHiResNotAvailable = 2
+  };
+
+  static std::atomic<uint32_t> _hiResStatus(kHiResUnknown);
+  static volatile double _hiResFreq(0);
+
+  uint32_t status = _hiResStatus.load();
+  LARGE_INTEGER now, qpf;
+
+  if (status != kHiResNotAvailable && ::QueryPerformanceCounter(&now)) {
+    double freq = _hiResFreq;
+    if (status == kHiResUnknown) {
+      // Detects the availability of a high resolution counter.
+      if (::QueryPerformanceFrequency(&qpf)) {
+        freq = double(qpf.QuadPart) / 1000.0;
+        _hiResFreq = freq;
+        _hiResStatus.compare_exchange_strong(status, kHiResAvailable);
+        status = kHiResAvailable;
+      }
+      else {
+        // High resolution not available.
+        _hiResStatus.compare_exchange_strong(status, kHiResNotAvailable);
+      }
+    }
+
+    if (status == kHiResAvailable)
+      return uint32_t(uint64_t(int64_t(double(now.QuadPart) / freq)) & 0xFFFFFFFFu);
+  }
+
+  // Bail to `GetTickCount()` if we cannot use high resolution.
+  return ::GetTickCount();
+#elif defined(__APPLE__)
+  // See Apple's QA1398.
+  static mach_timebase_info_data_t _machTime;
+
+  uint32_t denom = _machTime.denom;
+  if (ASMJIT_UNLIKELY(!denom)) {
+    if (mach_timebase_info(&_machTime) != KERN_SUCCESS || !(denom = _machTime.denom))
+      return 0;
+  }
+
+  // `mach_absolute_time()` returns nanoseconds, we want milliseconds.
+ uint64_t t = mach_absolute_time() / 1000000u; + t = (t * _machTime.numer) / _machTime.denom; + return uint32_t(t & 0xFFFFFFFFu); +#elif defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0 + struct timespec ts; + if (ASMJIT_UNLIKELY(clock_gettime(CLOCK_MONOTONIC, &ts) != 0)) + return 0; + + uint64_t t = (uint64_t(ts.tv_sec ) * 1000u) + (uint64_t(ts.tv_nsec) / 1000000u); + return uint32_t(t & 0xFFFFFFFFu); +#else + #pragma message("asmjit::OSUtils::getTickCount() doesn't have implementation for the target OS.") + return 0; +#endif +} + +ASMJIT_END_NAMESPACE diff --git a/src/asmjit/core/osutils.h b/src/asmjit/core/osutils.h new file mode 100644 index 0000000..4b82231 --- /dev/null +++ b/src/asmjit/core/osutils.h @@ -0,0 +1,96 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_OSUTILS_H +#define _ASMJIT_CORE_OSUTILS_H + +#include "../core/globals.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \addtogroup asmjit_support +//! \{ + +// ============================================================================ +// [asmjit::OSUtils] +// ============================================================================ + +//! Operating system utilities. +namespace OSUtils { + //! Gets the current CPU tick count, used for benchmarking (1ms resolution). + ASMJIT_API uint32_t getTickCount() noexcept; +}; + +// ============================================================================ +// [asmjit::Lock] +// ============================================================================ + +//! \cond INTERNAL + +//! Lock. +class Lock { +public: + ASMJIT_NONCOPYABLE(Lock) + + #if defined(_WIN32) + + typedef CRITICAL_SECTION Handle; + Handle _handle; + + inline Lock() noexcept { InitializeCriticalSection(&_handle); } + inline ~Lock() noexcept { DeleteCriticalSection(&_handle); } + + inline void lock() noexcept { EnterCriticalSection(&_handle); } + inline void unlock() noexcept { LeaveCriticalSection(&_handle); } + + #elif !defined(__EMSCRIPTEN__) + + typedef pthread_mutex_t Handle; + Handle _handle; + + inline Lock() noexcept { pthread_mutex_init(&_handle, nullptr); } + inline ~Lock() noexcept { pthread_mutex_destroy(&_handle); } + + inline void lock() noexcept { pthread_mutex_lock(&_handle); } + inline void unlock() noexcept { pthread_mutex_unlock(&_handle); } + + #else + + // Browser or other unsupported OS. + inline Lock() noexcept {} + inline ~Lock() noexcept {} + + inline void lock() noexcept {} + inline void unlock() noexcept {} + + #endif +}; + +//! \endcond + +// ============================================================================ +// [asmjit::ScopedLock] +// ============================================================================ + +//! \cond INTERNAL + +//! Scoped lock. +struct ScopedLock { + ASMJIT_NONCOPYABLE(ScopedLock) + + Lock& _target; + + inline ScopedLock(Lock& target) noexcept : _target(target) { _target.lock(); } + inline ~ScopedLock() noexcept { _target.unlock(); } +}; + +//! \endcond + +//! \} + +ASMJIT_END_NAMESPACE + +#endif // _ASMJIT_CORE_OSUTILS_H diff --git a/src/asmjit/core/raassignment_p.h b/src/asmjit/core/raassignment_p.h new file mode 100644 index 0000000..664e899 --- /dev/null +++ b/src/asmjit/core/raassignment_p.h @@ -0,0 +1,384 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
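Editor's note: before moving on to the register-allocator files, a small usage sketch (not part of the patch) for `OSUtils::getTickCount()`, `Lock`, and `ScopedLock` defined in osutils.h above. `Lock` and `ScopedLock` are documented as internal (\cond INTERNAL), so this is purely illustrative:

  #include <asmjit/core.h>
  #include <cstdio>
  using namespace asmjit;

  static Lock counterLock;          // Internal helper; shown only to illustrate its shape.
  static uint32_t counter;

  static void bumpCounter() noexcept {
    ScopedLock guard(counterLock);  // Locks in the constructor, unlocks in the destructor.
    counter++;
  }

  static void timeIt() noexcept {
    uint32_t start = OSUtils::getTickCount();  // ~1ms resolution, wraps around uint32_t.
    for (int i = 0; i < 1000000; i++)
      bumpCounter();
    std::printf("elapsed: %u ms\n", OSUtils::getTickCount() - start);
  }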
+ +#ifndef _ASMJIT_CORE_RAASSIGNMENT_P_H +#define _ASMJIT_CORE_RAASSIGNMENT_P_H + +#include "../core/build.h" +#ifndef ASMJIT_NO_COMPILER + +#include "../core/radefs_p.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \cond INTERNAL +//! \addtogroup asmjit_ra +//! \{ + +// ============================================================================ +// [asmjit::RAAssignment] +// ============================================================================ + +class RAAssignment { + ASMJIT_NONCOPYABLE(RAAssignment) + +public: + enum Ids : uint32_t { + kPhysNone = 0xFF, + kWorkNone = RAWorkReg::kIdNone + }; + + enum DirtyBit : uint32_t { + kClean = 0, + kDirty = 1 + }; + + struct Layout { + inline void reset() noexcept { + physIndex.reset(); + physCount.reset(); + physTotal = 0; + workCount = 0; + workRegs = nullptr; + } + + RARegIndex physIndex; //!< Index of architecture registers per group. + RARegCount physCount; //!< Count of architecture registers per group. + uint32_t physTotal; //!< Count of physical registers of all groups. + uint32_t workCount; //!< Count of work registers. + const RAWorkRegs* workRegs; //!< WorkRegs data (vector). + }; + + struct PhysToWorkMap { + static inline size_t sizeOf(uint32_t count) noexcept { + return sizeof(PhysToWorkMap) - sizeof(uint32_t) + size_t(count) * sizeof(uint32_t); + } + + inline void reset(uint32_t count) noexcept { + assigned.reset(); + dirty.reset(); + + for (uint32_t i = 0; i < count; i++) + workIds[i] = kWorkNone; + } + + inline void copyFrom(const PhysToWorkMap* other, uint32_t count) noexcept { + size_t size = sizeOf(count); + memcpy(this, other, size); + } + + RARegMask assigned; //!< Assigned registers (each bit represents one physical reg). + RARegMask dirty; //!< Dirty registers (spill slot out of sync or no spill slot). + uint32_t workIds[1 /* ... */]; //!< PhysReg to WorkReg mapping. + }; + + struct WorkToPhysMap { + static inline size_t sizeOf(uint32_t count) noexcept { + return size_t(count) * sizeof(uint8_t); + } + + inline void reset(uint32_t count) noexcept { + for (uint32_t i = 0; i < count; i++) + physIds[i] = kPhysNone; + } + + inline void copyFrom(const WorkToPhysMap* other, uint32_t count) noexcept { + size_t size = sizeOf(count); + if (ASMJIT_LIKELY(size)) + memcpy(this, other, size); + } + + uint8_t physIds[1 /* ... */]; //!< WorkReg to PhysReg mapping + }; + + //! Physical registers layout. + Layout _layout; + //! WorkReg to PhysReg mapping. + WorkToPhysMap* _workToPhysMap; + //! PhysReg to WorkReg mapping and assigned/dirty bits. + PhysToWorkMap* _physToWorkMap; + //! Optimization to translate PhysRegs to WorkRegs faster. + uint32_t* _physToWorkIds[BaseReg::kGroupVirt]; + + //! \name Construction & Destruction + //! \{ + + inline RAAssignment() noexcept { + _layout.reset(); + resetMaps(); + } + + inline void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept { + // Layout must be initialized before data. 
+ ASMJIT_ASSERT(_physToWorkMap == nullptr); + ASMJIT_ASSERT(_workToPhysMap == nullptr); + + _layout.physIndex.buildIndexes(physCount); + _layout.physCount = physCount; + _layout.physTotal = uint32_t(_layout.physIndex[BaseReg::kGroupVirt - 1]) + + uint32_t(_layout.physCount[BaseReg::kGroupVirt - 1]) ; + _layout.workCount = workRegs.size(); + _layout.workRegs = &workRegs; + } + + inline void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept { + _physToWorkMap = physToWorkMap; + _workToPhysMap = workToPhysMap; + for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) + _physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group); + } + + inline void resetMaps() noexcept { + _physToWorkMap = nullptr; + _workToPhysMap = nullptr; + for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) + _physToWorkIds[group] = nullptr; + } + + //! \} + + //! \name Accessors + //! \{ + + inline PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; } + inline WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; } + + inline RARegMask& assigned() noexcept { return _physToWorkMap->assigned; } + inline const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; } + inline uint32_t assigned(uint32_t group) const noexcept { return _physToWorkMap->assigned[group]; } + + inline RARegMask& dirty() noexcept { return _physToWorkMap->dirty; } + inline const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; } + inline uint32_t dirty(uint32_t group) const noexcept { return _physToWorkMap->dirty[group]; } + + inline uint32_t workToPhysId(uint32_t group, uint32_t workId) const noexcept { + ASMJIT_UNUSED(group); + ASMJIT_ASSERT(workId != kWorkNone); + ASMJIT_ASSERT(workId < _layout.workCount); + return _workToPhysMap->physIds[workId]; + } + + inline uint32_t physToWorkId(uint32_t group, uint32_t physId) const noexcept { + ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs); + return _physToWorkIds[group][physId]; + } + + inline bool isPhysAssigned(uint32_t group, uint32_t physId) const noexcept { + ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs); + return Support::bitTest(_physToWorkMap->assigned[group], physId); + } + + inline bool isPhysDirty(uint32_t group, uint32_t physId) const noexcept { + ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs); + return Support::bitTest(_physToWorkMap->dirty[group], physId); + } + + //! \} + + //! \name Assignment + //! \{ + + // These are low-level allocation helpers that are used to update the current + // mappings between physical and virt/work registers and also to update masks + // that represent allocated and dirty registers. These functions don't emit + // any code; they are only used to update and keep all mappings in sync. + + //! Assign [VirtReg/WorkReg] to a physical register. + ASMJIT_INLINE void assign(uint32_t group, uint32_t workId, uint32_t physId, uint32_t dirty) noexcept { + ASMJIT_ASSERT(workToPhysId(group, workId) == kPhysNone); + ASMJIT_ASSERT(physToWorkId(group, physId) == kWorkNone); + ASMJIT_ASSERT(!isPhysAssigned(group, physId)); + ASMJIT_ASSERT(!isPhysDirty(group, physId)); + + _workToPhysMap->physIds[workId] = uint8_t(physId); + _physToWorkIds[group][physId] = workId; + + uint32_t regMask = Support::bitMask(physId); + _physToWorkMap->assigned[group] |= regMask; + _physToWorkMap->dirty[group] |= regMask & Support::bitMaskFromBool(dirty); + + verify(); + } + + //! Reassign [VirtReg/WorkReg] to `dstPhysId` from `srcPhysId`. 
+ ASMJIT_INLINE void reassign(uint32_t group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept { + ASMJIT_ASSERT(dstPhysId != srcPhysId); + ASMJIT_ASSERT(workToPhysId(group, workId) == srcPhysId); + ASMJIT_ASSERT(physToWorkId(group, srcPhysId) == workId); + ASMJIT_ASSERT(isPhysAssigned(group, srcPhysId) == true); + ASMJIT_ASSERT(isPhysAssigned(group, dstPhysId) == false); + + _workToPhysMap->physIds[workId] = uint8_t(dstPhysId); + _physToWorkIds[group][srcPhysId] = kWorkNone; + _physToWorkIds[group][dstPhysId] = workId; + + uint32_t srcMask = Support::bitMask(srcPhysId); + uint32_t dstMask = Support::bitMask(dstPhysId); + + uint32_t dirty = (_physToWorkMap->dirty[group] & srcMask) != 0; + uint32_t regMask = dstMask | srcMask; + + _physToWorkMap->assigned[group] ^= regMask; + _physToWorkMap->dirty[group] ^= regMask & Support::bitMaskFromBool(dirty); + + verify(); + } + + ASMJIT_INLINE void swap(uint32_t group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept { + ASMJIT_ASSERT(aPhysId != bPhysId); + ASMJIT_ASSERT(workToPhysId(group, aWorkId) == aPhysId); + ASMJIT_ASSERT(workToPhysId(group, bWorkId) == bPhysId); + ASMJIT_ASSERT(physToWorkId(group, aPhysId) == aWorkId); + ASMJIT_ASSERT(physToWorkId(group, bPhysId) == bWorkId); + ASMJIT_ASSERT(isPhysAssigned(group, aPhysId)); + ASMJIT_ASSERT(isPhysAssigned(group, bPhysId)); + + _workToPhysMap->physIds[aWorkId] = uint8_t(bPhysId); + _workToPhysMap->physIds[bWorkId] = uint8_t(aPhysId); + _physToWorkIds[group][aPhysId] = bWorkId; + _physToWorkIds[group][bPhysId] = aWorkId; + + uint32_t aMask = Support::bitMask(aPhysId); + uint32_t bMask = Support::bitMask(bPhysId); + + uint32_t flipMask = Support::bitMaskFromBool( + ((_physToWorkMap->dirty[group] & aMask) != 0) ^ + ((_physToWorkMap->dirty[group] & bMask) != 0)); + + uint32_t regMask = aMask | bMask; + _physToWorkMap->dirty[group] ^= regMask & flipMask; + + verify(); + } + + //! Unassign [VirtReg/WorkReg] from a physical register. + ASMJIT_INLINE void unassign(uint32_t group, uint32_t workId, uint32_t physId) noexcept { + ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs); + ASMJIT_ASSERT(workToPhysId(group, workId) == physId); + ASMJIT_ASSERT(physToWorkId(group, physId) == workId); + ASMJIT_ASSERT(isPhysAssigned(group, physId)); + + _workToPhysMap->physIds[workId] = kPhysNone; + _physToWorkIds[group][physId] = kWorkNone; + + uint32_t regMask = Support::bitMask(physId); + _physToWorkMap->assigned[group] &= ~regMask; + _physToWorkMap->dirty[group] &= ~regMask; + + verify(); + } + + inline void makeClean(uint32_t group, uint32_t workId, uint32_t physId) noexcept { + ASMJIT_UNUSED(workId); + + uint32_t regMask = Support::bitMask(physId); + _physToWorkMap->dirty[group] &= ~regMask; + } + + inline void makeDirty(uint32_t group, uint32_t workId, uint32_t physId) noexcept { + ASMJIT_UNUSED(workId); + + uint32_t regMask = Support::bitMask(physId); + _physToWorkMap->dirty[group] |= regMask; + } + + //! \} + + //! \name Utilities + //! 
\{ + + inline void swap(RAAssignment& other) noexcept { + std::swap(_workToPhysMap, other._workToPhysMap); + std::swap(_physToWorkMap, other._physToWorkMap); + + for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) + std::swap(_physToWorkIds[group], other._physToWorkIds[group]); + } + + inline void copyFrom(const PhysToWorkMap* physToWorkMap, const WorkToPhysMap* workToPhysMap) noexcept { + memcpy(_physToWorkMap, physToWorkMap, PhysToWorkMap::sizeOf(_layout.physTotal)); + memcpy(_workToPhysMap, workToPhysMap, WorkToPhysMap::sizeOf(_layout.workCount)); + } + + inline void copyFrom(const RAAssignment& other) noexcept { + copyFrom(other.physToWorkMap(), other.workToPhysMap()); + } + + // Not really useful outside of debugging. + bool equals(const RAAssignment& other) const noexcept { + // Layout should always match. + if (_layout.physIndex != other._layout.physIndex || + _layout.physCount != other._layout.physCount || + _layout.physTotal != other._layout.physTotal || + _layout.workCount != other._layout.workCount || + _layout.workRegs != other._layout.workRegs) + return false; + + uint32_t physTotal = _layout.physTotal; + uint32_t workCount = _layout.workCount; + + for (uint32_t physId = 0; physId < physTotal; physId++) { + uint32_t thisWorkId = _physToWorkMap->workIds[physId]; + uint32_t otherWorkId = other._physToWorkMap->workIds[physId]; + if (thisWorkId != otherWorkId) + return false; + } + + for (uint32_t workId = 0; workId < workCount; workId++) { + uint32_t thisPhysId = _workToPhysMap->physIds[workId]; + uint32_t otherPhysId = other._workToPhysMap->physIds[workId]; + if (thisPhysId != otherPhysId) + return false; + } + + if (_physToWorkMap->assigned != other._physToWorkMap->assigned || + _physToWorkMap->dirty != other._physToWorkMap->dirty ) + return false; + + return true; + } + +#if defined(ASMJIT_BUILD_DEBUG) + ASMJIT_NOINLINE void verify() noexcept { + // Verify WorkToPhysMap. + { + for (uint32_t workId = 0; workId < _layout.workCount; workId++) { + uint32_t physId = _workToPhysMap->physIds[workId]; + if (physId != kPhysNone) { + const RAWorkReg* workReg = _layout.workRegs->at(workId); + uint32_t group = workReg->group(); + ASMJIT_ASSERT(_physToWorkIds[group][physId] == workId); + } + } + } + + // Verify PhysToWorkMap. + { + for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) { + uint32_t physCount = _layout.physCount[group]; + for (uint32_t physId = 0; physId < physCount; physId++) { + uint32_t workId = _physToWorkIds[group][physId]; + if (workId != kWorkNone) { + ASMJIT_ASSERT(_workToPhysMap->physIds[workId] == physId); + } + } + } + } + } +#else + inline void verify() noexcept {} +#endif + + //! \} +}; + +//! \} +//! \endcond + +ASMJIT_END_NAMESPACE + +#endif // !ASMJIT_NO_COMPILER +#endif // _ASMJIT_CORE_RAASSIGNMENT_P_H diff --git a/src/asmjit/core/rabuilders_p.h b/src/asmjit/core/rabuilders_p.h new file mode 100644 index 0000000..cb1eac8 --- /dev/null +++ b/src/asmjit/core/rabuilders_p.h @@ -0,0 +1,420 @@ +// [AsmJit] +// Machine Code Generation for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +#ifndef _ASMJIT_CORE_RABUILDERS_P_H +#define _ASMJIT_CORE_RABUILDERS_P_H + +#include "../core/build.h" +#ifndef ASMJIT_NO_COMPILER + +#include "../core/rapass_p.h" + +ASMJIT_BEGIN_NAMESPACE + +//! \cond INTERNAL +//! \addtogroup asmjit_ra +//! 
\{ + +// ============================================================================ +// [asmjit::RACFGBuilder] +// ============================================================================ + +template +class RACFGBuilder { +public: + RAPass* _pass; + BaseCompiler* _cc; + RABlock* _curBlock; + RABlock* _retBlock; + + // NOTE: This is a bit hacky. There are some nodes which are processed twice + // (see `onBeforeCall()` and `onBeforeRet()`) as they can insert some nodes + // around them. Since we don't have any flags to mark these we just use their + // position that is [at that time] unassigned. + static constexpr uint32_t kNodePositionDidOnBefore = 0xFFFFFFFFu; + + inline RACFGBuilder(RAPass* pass) noexcept + : _pass(pass), + _cc(pass->cc()), + _curBlock(nullptr), + _retBlock(nullptr) {} + + inline BaseCompiler* cc() const noexcept { return _cc; } + + Error run() noexcept { + #ifndef ASMJIT_NO_LOGGING + Logger* logger = _pass->debugLogger(); + uint32_t flags = FormatOptions::kFlagPositions; + RABlock* lastPrintedBlock = nullptr; + StringTmp<512> sb; + #endif + + ASMJIT_RA_LOG_FORMAT("[RAPass::BuildCFG]\n"); + + FuncNode* func = _pass->func(); + BaseNode* node = nullptr; + + // Create entry and exit blocks. + _retBlock = _pass->newBlockOrExistingAt(func->exitNode(), &node); + if (ASMJIT_UNLIKELY(!_retBlock)) + return DebugUtils::errored(kErrorOutOfMemory); + ASMJIT_PROPAGATE(_pass->addExitBlock(_retBlock)); + + if (node != func) { + _curBlock = _pass->newBlock(); + if (ASMJIT_UNLIKELY(!_curBlock)) + return DebugUtils::errored(kErrorOutOfMemory); + } + else { + // Function that has no code at all. + _curBlock = _retBlock; + } + + ASMJIT_PROPAGATE(_pass->addBlock(_curBlock)); + + RARegsStats blockRegStats; + blockRegStats.reset(); + RAInstBuilder ib; + + bool hasCode = false; + uint32_t exitLabelId = func->exitNode()->id(); + + ASMJIT_RA_LOG_COMPLEX({ + flags |= logger->flags(); + + Logging::formatNode(sb, flags, cc(), func); + logger->logf(" %s\n", sb.data()); + + lastPrintedBlock = _curBlock; + logger->logf(" {#%u}\n", lastPrintedBlock->blockId()); + }); + + node = func->next(); + if (ASMJIT_UNLIKELY(!node)) + return DebugUtils::errored(kErrorInvalidState); + + _curBlock->setFirst(node); + _curBlock->setLast(node); + + for (;;) { + BaseNode* next = node->next(); + ASMJIT_ASSERT(node->position() == 0 || node->position() == kNodePositionDidOnBefore); + + if (node->isInst()) { + if (ASMJIT_UNLIKELY(!_curBlock)) { + // If this code is unreachable then it has to be removed. + ASMJIT_RA_LOG_COMPLEX({ + sb.clear(); + Logging::formatNode(sb, flags, cc(), node); + logger->logf(" %s\n", sb.data()); + }); + cc()->removeNode(node); + node = next; + continue; + } + else { + // Handle `InstNode`, `FuncCallNode`, and `FuncRetNode`. All of + // these share the `InstNode` interface and contain operands. + hasCode = true; + + if (node->type() != BaseNode::kNodeInst) { + if (node->position() != kNodePositionDidOnBefore) { + // Call and Reg are complicated as they may insert some surrounding + // code around them. The simplest approach is to get the previous + // node, call the `onBefore()` handlers and then check whether + // anything changed and restart if so. By restart we mean that the + // current `node` would go back to the first possible inserted node + // by `onBeforeCall()` or `onBeforeRet()`. 
+            BaseNode* prev = node->prev();
+            if (node->type() == BaseNode::kNodeFuncCall) {
+              ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeCall(node->as<FuncCallNode>()));
+            }
+            else if (node->type() == BaseNode::kNodeFuncRet) {
+              ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeRet(node->as<FuncRetNode>()));
+            }
+
+            if (prev != node->prev()) {
+              // If this was the first node in the block and something was
+              // inserted before it then we have to update the first block.
+              if (_curBlock->first() == node)
+                _curBlock->setFirst(prev->next());
+
+              node->setPosition(kNodePositionDidOnBefore);
+              node = prev->next();
+
+              // `onBeforeCall()` and `onBeforeRet()` can only insert instructions.
+              ASMJIT_ASSERT(node->isInst());
+            }
+
+            // Necessary if something was inserted after `node`, but nothing before.
+            next = node->next();
+          }
+          else {
+            // Change the position back to its original value.
+            node->setPosition(0);
+          }
+        }
+
+        InstNode* inst = node->as<InstNode>();
+        ASMJIT_RA_LOG_COMPLEX({
+          sb.clear();
+          Logging::formatNode(sb, flags, cc(), node);
+          logger->logf(" %s\n", sb.data());
+        });
+
+        uint32_t controlType = BaseInst::kControlNone;
+        ib.reset();
+        ASMJIT_PROPAGATE(static_cast<This*>(this)->onInst(inst, controlType, ib));
+
+        if (node->type() != BaseNode::kNodeInst) {
+          if (node->type() == BaseNode::kNodeFuncCall) {
+            ASMJIT_PROPAGATE(static_cast<This*>(this)->onCall(inst->as<FuncCallNode>(), ib));
+          }
+          else if (node->type() == BaseNode::kNodeFuncRet) {
+            ASMJIT_PROPAGATE(static_cast<This*>(this)->onRet(inst->as<FuncRetNode>(), ib));
+            controlType = BaseInst::kControlReturn;
+          }
+        }
+
+        ASMJIT_PROPAGATE(_pass->assignRAInst(inst, _curBlock, ib));
+        blockRegStats.combineWith(ib._stats);
+
+        if (controlType != BaseInst::kControlNone) {
+          // Support for conditional and unconditional jumps.
+          if (controlType == BaseInst::kControlJump || controlType == BaseInst::kControlBranch) {
+            _curBlock->setLast(node);
+            _curBlock->addFlags(RABlock::kFlagHasTerminator);
+            _curBlock->makeConstructed(blockRegStats);
+
+            if (!(inst->instOptions() & BaseInst::kOptionUnfollow)) {
+              // Jmp/Jcc/Call/Loop/etc...
+              uint32_t opCount = inst->opCount();
+              const Operand* opArray = inst->operands();
+
+              // The last operand must be a label (this also supports instructions
+              // like jecxz in explicit form).
+              if (ASMJIT_UNLIKELY(opCount == 0 || !opArray[opCount - 1].isLabel()))
+                return DebugUtils::errored(kErrorInvalidState);
+
+              LabelNode* cbLabel;
+              ASMJIT_PROPAGATE(cc()->labelNodeOf(&cbLabel, opArray[opCount - 1].as