Merged asmjit:next branch (#149)

This commit is contained in:
Petr Kobalicek
2017-01-26 15:55:03 +01:00
committed by GitHub
parent fb9f82cb61
commit b7f6d1e369
116 changed files with 52554 additions and 52966 deletions

78
.appveyor.yml Normal file
View File

@@ -0,0 +1,78 @@
# AppVeyor CI configuration for asmjit (Windows builds).
# Builds a Debug + Release matrix across MinGW-w64 and several MSVC generators,
# then runs the unit and x86 test executables.
version: "{build}"
image: Visual Studio 2015
clone_folder: c:\dev\asmjit
environment:
# Each matrix entry is one independent CI job: BUILD_TYPE selects the CMake
# configuration, TOOLCHAIN selects the CMake generator. MINGW_PATH is only
# needed by the "MinGW Makefiles" jobs.
matrix:
- BUILD_TYPE: Debug
MINGW_PATH: C:\msys64\mingw64
TOOLCHAIN: "MinGW Makefiles"
- BUILD_TYPE: Release
MINGW_PATH: C:\msys64\mingw64
TOOLCHAIN: "MinGW Makefiles"
- BUILD_TYPE: Debug
TOOLCHAIN: "Visual Studio 10 2010"
- BUILD_TYPE: Release
TOOLCHAIN: "Visual Studio 10 2010"
- BUILD_TYPE: Debug
TOOLCHAIN: "Visual Studio 10 2010 Win64"
- BUILD_TYPE: Release
TOOLCHAIN: "Visual Studio 10 2010 Win64"
- BUILD_TYPE: Debug
TOOLCHAIN: "Visual Studio 12 2013"
- BUILD_TYPE: Release
TOOLCHAIN: "Visual Studio 12 2013"
- BUILD_TYPE: Debug
TOOLCHAIN: "Visual Studio 12 2013 Win64"
- BUILD_TYPE: Release
TOOLCHAIN: "Visual Studio 12 2013 Win64"
- BUILD_TYPE: Debug
TOOLCHAIN: "Visual Studio 14 2015"
- BUILD_TYPE: Release
TOOLCHAIN: "Visual Studio 14 2015"
- BUILD_TYPE: Debug
TOOLCHAIN: "Visual Studio 14 2015 Win64"
- BUILD_TYPE: Release
TOOLCHAIN: "Visual Studio 14 2015 Win64"
install:
# For MinGW jobs: remove Git's sh.exe directory from PATH (it confuses the
# "MinGW Makefiles" generator), then put the MinGW toolchain first on PATH.
- if "%TOOLCHAIN%"=="MinGW Makefiles" set PATH=%PATH:C:\Program Files\Git\usr\bin;=%
- if "%TOOLCHAIN%"=="MinGW Makefiles" set PATH=%MINGW_PATH%\bin;%PATH%
build_script:
- cd c:\dev\asmjit
- md build
- cd build
# MinGW: single-config generator, so the build type goes to CMake directly.
- if "%TOOLCHAIN%"=="MinGW Makefiles" (
cmake .. -G"%TOOLCHAIN%" -DCMAKE_PREFIX_PATH="%MINGW_PATH%" -DCMAKE_BUILD_TYPE="%BUILD_TYPE%" -DASMJIT_BUILD_TEST=1 &&
mingw32-make
)
else (
cmake .. -G"%TOOLCHAIN%" -DASMJIT_BUILD_TEST=1 &&
msbuild /m /nologo /v:quiet /p:Configuration=%BUILD_TYPE% asmjit.sln
)
test_script:
# Output directory differs: Makefile builds emit binaries into build/,
# Visual Studio builds into build/<Configuration>/.
- if "%TOOLCHAIN%"=="MinGW Makefiles" (
cd c:\dev\asmjit\build
)
else (
cd c:\dev\asmjit\build\%BUILD_TYPE%
)
- asmjit_test_unit.exe
- asmjit_test_x86_asm.exe
- asmjit_test_x86_cc.exe

4
.gitignore vendored
View File

@@ -1 +1,5 @@
.kdev4
*.kdev4
build
build_*
tools/asmdb

View File

@@ -1,12 +1,12 @@
language: cpp
os:
- linux
- osx
os: [linux, osx]
compiler: [gcc, clang]
compiler:
- clang
- gcc
addons:
apt:
packages: [cmake, gcc-multilib, g++-multilib, valgrind]
sources: [ubuntu-toolchain-r-test]
env:
matrix:
@@ -20,20 +20,16 @@ matrix:
- os: osx
compiler: gcc
before_install:
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
CMAKE_PACKAGE="cmake-3.3.2-Linux-x86_64"
&& wget https://www.cmake.org/files/v3.3/${CMAKE_PACKAGE}.tar.gz --no-check-certificate
&& sudo apt-get -qq update
&& sudo apt-get -qq install gcc-multilib g++-multilib valgrind
&& tar -xzf ${CMAKE_PACKAGE}.tar.gz
&& sudo cp -fR ${CMAKE_PACKAGE}/* /usr
;
install:
- |
if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then
CMAKE_PACKAGE="https://cmake.org/files/v3.6/cmake-3.6.2-Linux-x86_64.tar.gz"
mkdir -p deps/cmake
wget --no-check-certificate --quiet -O - ${CMAKE_PACKAGE} | tar --strip-components=1 -xz -C deps/cmake
export PATH=${TRAVIS_BUILD_DIR}/deps/cmake/bin:${PATH}
else
brew update
&& brew unlink cmake
&& brew install -v cmake
;
brew update
brew outdated cmake || brew upgrade cmake
fi
before_script:
@@ -49,7 +45,11 @@ script:
- cd ..
- ./build/asmjit_test_unit
- ./build/asmjit_test_x86
- ./build/asmjit_test_opcode > /dev/null
- ./build/asmjit_test_x86_asm
- ./build/asmjit_test_x86_cc
after_success:
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then valgrind --leak-check=full --show-reachable=yes ./build/asmjit_test_unit; fi;
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then valgrind --leak-check=full --show-reachable=yes ./build/asmjit_test_x86_asm; fi;
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then valgrind --leak-check=full --show-reachable=yes ./build/asmjit_test_x86_cc; fi;

View File

@@ -1,86 +1,75 @@
2016-03-21
CpuInfo has been completely redesigned. It now supports multiple CPUs without having to inherit it to support a specific architecture. Also all CpuInfo-related constants have been moved to CpuInfo.
Change:
```
const X86CpuInfo* cpu = X86CpuInfo::getHost();
cpu->hasFeature(kX86CpuFeatureSSE4_1);
```
to
```
const CpuInfo& cpu = CpuInfo::getHost();
cpu.hasFeature(CpuInfo::kX86FeatureSSE4_1);
```
The whole code-base now uses `noexcept` keyword to inform API users that these functions won't throw an exception. Moreover, the possibility to throw exception through `ErrorHandler` has been removed as it seems that nobody has ever used it. `Assembler::emit()` and friends are still not marked as `noexcept` in case this decision is taken back. If there is no complaint even `emit()` functions will be marked `noexcept` in the near future.
2015-12-07
2016-07-20
----------
Compiler now attaches to Assembler. This change was required to create resource sharing where Assembler is the central part and Compiler is a "high-level" part that serializes to it. It's incremental work toward implementing sections and toward allowing code generators to create executables and libraries.
* Global `asmjit_cast<>` was removed and replaced by a more type-safe `asmjit::ptr_cast<>`, which can cast a function to `void*` (and vice-versa), but will refuse to cast a function to `void**`, for example. Just change `asmjit_cast` to `asmjit::ptr_cast` and everything should work as usual. As a consequence, the Runtime now contains type-safe (templated) `add()` and `remove()` methods that accept a function type directly, so there is no need to cast manually to `void*` and `void**`. If you use your own runtime, rename your virtual methods from `add` to `_add` and from `release` to `_release` and enjoy the type-safe wrappers.
* Removed `Logger::Style` and `uint32_t style` parameter in Logging API. It was never used for anything so it was removed.
* There is a new `CodeEmitter` base class that defines assembler building blocks that are implemented by `Assembler` and `CodeBuilder`. `CodeCompiler` is now based on `CodeBuilder` and shares its instruction storage functionality. Most API haven't changed, just base classes and new functionality has been added. It's now possible to serialize code for further processing by using `CodeBuilder`.
* Renamed compile-time macro `ASMJIT_DISABLE_LOGGER` to `ASMJIT_DISABLE_LOGGING`. There is a new `Formatter` class which is also disabled with this option.
Also, Compiler no longer has its own Logger interface; it uses the Assembler's logger once it's attached to it.
* Operand API is mostly intact, omitting Var/Reg should fix most compile-time errors. There is now no difference between a register index and register id internally. If you ever used `reg.getRegIndex()` then use `reg.getId()` instead. Also renamed `isInitialized()` to `isValid()`.
* There are much more changes, but they are mostly internal and keeping most operand methods compatible.
* Added new functionality into `asmjit::x86` namespace related to operands.
* X86Xmm/X86Ymm/X86Zmm register operands now inherit from X86Vec.
* Register kind (was register class) is now part of `Reg` operand, you can get it by using `reg.getRegKind()`.
* Register class enum moved to `X86Reg`, `kX86RegClassGp` is now `X86Reg::kKindGp`.
* Register type enum moved to `X86Reg`, `kX86RegTypeXmm` is now `X86Reg::kRegXmm`.
* Register index enum moved to `X86Gp`, `kX86RegIndexAx` is now `X86Gp::kIdAx`.
* Segment index enum moved to `X86Seg`, `kX86SegFs` is now `X86Seg::kIdFs`.
* If you used `asmjit::noOperand` for any reason, change it to `Operand()`.
```
JitRuntime runtime;
X86Compiler c(&runtime);
* CodeBuilder and CodeCompiler now contain different prefix of their nodes to distinguish between them:
// ... code generation ...
* Rename `HLNode` to `CBNode` (CodeBuilder node).
* Rename all other `HL` to `CB`.
* Rename `X86FuncNode` to `CCFunc` (CodeCompiler function), no more arch specific prefixes here.
* Rename `X86CallNode` to `CCFuncCall` (CodeCompiler function-call), also, no more X86 prefix.
void* p = c.make();
* AsmJit now uses CodeHolder to hold code. You don't need `Runtime` anymore if you don't plan to execute the code or if you plan to relocate it yourself:
```c++
CodeHolder code; // Create CodeHolder (holds the code).
code.init(CodeInfo(ArchInfo::kIdX64)); // Initialize CodeHolder to hold X64 code.
// Everything else as usual:
X86Assembler a(&code); // Create the emitter (Assembler, CodeBuilder, CodeCompiler).
```
to
* Initializing with JitRuntime involves using CodeHolder:
```
JitRuntime runtime;
X86Assembler a(&runtime);
X86Compiler c(&a);
```c++
JitRuntime rt; // Create JitRuntime.
// ... code generation ...
CodeHolder code; // Create CodeHolder.
code.init(rt.getCodeInfo()); // Initialize CodeHolder to match the JitRuntime.
c.finalize();
void* p = a.make();
X86Assembler a(&code); // Create the emitter (Assembler, CodeBuilder, CodeCompiler).
... // Generate some code.
typedef void (*SomeFunc)(void); // Prototype of the function you generated.
SomeFunc func; // Function pointer.
Error err = rt.add(&func, &code); // Add the generated function to the runtime.
rt.remove(func); // Remove the generated function from the runtime.
```
All nodes were prefixed with HL, except for platform-specific nodes, change:
* Merged virtual registers (known as variables or Vars) into registers themselves, making the interface simpler:
```
Node -> HLNode
FuncNode -> HLFunc
X86FuncNode -> X86Func
X86CallNode -> X86Call
```c++
X86GpReg/X86GpVar merged to X86Gp
X86MmReg/X86MmVar merged to X86Mm
X86XmmReg/X86XmmVar merged to X86Xmm
X86YmmReg/X86YmmVar merged to X86Ymm
```
`FuncConv` renamed to `CallConv` and is now part of a function prototype, change:
* Refactored instruction database, moved many enums related to instructions into `X86Inst`. Also some instructions were wrong (having wrong signature in Assembler and Compiler) and were fixed.
```c++
X86InstInfo renamed to X86Inst
kX86InstIdSomething renamed to X86Inst::kIdSomething
kX86InstOptionSomething renamed to X86Inst::kOptionSomething
kX86CondSomething renamed to X86Inst::kCondSomething
kX86CmpSomething renamed to X86Inst::kCmpSomething
kX86VCmpSomething renamed to X86Inst::kVCmpSomething
kX86PrefetchSomething renamed to X86Inst::kPrefetchSomething
```
compiler.addFunc(kFuncConvHost, FuncBuilder0<Void>());
```
to
```
compiler.addFunc(FuncBuilder0<Void>(kCallConvHost));
```
Operand constructors that accept Assembler or Compiler are deprecated. Variables can now be created by using handy shortcuts like newInt32(), newIntPtr(), newXmmPd(), etc... Change:
```
X86Compiler c(...);
Label L(c);
X86GpVar x(c, kVarTypeIntPtr, "x");
```
to
```
X86Compiler c(...);
Label L = c.newLabel();
X86GpVar x = c.newIntPtr("x");
```

View File

@@ -1,150 +1,61 @@
cmake_minimum_required(VERSION 3.1)
include(CheckCXXCompilerFlag)
# Don't create a project if it was already created by another CMakeLists.txt.
# This allows one library to embed another library without a project collision.
if(NOT CMAKE_PROJECT_NAME OR "${CMAKE_PROJECT_NAME}" STREQUAL "asmjit")
project(asmjit C CXX)
endif()
# =============================================================================
# [AsmJit - Configuration]
# =============================================================================
# Embedding mode, asmjit will not create any targets (default FALSE).
# set(ASMJIT_EMBED FALSE)
set(ASMJIT_DIR "${CMAKE_CURRENT_LIST_DIR}" CACHE PATH "Location of 'asmjit'")
# Whether to build a static library (default FALSE).
# set(ASMJIT_STATIC FALSE)
# Used for debugging asmjit itself (default FALSE).
# set(ASMJIT_TRACE FALSE)
# Whether to build ARM32 backend (TRUE if building for ARM32).
# set(ASMJIT_BUILD_ARM32 FALSE)
# Whether to build ARM64 backend (TRUE if building for ARM64).
# set(ASMJIT_BUILD_ARM64 FALSE)
# Whether to build X86 backend (TRUE if building for X86).
# set(ASMJIT_BUILD_X86 FALSE)
# Whether to build X64 backend (TRUE if building for X64).
# set(ASMJIT_BUILD_X64 FALSE)
# Whether to build tests and samples (default FALSE).
# set(ASMJIT_BUILD_TEST FALSE)
set(ASMJIT_EMBED FALSE CACHE BOOLEAN "Embed 'asmjit' library (no targets)")
set(ASMJIT_STATIC FALSE CACHE BOOLEAN "Build 'asmjit' library as static")
set(ASMJIT_BUILD_ARM FALSE CACHE BOOLEAN "Build ARM32/ARM64 backends")
set(ASMJIT_BUILD_X86 FALSE CACHE BOOLEAN "Build X86/X64 backends")
set(ASMJIT_BUILD_TEST FALSE CACHE BOOLEAN "Build 'asmjit_test' applications")
# =============================================================================
# [AsmJit - Build / Embed]
# [AsmJit - Project]
# =============================================================================
# Do not create a project if this CMakeLists.txt is included from another
# project. This makes it easy to embed or create a static library.
if(NOT CMAKE_PROJECT_NAME OR "${CMAKE_PROJECT_NAME}" MATCHES "^asmjit$")
project(asmjit C CXX)
set(ASMJIT_SIGNATURE "Standalone")
else()
set(ASMJIT_SIGNATURE "Included")
endif()
if(ASMJIT_EMBED)
set(ASMJIT_SIGNATURE "${ASMJIT_SIGNATURE} | Mode=Embed")
set(ASMJIT_STATIC TRUE) # Implies ASMJIT_STATIC.
elseif(ASMJIT_STATIC)
set(ASMJIT_SIGNATURE "${ASMJIT_SIGNATURE} | Mode=Static")
else()
set(ASMJIT_SIGNATURE "${ASMJIT_SIGNATURE} | Mode=Shared")
endif()
if(ASMJIT_BUILD_TEST)
set(ASMJIT_SIGNATURE "${ASMJIT_SIGNATURE} | Test=On")
else()
set(ASMJIT_SIGNATURE "${ASMJIT_SIGNATURE} | Test=Off")
endif()
if(NOT ASMJIT_DIR)
set(ASMJIT_DIR ${CMAKE_CURRENT_LIST_DIR})
endif()
message("-- [asmjit] ${ASMJIT_SIGNATURE}")
message("-- [asmjit] ASMJIT_DIR=${ASMJIT_DIR}")
# =============================================================================
# [NP-Utilities]
# =============================================================================
function(np_detect_options out)
set(out_array)
foreach(flag ${ARGN})
check_cxx_compiler_flag("${flag}" ok)
if(ok)
list(APPEND out_array "${flag}")
endif()
unset(ok)
endforeach()
set(${out} "${out_array}" PARENT_SCOPE)
endfunction()
# =============================================================================
# [AsmJit - Flags / Deps]
# =============================================================================
set(ASMJIT_SOURCE_DIR "${ASMJIT_DIR}/src") # Asmjit source directory.
set(ASMJIT_INCLUDE_DIR "${ASMJIT_SOURCE_DIR}") # Asmjit include directory.
set(ASMJIT_CFLAGS) # Asmjit CFLAGS / CXXFLAGS.
set(ASMJIT_DEPS) # Asmjit dependencies (list of libraries) for the linker.
set(ASMJIT_LIBS) # Asmjit dependencies with asmjit included, for consumers.
# Internal, never use.
set(ASMJIT_D "-D") # Used to define a C/C++ preprocessor parameter (-D or /D).
set(ASMJIT_PRIVATE_CFLAGS) # Compiler flags independent of build type.
set(ASMJIT_PRIVATE_LFLAGS "") # Linker flags used by the library and tests.
set(ASMJIT_PRIVATE_CFLAGS_DBG) # Compiler flags used only by debug build.
set(ASMJIT_PRIVATE_CFLAGS_REL) # Compiler flags used only by release build.
include("${ASMJIT_DIR}/CxxProject.cmake")
cxx_project(asmjit)
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
set(ASMJIT_D "/D")
set(ASMJIT_PRIVATE_LFLAGS "/OPT:REF /OPT:ICF")
list(APPEND ASMJIT_PRIVATE_CFLAGS /GF)
list(APPEND ASMJIT_PRIVATE_CFLAGS_DBG /GS /GR-)
list(APPEND ASMJIT_PRIVATE_CFLAGS_REL /Oi /Oy /GS- /GR-)
# Enable multi-process compilation.
if(NOT MSVC60 AND NOT MSVC70 AND NOT MSVC71)
list(APPEND ASMJIT_PRIVATE_CFLAGS /MP) # Enable multi-process compilation.
list(APPEND ASMJIT_PRIVATE_CFLAGS /MP)
endif()
endif()
if("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(GNU|Clang)$")
# Keep only the first option detected.
np_detect_options(ASMJIT_CC_OPTIONS
"-std=c++14"
"-std=c++11"
"-std=c++0x")
if(ASMJIT_CC_OPTIONS)
list(GET ASMJIT_CC_OPTIONS 0 ASMJIT_CC_OPTIONS)
list(APPEND ASMJIT_PRIVATE_CFLAGS ${ASMJIT_CC_OPTIONS})
endif()
np_detect_options(ASMJIT_CC_OPTIONS
"-fno-exceptions"
cxx_detect_standard(ASMJIT_PRIVATE_CFLAGS)
cxx_detect_cflags(ASMJIT_PRIVATE_CFLAGS
"-fno-tree-vectorize"
"-fvisibility=hidden")
list(APPEND ASMJIT_PRIVATE_CFLAGS ${ASMJIT_CC_OPTIONS})
list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -fmerge-all-constants)
unset(ASMJIT_CC_OPTIONS)
endif()
if(ASMJIT_EMBED)
list(APPEND ASMJIT_PRIVATE_CFLAGS "${ASMJIT_D}ASMJIT_EMBED")
elseif(ASMJIT_STATIC)
list(APPEND ASMJIT_PRIVATE_CFLAGS "${ASMJIT_D}ASMJIT_STATIC")
"-fvisibility=hidden"
"-Winconsistent-missing-override")
cxx_detect_cflags(ASMJIT_PRIVATE_CFLAGS_REL
"-O2" # CMake by default uses -O3, which does nothing useful.
"-fno-keep-static-consts"
"-fmerge-all-constants")
endif()
if(ASMJIT_TRACE)
list(APPEND ASMJIT_PRIVATE_CFLAGS "${ASMJIT_D}ASMJIT_TRACE")
list(APPEND ASMJIT_PRIVATE_CFLAGS "${CXX_DEFINE}ASMJIT_TRACE")
endif()
if(WIN32)
list(APPEND ASMJIT_PRIVATE_CFLAGS "${ASMJIT_D}_UNICODE")
list(APPEND ASMJIT_PRIVATE_CFLAGS "${CXX_DEFINE}_UNICODE")
else()
list(APPEND ASMJIT_DEPS pthread)
endif()
@@ -158,72 +69,21 @@ if(NOT ASMJIT_EMBED)
list(INSERT ASMJIT_LIBS 0 asmjit)
endif()
if(ASMJIT_BUILD_ARM32)
List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_ARM32")
endif()
if(ASMJIT_BUILD_ARM64)
List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_ARM64")
endif()
if(ASMJIT_BUILD_X86)
List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_X86")
endif()
if(ASMJIT_BUILD_X64)
List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_X64")
endif()
set(ASMJIT_PRIVATE_CFLAGS_DBG ${ASMJIT_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_DBG})
set(ASMJIT_PRIVATE_CFLAGS_REL ${ASMJIT_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_REL})
message("-- [asmjit] ASMJIT_DEPS=${ASMJIT_DEPS}")
message("-- [asmjit] ASMJIT_LIBS=${ASMJIT_LIBS}")
message("-- [asmjit] ASMJIT_CFLAGS=${ASMJIT_CFLAGS}")
# =============================================================================
# [AsmJit - Macros]
# =============================================================================
macro(asmjit_add_source _out_dst _src_dir)
set(_src_path "${ASMJIT_SOURCE_DIR}/${_src_dir}")
set(_src_list)
foreach(_arg ${ARGN})
set(_src_file "${_src_path}/${_arg}")
list(APPEND _src_list ${_src_file})
endforeach()
list(APPEND "${_out_dst}" ${_src_list})
source_group(${_src_dir} FILES ${_src_list})
endmacro()
macro(asmjit_add_library _target _src _deps _cflags _cflags_dbg _cflags_rel)
if(NOT ASMJIT_STATIC)
add_library(${_target} SHARED ${_src})
else()
add_library(${_target} STATIC ${_src})
foreach(BUILD_OPTION
ASMJIT_BUILD_ARM
ASMJIT_BUILD_X86
ASMJIT_DISABLE_BUILDER
ASMJIT_DISABLE_COMPILER
ASMJIT_DISABLE_TEXT
ASMJIT_DISABLE_LOGGING
ASMJIT_DISABLE_VALIDATION)
if(${BUILD_OPTION})
List(APPEND ASMJIT_CFLAGS "${CXX_DEFINE}${BUILD_OPTION}")
List(APPEND ASMJIT_PRIVATE_CFLAGS "${CXX_DEFINE}${BUILD_OPTION}")
endif()
endforeach()
target_link_libraries(${_target} ${_deps})
set_target_properties(${_target} PROPERTIES LINK_FLAGS "${ASMJIT_PRIVATE_LFLAGS}")
if(CMAKE_BUILD_TYPE)
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
target_compile_options(${_target} PRIVATE ${_cflags} ${_cflags_dbg})
else()
target_compile_options(${_target} PRIVATE ${_cflags} ${_cflags_rel})
endif()
else()
target_compile_options(${_target} PRIVATE ${_cflags}
$<$<CONFIG:Debug>:${_cflags_dbg}>
$<$<NOT:$<CONFIG:Debug>>:${_cflags_rel}>)
endif()
if(NOT ASMJIT_STATIC)
install(TARGETS ${_target} DESTINATION "lib${LIB_SUFFIX}")
endif()
endmacro()
cxx_project_info(asmjit)
# =============================================================================
# [AsmJit - Source]
@@ -231,48 +91,53 @@ endmacro()
set(ASMJIT_SRC "")
asmjit_add_source(ASMJIT_SRC asmjit
apibegin.h
apiend.h
cxx_add_source(asmjit ASMJIT_SRC asmjit
asmjit.h
asmjit_apibegin.h
asmjit_apiend.h
asmjit_build.h
base.h
build.h
host.h
arm.h
x86.h
)
asmjit_add_source(ASMJIT_SRC asmjit/base
cxx_add_source(asmjit ASMJIT_SRC asmjit/base
arch.cpp
arch.h
assembler.cpp
assembler.h
compiler.cpp
compiler.h
compilercontext.cpp
compilercontext_p.h
compilerfunc.h
codebuilder.cpp
codebuilder.h
codecompiler.cpp
codecompiler.h
codeemitter.cpp
codeemitter.h
codeholder.cpp
codeholder.h
constpool.cpp
constpool.h
containers.cpp
containers.h
cpuinfo.cpp
cpuinfo.h
func.cpp
func.h
globals.cpp
globals.h
hlstream.cpp
hlstream.h
logger.cpp
logger.h
logging.cpp
logging.h
misc_p.h
operand.cpp
operand.h
podvector.cpp
podvector.h
osutils.cpp
osutils.h
regalloc.cpp
regalloc_p.h
runtime.cpp
runtime.h
simdtypes.h
string.cpp
string.h
utils.cpp
utils.h
vectypes.h
vmem.cpp
vmem.h
zone.cpp
@@ -280,7 +145,7 @@ asmjit_add_source(ASMJIT_SRC asmjit/base
)
if(0)
asmjit_add_source(ASMJIT_SRC asmjit/arm
cxx_add_source(asmjit ASMJIT_SRC asmjit/arm
armassembler.cpp
armassembler.h
arminst.cpp
@@ -291,20 +156,29 @@ asmjit_add_source(ASMJIT_SRC asmjit/arm
)
endif()
asmjit_add_source(ASMJIT_SRC asmjit/x86
cxx_add_source(asmjit ASMJIT_SRC asmjit/x86
x86assembler.cpp
x86assembler.h
x86builder.cpp
x86builder.h
x86compiler.cpp
x86compiler.h
x86compilercontext.cpp
x86compilercontext_p.h
x86compilerfunc.cpp
x86compilerfunc.h
x86emitter.h
x86globals.h
x86internal.cpp
x86internal_p.h
x86inst.cpp
x86inst.h
x86logging.cpp
x86logging_p.h
x86misc.h
x86operand.cpp
x86operand_regs.cpp
x86operand.h
x86regalloc.cpp
x86regalloc_p.h
x86ssetoavxpass.cpp
x86ssetoavxpass_p.h
)
# =============================================================================
@@ -313,13 +187,12 @@ asmjit_add_source(ASMJIT_SRC asmjit/x86
if(NOT ASMJIT_EMBED)
# Add `asmjit` library.
asmjit_add_library(asmjit
cxx_add_library(asmjit asmjit
"${ASMJIT_SRC}"
"${ASMJIT_DEPS}"
""
"${ASMJIT_PRIVATE_CFLAGS}"
"${ASMJIT_PRIVATE_CFLAGS_DBG}"
"${ASMJIT_PRIVATE_CFLAGS_REL}"
)
"${ASMJIT_PRIVATE_CFLAGS_REL}")
foreach(_src_file ${ASMJIT_SRC})
get_filename_component(_src_dir ${_src_file} PATH)
@@ -334,30 +207,16 @@ if(NOT ASMJIT_EMBED)
# Add `asmjit` tests and samples.
if(ASMJIT_BUILD_TEST)
set(ASMJIT_TEST_SRC "")
set(ASMJIT_TEST_CFLAGS ${ASMJIT_CFLAGS} ${ASMJIT_D}ASMJIT_TEST ${ASMJIT_D}ASMJIT_EMBED)
asmjit_add_source(ASMJIT_TEST_SRC test asmjit_test_unit.cpp broken.cpp broken.h)
cxx_add_source(asmjit ASMJIT_TEST_SRC ../test asmjit_test_unit.cpp broken.cpp broken.h)
cxx_add_executable(asmjit asmjit_test_unit
"${ASMJIT_SRC};${ASMJIT_TEST_SRC}"
"${ASMJIT_DEPS}"
"${ASMJIT_PRIVATE_CFLAGS};${CXX_DEFINE}ASMJIT_TEST;${CXX_DEFINE}ASMJIT_EMBED"
"${ASMJIT_PRIVATE_CFLAGS_DBG}"
"${ASMJIT_PRIVATE_CFLAGS_REL}")
add_executable(asmjit_test_unit ${ASMJIT_SRC} ${ASMJIT_TEST_SRC})
target_link_libraries(asmjit_test_unit ${ASMJIT_DEPS})
set_target_properties(asmjit_test_unit PROPERTIES LINK_FLAGS "${ASMJIT_PRIVATE_LFLAGS}")
if(CMAKE_BUILD_TYPE)
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
target_compile_options(asmjit_test_unit PRIVATE ${ASMJIT_TEST_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_DBG})
else()
target_compile_options(asmjit_test_unit PRIVATE ${ASMJIT_TEST_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_REL})
endif()
else()
target_compile_options(asmjit_test_unit PRIVATE ${ASMJIT_TEST_CFLAGS}
$<$<CONFIG:Debug>:${ASMJIT_PRIVATE_CFLAGS_DBG}>
$<$<NOT:$<CONFIG:Debug>>:${ASMJIT_PRIVATE_CFLAGS_REL}>)
endif()
foreach(_target asmjit_bench_x86 asmjit_test_opcode asmjit_test_x86)
add_executable(${_target} "src/test/${_target}.cpp")
target_compile_options(${_target} PRIVATE ${ASMJIT_CFLAGS})
target_link_libraries(${_target} ${ASMJIT_LIBS})
foreach(_target asmjit_bench_x86 asmjit_test_opcode asmjit_test_x86_asm asmjit_test_x86_cc)
cxx_add_executable(asmjit ${_target} "test/${_target}.cpp" "${ASMJIT_LIBS}" "${ASMJIT_CFLAGS}" "" "")
endforeach()
endif()
endif()

335
CxxProject.cmake Normal file
View File

@@ -0,0 +1,335 @@
# CxxProject 1.0.0
# ----------------
if (NOT __CXX_INCLUDED)
set(__CXX_INCLUDED TRUE)
include(CheckCXXCompilerFlag)
# ---------------------------------------------------------------------------
# C++ COMPILER SUPPORT:
#
# * cxx_detect_cflags(out, ...)
# * cxx_detect_standard(out)
# ---------------------------------------------------------------------------
# cxx_detect_cflags(out, flag...)
#
# Appends to the list variable named by `out` every flag from the argument
# list that the current C++ compiler accepts. Each probe result is stored in
# a uniquely-named __CxxFlag_<signature> cache variable, so repeated CMake
# runs reuse check_cxx_compiler_flag()'s cached answer instead of re-probing.
function(cxx_detect_cflags out)
# Start from the caller's current list so accepted flags are appended.
set(out_array ${${out}})
foreach(flag ${ARGN})
# Sanitize the flag into a valid cache-variable name (e.g. "-msse4.1" -> "_msse4_1").
string(REGEX REPLACE "[-=:;/.]" "_" flag_signature "${flag}")
check_cxx_compiler_flag(${flag} "__CxxFlag_${flag_signature}")
if(${__CxxFlag_${flag_signature}})
list(APPEND out_array "${flag}")
endif()
endforeach()
# Publish the updated list back into the caller's scope.
set(${out} "${out_array}" PARENT_SCOPE)
endfunction()
# cxx_detect_standard(out)
#
# Appends to the list variable named by `out` the single newest -std=...
# flag the compiler supports (C++14, then C++11, then C++0x). If none is
# accepted, `out` is left unchanged.
function(cxx_detect_standard out)
set(out_array)
cxx_detect_cflags(out_array "-std=c++14" "-std=c++11" "-std=c++0x")
# Keep only the first flag detected, which keeps the highest version supported.
if(out_array)
list(GET out_array 0 out_array)
endif()
# Append the detected flag after the caller's existing flags.
set(out_array ${${out}} ${out_array})
set(${out} "${out_array}" PARENT_SCOPE)
endfunction()
# cxx_print_cflags(cflags_any, cflags_dbg, cflags_rel)
#
# Logs one line per compiler flag: common flags verbatim, debug-only flags
# tagged [DEBUG] and release-only flags tagged [RELEASE]. Purely diagnostic;
# nothing is modified.
function(cxx_print_cflags cflags_any cflags_dbg cflags_rel)
foreach(one_flag IN LISTS cflags_any)
message(" ${one_flag}")
endforeach()
foreach(one_flag IN LISTS cflags_dbg)
message(" ${one_flag} [DEBUG]")
endforeach()
foreach(one_flag IN LISTS cflags_rel)
message(" ${one_flag} [RELEASE]")
endforeach()
endfunction()
# -----------------------------------------------------------------------------
# This part detects the c++ compiler and fills basic CXX_... variables to make
# integration with that compiler easier. It provides the most common flags in
# a cross-platform way.
# -----------------------------------------------------------------------------
set(CXX_DEFINE "-D") # Define a preprocessor macro: "${CXX_DEFINE}VAR=1"
set(CXX_INCLUDE "-I") # Define an include directory: "${CXX_INCLUDE}PATH"
set(CXX_CFLAGS_SSE "") # Compiler flags to build a file that uses SSE intrinsics.
set(CXX_CFLAGS_SSE2 "") # Compiler flags to build a file that uses SSE2 intrinsics.
set(CXX_CFLAGS_SSE3 "") # Compiler flags to build a file that uses SSE3 intrinsics.
set(CXX_CFLAGS_SSSE3 "") # Compiler flags to build a file that uses SSSE3 intrinsics.
set(CXX_CFLAGS_SSE4_1 "") # Compiler flags to build a file that uses SSE4.1 intrinsics.
set(CXX_CFLAGS_SSE4_2 "") # Compiler flags to build a file that uses SSE4.2 intrinsics.
set(CXX_CFLAGS_AVX "") # Compiler flags to build a file that uses AVX intrinsics.
set(CXX_CFLAGS_AVX2 "") # Compiler flags to build a file that uses AVX2 intrinsics.
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
set(CXX_DEFINE "/D")
set(CXX_INCLUDE "/I")
if(CMAKE_CL_64)
# 64-bit MSVC compiler doesn't like /arch:SSE[2] as it's implicit.
list(APPEND CXX_CFLAGS_SSE "${CXX_DEFINE}__SSE__=1")
list(APPEND CXX_CFLAGS_SSE2 "${CXX_DEFINE}__SSE__=1;${CXX_DEFINE}__SSE2__=1")
else()
cxx_detect_cflags(CXX_CFLAGS_SSE "/arch:SSE")
if(CXX_CFLAGS_SSE)
list(APPEND CXX_CFLAGS_SSE "${CXX_DEFINE}__SSE__=1")
endif()
cxx_detect_cflags(CXX_CFLAGS_SSE2 "/arch:SSE2")
if(CXX_CFLAGS_SSE2)
list(APPEND CXX_CFLAGS_SSE2 "${CXX_DEFINE}__SSE__=1;${CXX_DEFINE}__SSE2__=1")
endif()
endif()
# MSVC doesn't provide any preprocessor definitions to detect SSE3+,
# these unify MSVC with definitions defined by Clang|GCC|Intel.
if(CXX_CFLAGS_SSE2)
list(APPEND CXX_CFLAGS_SSE3 "${CXX_CFLAGS_SSE2};${CXX_DEFINE}__SSE3__=1")
list(APPEND CXX_CFLAGS_SSSE3 "${CXX_CFLAGS_SSE3};${CXX_DEFINE}__SSSE3__=1")
list(APPEND CXX_CFLAGS_SSE4_1 "${CXX_CFLAGS_SSSE3};${CXX_DEFINE}__SSE4_1__=1")
list(APPEND CXX_CFLAGS_SSE4_2 "${CXX_CFLAGS_SSE4_1};${CXX_DEFINE}__SSE4_2__=1")
endif()
# When using AVX and AVX2 MSVC does define '__AVX__' and '__AVX2__', respectively.
cxx_detect_cflags(CXX_CFLAGS_AVX "/arch:AVX")
cxx_detect_cflags(CXX_CFLAGS_AVX2 "/arch:AVX2")
if(CXX_CFLAGS_AVX)
list(APPEND CXX_CFLAGS_AVX "${CXX_DEFINE}__SSE__=1;${CXX_DEFINE}__SSE2__=1;${CXX_DEFINE}__SSE3__=1;${CXX_DEFINE}__SSSE3__=1;${CXX_DEFINE}__SSE4_1__=1;${CXX_DEFINE}__SSE4_2__=1")
endif()
if(CXX_CFLAGS_AVX2)
list(APPEND CXX_CFLAGS_AVX2 "${CXX_DEFINE}__SSE__=1;${CXX_DEFINE}__SSE2__=1;${CXX_DEFINE}__SSE3__=1;${CXX_DEFINE}__SSSE3__=1;${CXX_DEFINE}__SSE4_1__=1;${CXX_DEFINE}__SSE4_2__=1")
endif()
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel" AND WIN32)
# Intel on Windows uses CL syntax.
set(CXX_DEFINE "/D")
set(CXX_INCLUDE "/I")
# Intel deprecated /arch:SSE, so it's implicit. In contrast to MSVC, Intel
# also provides /arch:SSE3+ options and uses the same definitions as GCC
# and Clang, so no magic needed here.
cxx_detect_cflags(CXX_CFLAGS_SSE2 "/arch:SSE2")
cxx_detect_cflags(CXX_CFLAGS_SSE3 "/arch:SSE3")
cxx_detect_cflags(CXX_CFLAGS_SSSE3 "/arch:SSSE3")
cxx_detect_cflags(CXX_CFLAGS_SSE4_1 "/arch:SSE4.1")
cxx_detect_cflags(CXX_CFLAGS_SSE4_2 "/arch:SSE4.2")
cxx_detect_cflags(CXX_CFLAGS_AVX "/arch:AVX")
cxx_detect_cflags(CXX_CFLAGS_AVX2 "/arch:AVX2")
else()
cxx_detect_cflags(CXX_CFLAGS_SSE "-msse")
cxx_detect_cflags(CXX_CFLAGS_SSE2 "-msse2")
cxx_detect_cflags(CXX_CFLAGS_SSE3 "-msse3")
cxx_detect_cflags(CXX_CFLAGS_SSSE3 "-mssse3")
cxx_detect_cflags(CXX_CFLAGS_SSE4_1 "-msse4.1")
cxx_detect_cflags(CXX_CFLAGS_SSE4_2 "-msse4.2")
cxx_detect_cflags(CXX_CFLAGS_AVX "-mavx")
cxx_detect_cflags(CXX_CFLAGS_AVX2 "-mavx2")
endif()
# ---------------------------------------------------------------------------
# Function
# cxx_project(product)
#
# Create a master project or embed other project in a master project.
# ---------------------------------------------------------------------------
# cxx_project(product)
#
# Initializes the <PRODUCT>_* build variables for a CxxProject-based library.
# `product` is the lower-case project name (e.g. "asmjit"); all exported
# variables use its upper-cased form and are written into the CALLER's scope
# via PARENT_SCOPE. Reads <PRODUCT>_EMBED / <PRODUCT>_STATIC / <PRODUCT>_DIR
# set by the caller beforehand.
function(cxx_project product)
string(TOUPPER "${product}" PRODUCT)
set(MODE_EMBED ${${PRODUCT}_EMBED})
set(MODE_STATIC ${${PRODUCT}_STATIC})
# EMBED implies STATIC.
if(MODE_EMBED)
set(MODE_STATIC TRUE)
set(${PRODUCT}_STATIC TRUE PARENT_SCOPE)
endif()
# Deduce source and include directories. By default CxxProject assumes that
# both source and include files are located at './src'.
set(SOURCE_DIR "${${PRODUCT}_SOURCE_DIR}")
set(INCLUDE_DIR "${${PRODUCT}_INCLUDE_DIR}")
if(NOT SOURCE_DIR)
set(SOURCE_DIR "${${PRODUCT}_DIR}/src")
set(${PRODUCT}_SOURCE_DIR "${SOURCE_DIR}" PARENT_SCOPE)
endif()
if(NOT INCLUDE_DIR)
set(INCLUDE_DIR "${SOURCE_DIR}")
set(${PRODUCT}_INCLUDE_DIR "${INCLUDE_DIR}" PARENT_SCOPE)
endif()
set(DEPS "") # Dependencies (list of libraries) for the linker.
set(LIBS "") # Dependencies with project included, for consumers.
set(CFLAGS "") # Public compiler flags.
set(PRIVATE_CFLAGS "") # Private compiler flags independent of build type.
set(PRIVATE_CFLAGS_DBG "") # Private compiler flags used by debug builds.
set(PRIVATE_CFLAGS_REL "") # Private compiler flags used by release builds.
set(PRIVATE_LFLAGS "") # Private linker flags.
# Embed/static builds define <PRODUCT>_EMBED / <PRODUCT>_STATIC both
# publicly (for consumers) and privately (for the library build itself).
if(MODE_EMBED)
list(APPEND CFLAGS "${CXX_DEFINE}${PRODUCT}_EMBED")
list(APPEND PRIVATE_CFLAGS "${CXX_DEFINE}${PRODUCT}_EMBED")
endif()
if(MODE_STATIC)
list(APPEND CFLAGS "${CXX_DEFINE}${PRODUCT}_STATIC")
list(APPEND PRIVATE_CFLAGS "${CXX_DEFINE}${PRODUCT}_STATIC")
endif()
# PUBLIC properties - usable by third parties.
set(${PRODUCT}_DEPS "${DEPS}" PARENT_SCOPE)
set(${PRODUCT}_LIBS "${LIBS}" PARENT_SCOPE)
set(${PRODUCT}_CFLAGS "${CFLAGS}" PARENT_SCOPE)
# PRIVATE properties - only used during build.
set(${PRODUCT}_PRIVATE_CFLAGS "${PRIVATE_CFLAGS}" PARENT_SCOPE)
set(${PRODUCT}_PRIVATE_CFLAGS_DBG "${PRIVATE_CFLAGS_DBG}" PARENT_SCOPE)
set(${PRODUCT}_PRIVATE_CFLAGS_REL "${PRIVATE_CFLAGS_REL}" PARENT_SCOPE)
set(${PRODUCT}_PRIVATE_LFLAGS "${PRIVATE_LFLAGS}" PARENT_SCOPE)
endfunction()
# cxx_project_info(product)
#
# Prints a human-readable summary of the project's configuration: build mode
# (Embed/Static/Shared), whether tests are enabled, and every exported
# <PRODUCT>_* variable set up by cxx_project(). Diagnostic only.
function(cxx_project_info product)
string(TOUPPER "${product}" PRODUCT)
set(BUILD_MODE "")
set(BUILD_TEST "")
# EMBED takes precedence over STATIC, mirroring cxx_project().
if(${PRODUCT}_EMBED)
set(BUILD_MODE "Embed")
elseif(${PRODUCT}_STATIC)
set(BUILD_MODE "Static")
else()
set(BUILD_MODE "Shared")
endif()
if(${PRODUCT}_BUILD_TEST)
set(BUILD_TEST "On")
else()
set(BUILD_TEST "Off")
endif()
message("-- [${product}]")
message(" BuildMode=${BUILD_MODE}")
message(" BuildTest=${BUILD_TEST}")
message(" ${PRODUCT}_DIR=${${PRODUCT}_DIR}")
message(" ${PRODUCT}_DEPS=${${PRODUCT}_DEPS}")
message(" ${PRODUCT}_LIBS=${${PRODUCT}_LIBS}")
message(" ${PRODUCT}_CFLAGS=${${PRODUCT}_CFLAGS}")
message(" ${PRODUCT}_SOURCE_DIR=${${PRODUCT}_SOURCE_DIR}")
message(" ${PRODUCT}_INCLUDE_DIR=${${PRODUCT}_INCLUDE_DIR}")
message(" ${PRODUCT}_PRIVATE_CFLAGS=")
# Delegates per-flag printing (with [DEBUG]/[RELEASE] tags) to the helper.
cxx_print_cflags(
"${${PRODUCT}_PRIVATE_CFLAGS}"
"${${PRODUCT}_PRIVATE_CFLAGS_DBG}"
"${${PRODUCT}_PRIVATE_CFLAGS_REL}")
endfunction()
# cxx_add_source(product, out, src_dir, file...)
#
# Appends source files located in <PRODUCT>_SOURCE_DIR/<src_dir> to the list
# variable named by `out` (updated in the caller's scope) and registers them
# in an IDE source group named after `src_dir`. Compiled translation units
# whose file name carries a SIMD suffix (e.g. foo_sse2.cpp, bar_avx.cpp)
# automatically receive the matching CXX_CFLAGS_<SIMD> per-file flags.
function(cxx_add_source product out src_dir)
string(TOUPPER "${product}" PRODUCT)
set(src_path "${${PRODUCT}_SOURCE_DIR}/${src_dir}")
set(src_array)
foreach(file ${ARGN})
set(src_file "${src_path}/${file}")
set(src_cflags "")
# Only compiled translation units can take per-file COMPILE_FLAGS.
if(file MATCHES "\\.c|\\.cc|\\.cxx|\\.cpp|\\.m|\\.mm")
# Map each "_<simd>." file-name suffix to its detected CXX_CFLAGS_<SIMD>
# flag list (replaces eight copy-pasted if() blocks; same match order).
foreach(simd sse sse2 sse3 ssse3 sse4_1 sse4_2 avx avx2)
string(TOUPPER "${simd}" SIMD)
if(file MATCHES "_${simd}\\." AND NOT "${CXX_CFLAGS_${SIMD}}" STREQUAL "")
list(APPEND src_cflags ${CXX_CFLAGS_${SIMD}})
endif()
endforeach()
if(NOT "${src_cflags}" STREQUAL "")
# COMPILE_FLAGS expects a single space-separated string. Expanding a
# multi-element CMake list unquoted here would make the 2nd+ elements
# be parsed as additional property names, silently dropping flags.
string(REPLACE ";" " " src_cflags "${src_cflags}")
set_source_files_properties(${src_file} PROPERTIES COMPILE_FLAGS "${src_cflags}")
endif()
endif()
list(APPEND src_array ${src_file})
endforeach()
# Group the files by sub-directory in IDE project views.
source_group(${src_dir} FILES ${src_array})
# Append to the caller's output list without clobbering existing entries.
set(out_tmp ${${out}})
list(APPEND out_tmp ${src_array})
set("${out}" "${out_tmp}" PARENT_SCOPE)
endfunction()
# cxx_add_library(product, target, src, deps, cflags, cflags_dbg, cflags_rel)
#
# Creates the library target for `product`: SHARED by default, STATIC when
# <PRODUCT>_STATIC is set. Applies the project's private linker flags and the
# build-type-specific compile flags, and installs shared builds.
function(cxx_add_library product target src deps cflags cflags_dbg cflags_rel)
string(TOUPPER "${product}" PRODUCT)
if(NOT ${PRODUCT}_STATIC)
add_library(${target} SHARED ${src})
else()
add_library(${target} STATIC ${src})
endif()
target_link_libraries(${target} ${deps})
if (NOT "${${PRODUCT}_PRIVATE_LFLAGS}" STREQUAL "")
set_target_properties(${target} PROPERTIES LINK_FLAGS "${${PRODUCT}_PRIVATE_LFLAGS}")
endif()
# Single-config generators (Makefiles/Ninja) know CMAKE_BUILD_TYPE at
# configure time; multi-config generators (MSVC/Xcode) need generator
# expressions evaluated at build time instead.
if(CMAKE_BUILD_TYPE)
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
target_compile_options(${target} PRIVATE ${cflags} ${cflags_dbg})
else()
target_compile_options(${target} PRIVATE ${cflags} ${cflags_rel})
endif()
else()
target_compile_options(${target} PRIVATE ${cflags} $<$<CONFIG:Debug>:${cflags_dbg}> $<$<NOT:$<CONFIG:Debug>>:${cflags_rel}>)
endif()
# Static builds are typically embedded, so only shared builds are installed.
if(NOT ${PRODUCT}_STATIC)
install(TARGETS ${target} DESTINATION "lib${LIB_SUFFIX}")
endif()
endfunction()
# cxx_add_executable(product, target, src, deps, cflags, cflags_dbg, cflags_rel)
#
# Creates an executable target (tests/benchmarks) for `product`, applying the
# same linker-flag and build-type-specific compile-flag handling as
# cxx_add_library().
function(cxx_add_executable product target src deps cflags cflags_dbg cflags_rel)
string(TOUPPER "${product}" PRODUCT)
add_executable(${target} ${src})
target_link_libraries(${target} ${deps})
if (NOT "${${PRODUCT}_PRIVATE_LFLAGS}" STREQUAL "")
set_target_properties(${target} PROPERTIES LINK_FLAGS "${${PRODUCT}_PRIVATE_LFLAGS}")
endif()
# See cxx_add_library(): branch on CMAKE_BUILD_TYPE for single-config
# generators, fall back to generator expressions for multi-config ones.
if(CMAKE_BUILD_TYPE)
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
target_compile_options(${target} PRIVATE ${cflags} ${cflags_dbg})
else()
target_compile_options(${target} PRIVATE ${cflags} ${cflags_rel})
endif()
else()
target_compile_options(${target} PRIVATE ${cflags} $<$<CONFIG:Debug>:${cflags_dbg}> $<$<NOT:$<CONFIG:Debug>>:${cflags_rel}>)
endif()
# NOTE(review): this installs an EXECUTABLE into "lib${LIB_SUFFIX}" — copied
# from cxx_add_library(); executables conventionally install to "bin".
# Confirm whether test binaries should be installed at all before changing.
if(NOT ${PRODUCT}_STATIC)
install(TARGETS ${target} DESTINATION "lib${LIB_SUFFIX}")
endif()
endfunction()
endif()

236
Doxyfile
View File

@@ -1,236 +0,0 @@
# Doxyfile 1.8.7
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = "AsmJit"
PROJECT_NUMBER = "1.1"
PROJECT_BRIEF = "Complete Remote and JIT Assembler for x86/x64"
OUTPUT_DIRECTORY = .
CREATE_SUBDIRS = NO
ALLOW_UNICODE_NAMES = NO
OUTPUT_LANGUAGE = English
FULL_PATH_NAMES = YES
STRIP_FROM_PATH =
STRIP_FROM_INC_PATH =
SHORT_NAMES = NO
BRIEF_MEMBER_DESC = YES
REPEAT_BRIEF = YES
QT_AUTOBRIEF = NO
JAVADOC_AUTOBRIEF = YES
MULTILINE_CPP_IS_BRIEF = YES
ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
INHERIT_DOCS = YES
TAB_SIZE = 2
MARKDOWN_SUPPORT = YES
AUTOLINK_SUPPORT = NO
IDL_PROPERTY_SUPPORT = NO
SEPARATE_MEMBER_PAGES = NO
DISTRIBUTE_GROUP_DOC = NO
SUBGROUPING = YES
INLINE_GROUPED_CLASSES = NO
INLINE_SIMPLE_STRUCTS = NO
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
EXTRACT_ALL = NO
EXTRACT_PRIVATE = NO
EXTRACT_PACKAGE = NO
EXTRACT_STATIC = NO
EXTRACT_LOCAL_CLASSES = NO
HIDE_UNDOC_CLASSES = YES
HIDE_UNDOC_MEMBERS = NO
HIDE_FRIEND_COMPOUNDS = YES
HIDE_IN_BODY_DOCS = YES
INTERNAL_DOCS = NO
CASE_SENSE_NAMES = NO
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
# their full class and namespace scopes in the documentation. If set to YES the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
SHOW_INCLUDE_FILES = NO
SHOW_GROUPED_MEMB_INC = NO
INLINE_INFO = YES
SORT_MEMBER_DOCS = NO
SORT_BRIEF_DOCS = NO
SORT_GROUP_NAMES = NO
SORT_BY_SCOPE_NAME = YES
STRICT_PROTO_MATCHING = NO
GENERATE_TODOLIST = NO
GENERATE_TESTLIST = NO
GENERATE_BUGLIST = NO
GENERATE_DEPRECATEDLIST= NO
MAX_INITIALIZER_LINES = 0
SHOW_USED_FILES = NO
SHOW_FILES = NO
SHOW_NAMESPACES = NO
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
QUIET = YES
WARNINGS = YES
WARN_IF_UNDOCUMENTED = NO
WARN_IF_DOC_ERROR = YES
WARN_NO_PARAMDOC = NO
WARN_FORMAT = "$file:$line: $text"
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
INPUT = src/asmjit
INPUT_ENCODING = UTF-8
RECURSIVE = YES
EXCLUDE =
USE_MDFILE_AS_MAINPAGE = README.md
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
SOURCE_BROWSER = NO
INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = YES
SOURCE_TOOLTIPS = YES
VERBATIM_HEADERS = NO
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
ALPHABETICAL_INDEX = NO
#---------------------------------------------------------------------------
# Configuration options related to outputs
#---------------------------------------------------------------------------
GENERATE_HTML = YES
GENERATE_LATEX = NO
GENERATE_RTF = NO
GENERATE_MAN = NO
GENERATE_XML = YES
XML_OUTPUT = build_xml
XML_PROGRAMLISTING = NO
HTML_OUTPUT = build_doc
HTML_FILE_EXTENSION = .html
LAYOUT_FILE = tools/doc-layout.xml
HTML_HEADER = tools/doc-header.html
HTML_FOOTER = tools/doc-footer.html
HTML_STYLESHEET = tools/doc-style.css
HTML_EXTRA_STYLESHEET =
HTML_EXTRA_FILES =
HTML_COLORSTYLE_HUE = 220
HTML_COLORSTYLE_SAT = 100
HTML_COLORSTYLE_GAMMA = 80
HTML_TIMESTAMP = NO
HTML_DYNAMIC_SECTIONS = NO
HTML_INDEX_NUM_ENTRIES = 0
SEARCHENGINE = NO
#---------------------------------------------------------------------------
# Configuration options related to the CHM output
#---------------------------------------------------------------------------
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
# Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
# files are now used as the Windows 98 help format, and will replace the old
# Windows help format (.hlp) on all Windows platforms in the future. Compressed
# HTML files also contain an index, a table of contents, and you can search for
# words in the documentation. The HTML workshop also contains a viewer for
# compressed HTML files.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_HTMLHELP = NO
# The CHM_FILE tag can be used to specify the file name of the resulting .chm
# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler ( hhc.exe). If non-empty
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
# The BINARY_TOC flag controls whether a binary table of contents is generated (
# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
BINARY_TOC = NO
# The TOC_EXPAND flag can be set to YES to add extra items for group members to
# the table of contents of the HTML help documentation and to the tree view.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
TOC_EXPAND = NO
DISABLE_INDEX = NO
GENERATE_TREEVIEW = NO
ENUM_VALUES_PER_LINE = 0
TREEVIEW_WIDTH = 250
EXT_LINKS_IN_WINDOW = NO
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = NO
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
PREDEFINED = ASMJIT_DOCGEN \
ASMJIT_BUILD_X86 \
ASMJIT_BUILD_X64 \
ASMJIT_API
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
CLASS_DIAGRAMS = NO
CLASS_GRAPH = NO

View File

@@ -1,5 +1,5 @@
AsmJit - Complete x86/x64 JIT and Remote Assembler for C++
Copyright (c) 2008-2015, Petr Kobalicek <kobalicek.petr@gmail.com>
Copyright (c) 2008-2016, Petr Kobalicek <kobalicek.petr@gmail.com>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages

2125
README.md

File diff suppressed because it is too large Load Diff

View File

@@ -1,76 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Dependencies]
#if !defined(_ASMJIT_BUILD_H)
#include "./build.h"
#endif // !_ASMJIT_BUILD_H
// [Guard]
#if !defined(ASMJIT_API_SCOPE)
# define ASMJIT_API_SCOPE
#else
# error "[asmjit] Api-Scope is already active, previous scope not closed by apiend.h?"
#endif // ASMJIT_API_SCOPE
// [NoExcept]
#if !ASMJIT_CC_HAS_NOEXCEPT && !defined(noexcept)
# define noexcept ASMJIT_NOEXCEPT
# define ASMJIT_UNDEF_NOEXCEPT
#endif // !ASMJIT_CC_HAS_NOEXCEPT && !noexcept
// [NullPtr]
#if !ASMJIT_CC_HAS_NULLPTR && !defined(nullptr)
# define nullptr NULL
# define ASMJIT_UNDEF_NULLPTR
#endif // !ASMJIT_CC_HAS_NULLPTR && !nullptr
// [Override]
#if !ASMJIT_CC_HAS_OVERRIDE && !defined(override)
# define override
# define ASMJIT_UNDEF_OVERRIDE
#endif // !ASMJIT_CC_HAS_OVERRIDE && !override
// [CLang]
#if ASMJIT_CC_CLANG
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wunnamed-type-template-args"
#endif // ASMJIT_CC_CLANG
// [GCC]
#if ASMJIT_CC_GCC
# pragma GCC diagnostic push
# pragma GCC diagnostic warning "-Winline"
#endif // ASMJIT_CC_GCC
// [MSC]
#if ASMJIT_CC_MSC
# pragma warning(push)
# pragma warning(disable: 4127) // conditional expression is constant
# pragma warning(disable: 4201) // nameless struct/union
# pragma warning(disable: 4244) // '+=' : conversion from 'int' to 'x', possible
// loss of data
# pragma warning(disable: 4251) // struct needs to have dll-interface to be used
// by clients of struct ...
# pragma warning(disable: 4275) // non dll-interface struct ... used as base for
// dll-interface struct
# pragma warning(disable: 4355) // this used in base member initializer list
# pragma warning(disable: 4480) // specifying underlying type for enum
# pragma warning(disable: 4800) // forcing value to bool 'true' or 'false'
// TODO: Check if these defines are needed and for which version of MSC. There are
// news about these as they are part of C99.
# if !defined(vsnprintf)
# define ASMJIT_UNDEF_VSNPRINTF
# define vsnprintf _vsnprintf
# endif // !vsnprintf
# if !defined(snprintf)
# define ASMJIT_UNDEF_SNPRINTF
# define snprintf _snprintf
# endif // !snprintf
#endif // ASMJIT_CC_MSC

View File

@@ -1,53 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#if defined(ASMJIT_API_SCOPE)
# undef ASMJIT_API_SCOPE
#else
# error "[asmjit] Api-Scope not active, forgot to include apibegin.h?"
#endif // ASMJIT_API_SCOPE
// [NoExcept]
#if defined(ASMJIT_UNDEF_NOEXCEPT)
# undef noexcept
# undef ASMJIT_UNDEF_NOEXCEPT
#endif // ASMJIT_UNDEF_NOEXCEPT
// [NullPtr]
#if defined(ASMJIT_UNDEF_NULLPTR)
# undef nullptr
# undef ASMJIT_UNDEF_NULLPTR
#endif // ASMJIT_UNDEF_NULLPTR
// [Override]
#if defined(ASMJIT_UNDEF_OVERRIDE)
# undef override
# undef ASMJIT_UNDEF_OVERRIDE
#endif // ASMJIT_UNDEF_OVERRIDE
// [CLang]
#if ASMJIT_CC_CLANG
# pragma clang diagnostic pop
#endif // ASMJIT_CC_CLANG
// [GCC]
#if ASMJIT_CC_GCC
# pragma GCC diagnostic pop
#endif // ASMJIT_CC_GCC
// [MSC]
#if ASMJIT_CC_MSC
# pragma warning(pop)
# if defined(ASMJIT_UNDEF_VSNPRINTF)
# undef vsnprintf
# undef ASMJIT_UNDEF_VSNPRINTF
# endif // ASMJIT_UNDEF_VSNPRINTF
# if defined(ASMJIT_UNDEF_SNPRINTF)
# undef snprintf
# undef ASMJIT_UNDEF_SNPRINTF
# endif // ASMJIT_UNDEF_SNPRINTF
#endif // ASMJIT_CC_MSC

View File

@@ -12,6 +12,7 @@
#include "./base.h"
#include "./arm/armassembler.h"
#include "./arm/armbuilder.h"
#include "./arm/armcompiler.h"
#include "./arm/arminst.h"
#include "./arm/armoperand.h"

View File

@@ -16,345 +16,32 @@
//!
//! AsmJit - Complete x86/x64 JIT and Remote Assembler for C++.
//!
//! A complete JIT and remote assembler for C++ language. It can generate native
//! code for x86 and x64 architectures and supports the whole x86/x64 instruction
//! set - from legacy MMX to the newest AVX2. It has a type-safe API that allows
//! C++ compiler to do semantic checks at compile-time even before the assembled
//! code is generated and executed.
//!
//! AsmJit is not a virtual machine (VM). It doesn't have functionality to
//! implement VM out of the box; however, it can be used as a JIT backend
//! of your own VM. The usage of AsmJit is not limited at all; it's suitable
//! for multimedia, VM backends, remote code generation, and many other tasks.
//!
//! \section AsmJit_Main_Concepts Code Generation Concepts
//!
//! AsmJit has two completely different code generation concepts. The difference
//! is in how the code is generated. The first concept, also referred as a low
//! level concept, is called `Assembler` and it's the same as writing RAW
//! assembly by inserting instructions that use physical registers directly. In
//! this case AsmJit does only instruction encoding, verification and final code
//! relocation.
//!
//! The second concept, also referred as a high level concept, is called
//! `Compiler`. Compiler lets you use virtually unlimited number of registers
//! (it calls them variables), which significantly simplifies the code generation
//! process. Compiler allocates these virtual registers to physical registers
//! after the code generation is done. This requires some extra effort - Compiler
//! has to generate information for each node (instruction, function declaration,
//! function call, etc...) in the code, perform a variable liveness analysis and
//! translate the code using variables to a code that uses only physical registers.
//!
//! In addition, Compiler understands functions and their calling conventions.
//! It has been designed in a way that the code generated is always a function
//! having a prototype like a real programming language. By having a function
//! prototype the Compiler is able to insert prolog and epilog sequence to the
//! function being generated and it's able to also generate a necessary code
//! to call other function from your own code.
//!
//! There is no conclusion on which concept is better. `Assembler` brings full
//! control and the best performance, while `Compiler` makes the code-generation
//! more fun and more portable.
//!
//! \section AsmJit_Main_Sections Documentation Sections
//!
//! AsmJit documentation is structured into the following sections:
//! - \ref asmjit_base "Base" - Base API (architecture independent).
//! - \ref asmjit_x86 "X86/X64" - X86/X64 API.
//!
//! \section AsmJit_Main_HomePage AsmJit Homepage
//!
//! - https://github.com/kobalicek/asmjit
// ============================================================================
// [asmjit_base]
// ============================================================================
//! Introduction provided by the project page at https://github.com/asmjit/asmjit.
//! \defgroup asmjit_base AsmJit Base API (architecture independent)
//!
//! \brief Base API.
//!
//! Base API contains all classes that are platform and architecture independent.
//!
//! Code-Generation and Operands
//! ----------------------------
//!
//! List of the most useful code-generation and operand classes:
//! - \ref asmjit::Assembler - Low-level code-generation.
//! - \ref asmjit::ExternalTool - An external tool that can serialize to `Assembler`:
//! - \ref asmjit::Compiler - High-level code-generation.
//! - \ref asmjit::Runtime - Describes where the code is stored and how it's executed:
//! - \ref asmjit::HostRuntime - Runtime that runs on the host machine:
//! - \ref asmjit::JitRuntime - Runtime designed for JIT code generation and execution.
//! - \ref asmjit::StaticRuntime - Runtime for code that starts at a specific address.
//! - \ref asmjit::Stream - Stream is a list of \ref HLNode objects stored as a double
//! linked list:
//! - \ref asmjit::HLNode - Base node interface:
//! - \ref asmjit::HLInst - Instruction node.
//! - \ref asmjit::HLData - Data node.
//! - \ref asmjit::HLAlign - Align directive node.
//! - \ref asmjit::HLLabel - Label node.
//! - \ref asmjit::HLComment - Comment node.
//! - \ref asmjit::HLSentinel - Sentinel node.
//! - \ref asmjit::HLHint - Instruction node.
//! - \ref asmjit::HLFunc - Function declaration node.
//! - \ref asmjit::HLRet - Function return node.
//! - \ref asmjit::HLCall - Function call node.
//! - \ref asmjit::HLCallArg - Function call argument node.
//! - \ref asmjit::Operand - base class for all operands:
//! - \ref asmjit::Reg - Register operand (`Assembler` only).
//! - \ref asmjit::Var - Variable operand (`Compiler` only).
//! - \ref asmjit::Mem - Memory operand.
//! - \ref asmjit::Imm - Immediate operand.
//! - \ref asmjit::Label - Label operand.
//!
//! The following snippet shows how to setup a basic JIT code generation:
//!
//! ~~~
//! using namespace asmjit;
//!
//! int main(int argc, char* argv[]) {
//! // JIT runtime is designed for JIT code generation and execution.
//! JitRuntime runtime;
//!
//! // Assembler instance requires to know the runtime to function.
//! X86Assembler a(&runtime);
//!
//! // Compiler (if you intend to use it) requires an assembler instance.
//! X86Compiler c(&a);
//!
//! return 0;
//! }
//! ~~~
//!
//! Logging and Error Handling
//! --------------------------
//!
//! AsmJit contains a robust interface that can be used to log the generated code
//! and to handle possible errors. Base logging interface is provided by \ref
//! Logger, which is abstract and can be used as a base for your own logger.
//! AsmJit also implements some trivial logging concepts out of the box to
//! simplify the development. \ref FileLogger logs into a C `FILE*` stream and
//! \ref StringLogger concatenates all log messages into a single string.
//!
//! The following snippet shows how to setup a basic logger and error handler:
//!
//! ~~~
//! using namespace asmjit;
//!
//! struct MyErrorHandler : public ErrorHandler {
//! virtual bool handleError(Error code, const char* message, void* origin) {
//! printf("Error 0x%0.8X: %s\n", code, message);
//!
//! // True - error handled and code generation can continue.
//! // False - error not handled, code generation should stop.
//! return false;
//! }
//! }
//!
//! int main(int argc, char* argv[]) {
//! JitRuntime runtime;
//! FileLogger logger(stderr);
//! MyErrorHandler eh;
//!
//! X86Assembler a(&runtime);
//! a.setLogger(&logger);
//! a.setErrorHandler(&eh);
//!
//! ...
//!
//! return 0;
//! }
//! ~~~
//!
//! AsmJit also contains an \ref ErrorHandler, which is an abstract class that
//! can be used to implement your own error handling. It can be associated with
//! \ref Assembler and used to report all errors. It's a very convenient way to
//! be aware of any error that happens during the code generation without making
//! the error handling complicated.
//!
//! List of the most useful logging and error handling classes:
//! - \ref asmjit::Logger - abstract logging interface:
//! - \ref asmjit::FileLogger - A logger that logs to `FILE*`.
//! - \ref asmjit::StringLogger - A logger that concatenates to a single string.
//! - \ref asmjit::ErrorHandler - Easy way to handle \ref Assembler and \ref
//! Compiler
//! errors.
//!
//! Zone Memory Allocator
//! ---------------------
//!
//! Zone memory allocator is an incremental memory allocator that can be used
//! to allocate data of short life-time. It has much better performance
//! characteristics than all other allocators, because the only thing it can do
//! is to increment a pointer and return its previous address. See \ref Zone
//! for more details.
//!
//! The whole AsmJit library is based on zone memory allocation for performance
//! reasons. It has many other benefits, but the performance was the main one
//! when designing the library.
//!
//! POD Containers
//! --------------
//!
//! POD containers are used by AsmJit to manage its own data structures. The
//! following classes can be used by AsmJit consumers:
//!
//! - \ref asmjit::BitArray - A fixed bit-array that is used internally.
//! - \ref asmjit::PodVector<T> - A simple array-like container for storing
//! POD data.
//! - \ref asmjit::PodList<T> - A single linked list.
//! - \ref asmjit::StringBuilder - A string builder that can append strings
//! and integers.
//!
//! Utility Functions
//! -----------------
//!
//! Utility functions are implemented by the static class \ref Utils. There are
//! utilities for bit manipulation and bit counting, utilities to get an
//! integer minimum / maximum and various other helpers required to perform
//! alignment checks and binary casting from float to integer and vice versa.
//!
//! String utilities are also implemented by a static class \ref Utils. They
//! are mostly used by AsmJit internals and not really important to end users.
//!
//! SIMD Utilities
//! --------------
//!
//! SIMD code generation often requires to embed constants after each function
//! or at the end of the whole code block. AsmJit contains `Vec64`, `Vec128`
//! and `Vec256` classes that can be used to prepare data useful when generating
//! SIMD code.
//!
//! X86/X64 code generators contain member functions `dmm`, `dxmm`, and `dymm`,
//! which can be used to embed 64-bit, 128-bit and 256-bit data structures into
//! the machine code.
// ============================================================================
// [asmjit_x86]
// ============================================================================
//! \brief Backend Neutral API.
//! \defgroup asmjit_x86 AsmJit X86/X64 API
//!
//! \brief X86/X64 API
//! \brief X86/X64 Backend API.
//! \defgroup asmjit_arm AsmJit ARM32/ARM64 API
//!
//! X86/X64 Code Generation
//! -----------------------
//!
//! X86/X64 code generation is realized through:
//! - \ref X86Assembler - low-level code generation.
//! - \ref X86Compiler - high-level code generation.
//!
//! X86/X64 Registers
//! -----------------
//!
//! There are static objects that represents X86 and X64 registers. They can
//! be used directly (like `eax`, `mm`, `xmm`, ...) or created through
//! these functions:
//!
//! - `asmjit::x86::gpb_lo()` - Get an 8-bit low GPB register.
//! - `asmjit::x86::gpb_hi()` - Get an 8-bit high GPB register.
//! - `asmjit::x86::gpw()` - Get a 16-bit GPW register.
//! - `asmjit::x86::gpd()` - Get a 32-bit GPD register.
//! - `asmjit::x86::gpq()` - Get a 64-bit GPQ Gp register.
//! - `asmjit::x86::gpz()` - Get a 32-bit or 64-bit GPD/GPQ register.
//! - `asmjit::x86::fp()` - Get a 80-bit FPU register.
//! - `asmjit::x86::mm()` - Get a 64-bit MMX register.
//! - `asmjit::x86::xmm()` - Get a 128-bit XMM register.
//! - `asmjit::x86::ymm()` - Get a 256-bit YMM register.
//! - `asmjit::x86::amm()` - Get a 512-bit ZMM register.
//!
//! X86/X64 Addressing
//! ------------------
//!
//! X86 and X64 architectures contain several addressing modes, and most of
//! them are available in the AsmJit library. Memory addresses are represented
//! by the `BaseMem` class. These functions are used to make operands that
//! represent memory addresses:
//!
//! - `asmjit::x86::ptr()` - Address size not specified.
//! - `asmjit::x86::byte_ptr()` - 1 byte.
//! - `asmjit::x86::word_ptr()` - 2 bytes (GPW size).
//! - `asmjit::x86::dword_ptr()` - 4 bytes (GPD size).
//! - `asmjit::x86::qword_ptr()` - 8 bytes (GPQ/MMX size).
//! - `asmjit::x86::tword_ptr()` - 10 bytes (FPU size).
//! - `asmjit::x86::dqword_ptr()` - 16 bytes (XMM size).
//! - `asmjit::x86::yword_ptr()` - 32 bytes (YMM size).
//! - `asmjit::x86::zword_ptr()` - 64 bytes (ZMM size).
//!
//! Most useful function to make pointer should be `asmjit::x86::ptr()`. It
//! creates a pointer to the target with an unspecified size. Unspecified size
//! works in all intrinsics where are used registers (this means that size is
//! specified by register operand or by instruction itself). For example
//! `asmjit::x86::ptr()` can't be used with `Assembler::inc()` instruction. In
//! this case the size must be specified and it's also reason to differentiate
//! between pointer sizes.
//!
//! X86 and X64 support simple address forms like `[base + displacement]` and
//! also complex address forms like `[base + index * scale + displacement]`.
//!
//! X86/X64 Immediates
//! ------------------
//!
//! Immediate values are constants that are passed directly after instruction
//! opcode. To create such value use `asmjit::imm()` or `asmjit::imm_u()`
//! methods to create a signed or unsigned immediate value.
//!
//! X86/X64 CPU Information
//! -----------------------
//!
//! The CPUID instruction can be used to get an exhaustive information about
//! the host X86/X64 processor. AsmJit contains utilities that can get the most
//! important information related to the features supported by the CPU and the
//! host operating system, in addition to host processor name and number of
//! cores. Class `CpuInfo` provides generic information about a host or target
//! processor and contains also a specific X86/X64 information.
//!
//! By default AsmJit queries the CPU information after the library is loaded
//! and the queried information is reused by all instances of `JitRuntime`.
//! The global instance of `CpuInfo` can't be changed, because it will affect
//! the code generation of all `Runtime`s. If there is a need to have a
//! specific CPU information which contains modified features or processor
//! vendor it's possible by creating a new instance of the `CpuInfo` and setting
//! up its members.
//!
//! Cpu detection is important when generating a JIT code that may or may not
//! use certain CPU features. For example there used to be a SSE/SSE2 detection
//! in the past and today there is often AVX/AVX2 detection.
//!
//! The example below shows how to detect a SSE4.1 instruction set:
//!
//! ~~~
//! using namespace asmjit;
//!
//! const CpuInfo& cpuInfo = CpuInfo::getHost();
//!
//! if (cpuInfo.hasFeature(CpuInfo::kX86FeatureSSE4_1)) {
//! // Processor has SSE4.1.
//! }
//! else if (cpuInfo.hasFeature(CpuInfo::kX86FeatureSSE2)) {
//! // Processor doesn't have SSE4.1, but has SSE2.
//! }
//! else {
//! // Processor is archaic; it's a wonder AsmJit works here!
//! }
//! ~~~
//! \brief ARM32/ARM64 Backend API.
// [Dependencies]
#include "./base.h"
// [ARM/ARM64]
#if defined(ASMJIT_BUILD_ARM32) || defined(ASMJIT_BUILD_ARM64)
#include "./arm.h"
#endif // ASMJIT_BUILD_ARM32 || ASMJIT_BUILD_ARM64
// [X86/X64]
#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)
#if defined(ASMJIT_BUILD_X86)
#include "./x86.h"
#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64
#endif // ASMJIT_BUILD_X86
// [Host]
#include "./host.h"
// [ARM32/ARM64]
#if defined(ASMJIT_BUILD_ARM)
#include "./arm.h"
#endif // ASMJIT_BUILD_ARM
// [Guard]
#endif // _ASMJIT_ASMJIT_H

View File

@@ -0,0 +1,117 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Dependencies]
#if !defined(_ASMJIT_BUILD_H)
# include "./build.h"
#endif // !_ASMJIT_BUILD_H
// [Guard]
#if !defined(ASMJIT_API_SCOPE)
# define ASMJIT_API_SCOPE
#else
# error "[asmjit] api-scope is already active, previous scope not closed by asmjit_apiend.h?"
#endif // ASMJIT_API_SCOPE
// ============================================================================
// [C++ Support]
// ============================================================================
// [NoExcept]
#if !ASMJIT_CC_HAS_NOEXCEPT && !defined(noexcept)
# define noexcept ASMJIT_NOEXCEPT
# define ASMJIT_UNDEF_NOEXCEPT
#endif // !ASMJIT_CC_HAS_NOEXCEPT && !noexcept
// [NullPtr]
#if !ASMJIT_CC_HAS_NULLPTR && !defined(nullptr)
# define nullptr NULL
# define ASMJIT_UNDEF_NULLPTR
#endif // !ASMJIT_CC_HAS_NULLPTR && !nullptr
// [Override]
#if !ASMJIT_CC_HAS_OVERRIDE && !defined(override)
# define override
# define ASMJIT_UNDEF_OVERRIDE
#endif // !ASMJIT_CC_HAS_OVERRIDE && !override
// ============================================================================
// [Compiler Support]
// ============================================================================
// [Clang]
#if ASMJIT_CC_CLANG
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wc++11-extensions"
# pragma clang diagnostic ignored "-Wconstant-logical-operand"
# pragma clang diagnostic ignored "-Wunnamed-type-template-args"
#endif // ASMJIT_CC_CLANG
// [GCC]
#if ASMJIT_CC_GCC
# pragma GCC diagnostic push
#endif // ASMJIT_CC_GCC
// [MSC]
#if ASMJIT_CC_MSC
# pragma warning(push)
# pragma warning(disable: 4127) // conditional expression is constant
# pragma warning(disable: 4201) // nameless struct/union
# pragma warning(disable: 4244) // '+=' : conversion from 'int' to 'x', possible loss of data
# pragma warning(disable: 4251) // struct needs to have dll-interface to be used by clients of struct ...
# pragma warning(disable: 4275) // non dll-interface struct ... used as base for dll-interface struct
# pragma warning(disable: 4355) // this used in base member initializer list
# pragma warning(disable: 4480) // specifying underlying type for enum
# pragma warning(disable: 4800) // forcing value to bool 'true' or 'false'
# if _MSC_VER < 1900
# if !defined(vsnprintf)
# define ASMJIT_UNDEF_VSNPRINTF
# define vsnprintf _vsnprintf
# endif // !vsnprintf
# if !defined(snprintf)
# define ASMJIT_UNDEF_SNPRINTF
# define snprintf _snprintf
# endif // !snprintf
# endif
#endif // ASMJIT_CC_MSC
// ============================================================================
// [Custom Macros]
// ============================================================================
// [ASMJIT_NON...]
#if ASMJIT_CC_HAS_DELETE_FUNCTION
#define ASMJIT_NONCONSTRUCTIBLE(...) \
private: \
__VA_ARGS__() = delete; \
__VA_ARGS__(const __VA_ARGS__& other) = delete; \
__VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
public:
#define ASMJIT_NONCOPYABLE(...) \
private: \
__VA_ARGS__(const __VA_ARGS__& other) = delete; \
__VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
public:
#else
#define ASMJIT_NONCONSTRUCTIBLE(...) \
private: \
inline __VA_ARGS__(); \
inline __VA_ARGS__(const __VA_ARGS__& other); \
inline __VA_ARGS__& operator=(const __VA_ARGS__& other); \
public:
#define ASMJIT_NONCOPYABLE(...) \
private: \
inline __VA_ARGS__(const __VA_ARGS__& other); \
inline __VA_ARGS__& operator=(const __VA_ARGS__& other); \
public:
#endif // ASMJIT_CC_HAS_DELETE_FUNCTION
// [ASMJIT_ENUM]
#if defined(_MSC_VER) && _MSC_VER >= 1400
# define ASMJIT_ENUM(NAME) enum NAME : uint32_t
#else
# define ASMJIT_ENUM(NAME) enum NAME
#endif

View File

@@ -0,0 +1,74 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#if defined(ASMJIT_API_SCOPE)
# undef ASMJIT_API_SCOPE
#else
# error "[asmjit] api-scope not active, forgot to include asmjit_apibegin.h?"
#endif // ASMJIT_API_SCOPE
// ============================================================================
// [C++ Support]
// ============================================================================
// [NoExcept]
#if defined(ASMJIT_UNDEF_NOEXCEPT)
# undef noexcept
# undef ASMJIT_UNDEF_NOEXCEPT
#endif // ASMJIT_UNDEF_NOEXCEPT
// [NullPtr]
#if defined(ASMJIT_UNDEF_NULLPTR)
# undef nullptr
# undef ASMJIT_UNDEF_NULLPTR
#endif // ASMJIT_UNDEF_NULLPTR
// [Override]
#if defined(ASMJIT_UNDEF_OVERRIDE)
# undef override
# undef ASMJIT_UNDEF_OVERRIDE
#endif // ASMJIT_UNDEF_OVERRIDE
// ============================================================================
// [Compiler Support]
// ============================================================================
// [Clang]
#if ASMJIT_CC_CLANG
# pragma clang diagnostic pop
#endif // ASMJIT_CC_CLANG
// [GCC]
#if ASMJIT_CC_GCC
# pragma GCC diagnostic pop
#endif // ASMJIT_CC_GCC
// [MSC]
#if ASMJIT_CC_MSC
# pragma warning(pop)
# if _MSC_VER < 1900
# if defined(ASMJIT_UNDEF_VSNPRINTF)
# undef vsnprintf
# undef ASMJIT_UNDEF_VSNPRINTF
# endif // ASMJIT_UNDEF_VSNPRINTF
# if defined(ASMJIT_UNDEF_SNPRINTF)
# undef snprintf
# undef ASMJIT_UNDEF_SNPRINTF
# endif // ASMJIT_UNDEF_SNPRINTF
# endif
#endif // ASMJIT_CC_MSC
// ============================================================================
// [Custom Macros]
// ============================================================================
// [ASMJIT_NON...]
#undef ASMJIT_NONCONSTRUCTIBLE
#undef ASMJIT_NONCOPYABLE
// [ASMJIT_ENUM]
#undef ASMJIT_ENUM

View File

@@ -13,7 +13,7 @@
// ============================================================================
// AsmJit is by default compiled only for a host processor for the purpose of
// JIT code generation. Both Assembler and Compiler code generators are compiled
// JIT code generation. Both Assembler and CodeCompiler emitters are compiled
// by default. Preprocessor macros can be used to change the default behavior.
// External Config File
@@ -48,7 +48,6 @@
//
// #define ASMJIT_DEBUG // Define to enable debug-mode.
// #define ASMJIT_RELEASE // Define to enable release-mode.
// #define ASMJIT_TRACE // Define to enable tracing.
// AsmJit Build Backends
// ---------------------
@@ -56,33 +55,36 @@
// These definitions control which backends to compile. If none of these is
// defined AsmJit will use host architecture by default (for JIT code generation).
//
// #define ASMJIT_BUILD_X86 // Define to enable x86 instruction set (32-bit).
// #define ASMJIT_BUILD_X64 // Define to enable x64 instruction set (64-bit).
// #define ASMJIT_BUILD_X86 // Define to enable X86 and X64 code-generation.
// #define ASMJIT_BUILD_ARM // Define to enable ARM32 and ARM64 code-generation.
// #define ASMJIT_BUILD_HOST // Define to enable host instruction set.
// AsmJit Build Features
// ---------------------
//
// Flags can be defined to disable standard features. These are handy especially
// when building asmjit statically and some features are not needed or unwanted
// (like Compiler).
// when building AsmJit statically and some features are not needed or unwanted
// (like CodeCompiler).
//
// AsmJit features are enabled by default.
// #define ASMJIT_DISABLE_COMPILER // Disable Compiler (completely).
// #define ASMJIT_DISABLE_LOGGER // Disable Logger (completely).
// #define ASMJIT_DISABLE_COMPILER // Disable CodeCompiler (completely).
// #define ASMJIT_DISABLE_LOGGING // Disable logging and formatting (completely).
// #define ASMJIT_DISABLE_TEXT // Disable everything that contains text
// // representation (instructions, errors, ...).
// #define ASMJIT_DISABLE_VALIDATION // Disable Validation (completely).
// Prevent compile-time errors caused by misconfiguration.
#if defined(ASMJIT_DISABLE_TEXT) && !defined(ASMJIT_DISABLE_LOGGER)
# error "[asmjit] ASMJIT_DISABLE_TEXT requires ASMJIT_DISABLE_LOGGER to be defined."
#endif // ASMJIT_DISABLE_TEXT && !ASMJIT_DISABLE_LOGGER
#if defined(ASMJIT_DISABLE_TEXT) && !defined(ASMJIT_DISABLE_LOGGING)
# error "[asmjit] ASMJIT_DISABLE_TEXT requires ASMJIT_DISABLE_LOGGING to be defined."
#endif // ASMJIT_DISABLE_TEXT && !ASMJIT_DISABLE_LOGGING
// Detect ASMJIT_DEBUG and ASMJIT_RELEASE if not forced from outside.
#if !defined(ASMJIT_DEBUG) && !defined(ASMJIT_RELEASE) && !defined(NDEBUG)
# define ASMJIT_DEBUG
#else
# define ASMJIT_RELEASE
#if !defined(ASMJIT_DEBUG) && !defined(ASMJIT_RELEASE)
# if !defined(NDEBUG)
# define ASMJIT_DEBUG
# else
# define ASMJIT_RELEASE
# endif
#endif
// ASMJIT_EMBED implies ASMJIT_STATIC.
@@ -305,26 +307,34 @@
// [@CC{@]
// \def ASMJIT_CC_CLANG
// True if the detected C++ compiler is CLANG (contains normalized CLANG version).
// Non-zero if the detected C++ compiler is CLANG (contains normalized CLANG version).
//
// \def ASMJIT_CC_CODEGEAR
// True if the detected C++ compiler is CODEGEAR or BORLAND (version not normalized).
// Non-zero if the detected C++ compiler is CODEGEAR or BORLAND (version not normalized).
//
// \def ASMJIT_CC_INTEL
// Non-zero if the detected C++ compiler is INTEL (version not normalized).
//
// \def ASMJIT_CC_GCC
// True if the detected C++ compiler is GCC (contains normalized GCC version).
// Non-zero if the detected C++ compiler is GCC (contains normalized GCC version).
//
// \def ASMJIT_CC_MSC
// True if the detected C++ compiler is MSC (contains normalized MSC version).
// Non-zero if the detected C++ compiler is MSC (contains normalized MSC version).
//
// \def ASMJIT_CC_MINGW
// Defined to 32 or 64 in case this is a MINGW, otherwise 0.
// Non-zero if the detected C++ compiler is MINGW32 (set to 32) or MINGW64 (set to 64).
#define ASMJIT_CC_CLANG 0
#define ASMJIT_CC_CLANG 0
#define ASMJIT_CC_CODEGEAR 0
#define ASMJIT_CC_GCC 0
#define ASMJIT_CC_MSC 0
#define ASMJIT_CC_GCC 0
#define ASMJIT_CC_INTEL 0
#define ASMJIT_CC_MSC 0
#if defined(__CODEGEARC__)
// Intel masquerades as GCC, so check for it first.
#if defined(__INTEL_COMPILER)
# undef ASMJIT_CC_INTEL
# define ASMJIT_CC_INTEL __INTEL_COMPILER
#elif defined(__CODEGEARC__)
# undef ASMJIT_CC_CODEGEAR
# define ASMJIT_CC_CODEGEAR (__CODEGEARC__)
#elif defined(__BORLANDC__)
@@ -347,12 +357,27 @@
# error "[asmjit] Unable to detect the C/C++ compiler."
#endif
#if ASMJIT_CC_GCC && defined(__GXX_EXPERIMENTAL_CXX0X__)
# define ASMJIT_CC_GCC_CXX0X 1
#else
# define ASMJIT_CC_GCC_CXX0X 0
#if ASMJIT_CC_INTEL && (defined(__GNUC__) || defined(__clang__))
# define ASMJIT_CC_INTEL_COMPAT_MODE 1
# else
# define ASMJIT_CC_INTEL_COMPAT_MODE 0
#endif
#define ASMJIT_CC_CODEGEAR_EQ(x, y) (ASMJIT_CC_CODEGEAR == (((x) << 8) + (y)))
#define ASMJIT_CC_CODEGEAR_GE(x, y) (ASMJIT_CC_CODEGEAR >= (((x) << 8) + (y)))
#define ASMJIT_CC_CLANG_EQ(x, y, z) (ASMJIT_CC_CLANG == ((x) * 10000000 + (y) * 100000 + (z)))
#define ASMJIT_CC_CLANG_GE(x, y, z) (ASMJIT_CC_CLANG >= ((x) * 10000000 + (y) * 100000 + (z)))
#define ASMJIT_CC_GCC_EQ(x, y, z) (ASMJIT_CC_GCC == ((x) * 10000000 + (y) * 100000 + (z)))
#define ASMJIT_CC_GCC_GE(x, y, z) (ASMJIT_CC_GCC >= ((x) * 10000000 + (y) * 100000 + (z)))
#define ASMJIT_CC_INTEL_EQ(x, y) (ASMJIT_CC_INTEL == (((x) * 100) + (y)))
#define ASMJIT_CC_INTEL_GE(x, y) (ASMJIT_CC_INTEL >= (((x) * 100) + (y)))
#define ASMJIT_CC_MSC_EQ(x, y, z) (ASMJIT_CC_MSC == ((x) * 10000000 + (y) * 100000 + (z)))
#define ASMJIT_CC_MSC_GE(x, y, z) (ASMJIT_CC_MSC >= ((x) * 10000000 + (y) * 100000 + (z)))
#if defined(__MINGW64__)
# define ASMJIT_CC_MINGW 64
#elif defined(__MINGW32__)
@@ -361,55 +386,35 @@
# define ASMJIT_CC_MINGW 0
#endif
#define ASMJIT_CC_CODEGEAR_EQ(x, y, z) (ASMJIT_CC_CODEGEAR == (x << 8) + y)
#define ASMJIT_CC_CODEGEAR_GE(x, y, z) (ASMJIT_CC_CODEGEAR >= (x << 8) + y)
#if defined(__cplusplus)
# if __cplusplus >= 201103L
# define ASMJIT_CC_CXX_VERSION __cplusplus
# elif defined(__GXX_EXPERIMENTAL_CXX0X__) || ASMJIT_CC_MSC_GE(18, 0, 0) || ASMJIT_CC_INTEL_GE(14, 0)
# define ASMJIT_CC_CXX_VERSION 201103L
# else
# define ASMJIT_CC_CXX_VERSION 199711L
# endif
#endif
#define ASMJIT_CC_CLANG_EQ(x, y, z) (ASMJIT_CC_CLANG == x * 10000000 + y * 100000 + z)
#define ASMJIT_CC_CLANG_GE(x, y, z) (ASMJIT_CC_CLANG >= x * 10000000 + y * 100000 + z)
#define ASMJIT_CC_GCC_EQ(x, y, z) (ASMJIT_CC_GCC == x * 10000000 + y * 100000 + z)
#define ASMJIT_CC_GCC_GE(x, y, z) (ASMJIT_CC_GCC >= x * 10000000 + y * 100000 + z)
#define ASMJIT_CC_MSC_EQ(x, y, z) (ASMJIT_CC_MSC == x * 10000000 + y * 100000 + z)
#define ASMJIT_CC_MSC_GE(x, y, z) (ASMJIT_CC_MSC >= x * 10000000 + y * 100000 + z)
#if !defined(ASMJIT_CC_CXX_VERSION)
# define ASMJIT_CC_CXX_VERSION 0
#endif
// [@CC}@]
// [@CC_FEATURES{@]
// \def ASMJIT_CC_HAS_NATIVE_CHAR
// True if the C++ compiler treats char as a native type.
//
// \def ASMJIT_CC_HAS_NATIVE_WCHAR_T
// True if the C++ compiler treats wchar_t as a native type.
//
// \def ASMJIT_CC_HAS_NATIVE_CHAR16_T
// True if the C++ compiler treats char16_t as a native type.
//
// \def ASMJIT_CC_HAS_NATIVE_CHAR32_T
// True if the C++ compiler treats char32_t as a native type.
//
// \def ASMJIT_CC_HAS_OVERRIDE
// True if the C++ compiler supports override keyword.
//
// \def ASMJIT_CC_HAS_NOEXCEPT
// True if the C++ compiler supports noexcept keyword.
#if ASMJIT_CC_CLANG
# define ASMJIT_CC_HAS_ATTRIBUTE (1)
# define ASMJIT_CC_HAS_BUILTIN (1)
# define ASMJIT_CC_HAS_DECLSPEC (0)
# define ASMJIT_CC_HAS_ALIGNAS (__has_extension(__cxx_alignas__))
# define ASMJIT_CC_HAS_ALIGNOF (__has_extension(__cxx_alignof__))
# define ASMJIT_CC_HAS_ASSUME (0)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (__has_attribute(__aligned__))
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(__always_inline__))
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (__has_attribute(__noinline__))
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (__has_attribute(__noreturn__))
# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (__has_attribute(__optimize__))
# define ASMJIT_CC_HAS_BUILTIN_ASSUME (__has_builtin(__builtin_assume))
# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned))
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (__has_builtin(__builtin_unreachable))
# define ASMJIT_CC_HAS_ALIGNAS (__has_extension(__cxx_alignas__))
# define ASMJIT_CC_HAS_ALIGNOF (__has_extension(__cxx_alignof__))
# define ASMJIT_CC_HAS_CONSTEXPR (__has_extension(__cxx_constexpr__))
# define ASMJIT_CC_HAS_DECLTYPE (__has_extension(__cxx_decltype__))
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (__has_extension(__cxx_defaulted_functions__))
@@ -418,30 +423,25 @@
# define ASMJIT_CC_HAS_INITIALIZER_LIST (__has_extension(__cxx_generalized_initializers__))
# define ASMJIT_CC_HAS_LAMBDA (__has_extension(__cxx_lambdas__))
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (__has_extension(__cxx_unicode_literals__))
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (__has_extension(__cxx_unicode_literals__))
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NOEXCEPT (__has_extension(__cxx_noexcept__))
# define ASMJIT_CC_HAS_NULLPTR (__has_extension(__cxx_nullptr__))
# define ASMJIT_CC_HAS_OVERRIDE (__has_extension(__cxx_override_control__))
# define ASMJIT_CC_HAS_RVALUE (__has_extension(__cxx_rvalue_references__))
# define ASMJIT_CC_HAS_STATIC_ASSERT (__has_extension(__cxx_static_assert__))
# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (__has_extension(__cxx_variadic_templates__))
#endif
#if ASMJIT_CC_CODEGEAR
# define ASMJIT_CC_HAS_ATTRIBUTE (0)
# define ASMJIT_CC_HAS_BUILTIN (0)
# define ASMJIT_CC_HAS_DECLSPEC (1)
# define ASMJIT_CC_HAS_ALIGNAS (0)
# define ASMJIT_CC_HAS_ALIGNOF (0)
# define ASMJIT_CC_HAS_ASSUME (0)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_CONSTEXPR (0)
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_ALIGNAS (0)
# define ASMJIT_CC_HAS_ALIGNOF (0)
# define ASMJIT_CC_HAS_CONSTEXPR (0)
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (0)
# define ASMJIT_CC_HAS_DELETE_FUNCTION (0)
@@ -449,65 +449,94 @@
# define ASMJIT_CC_HAS_INITIALIZER_LIST (0)
# define ASMJIT_CC_HAS_LAMBDA (0)
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (0)
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (0)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NOEXCEPT (0)
# define ASMJIT_CC_HAS_NULLPTR (0)
# define ASMJIT_CC_HAS_OVERRIDE (0)
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (0)
#endif
#if ASMJIT_CC_GCC
# define ASMJIT_CC_HAS_ATTRIBUTE (1)
# define ASMJIT_CC_HAS_BUILTIN (1)
# define ASMJIT_CC_HAS_DECLSPEC (0)
# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_ASSUME (0)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (ASMJIT_CC_GCC_GE(2, 7, 0))
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (ASMJIT_CC_GCC_GE(4, 4, 0) && !ASMJIT_CC_MINGW)
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (ASMJIT_CC_GCC_GE(3, 4, 0) && !ASMJIT_CC_MINGW)
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (ASMJIT_CC_GCC_GE(2, 5, 0))
# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (ASMJIT_CC_GCC_GE(4, 4, 0))
# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0)
# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (ASMJIT_CC_GCC_GE(4, 7, 0))
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (1)
# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
#endif
#if ASMJIT_CC_INTEL
# define ASMJIT_CC_HAS_ATTRIBUTE (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
# define ASMJIT_CC_HAS_ASSUME (1)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (1)
# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_INTEL >= 1500)
# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_INTEL >= 1500)
# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_INTEL >= 1400)
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_INTEL >= 1200)
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_INTEL >= 1200)
# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_INTEL >= 1200)
# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_INTEL >= 1400)
# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_INTEL >= 1400)
# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_INTEL >= 1200)
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_INTEL >= 1400 || (ASMJIT_CC_INTEL_COMPAT_MODE > 0 && ASMJIT_CC_INTEL >= 1206))
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_INTEL >= 1400 || (ASMJIT_CC_INTEL_COMPAT_MODE > 0 && ASMJIT_CC_INTEL >= 1206))
# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_INTEL >= 1400)
# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_INTEL >= 1206)
# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_INTEL >= 1400)
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_INTEL >= 1110)
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_INTEL >= 1110)
# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_INTEL >= 1206)
#endif
#if ASMJIT_CC_MSC
# define ASMJIT_CC_HAS_ATTRIBUTE (0)
# define ASMJIT_CC_HAS_BUILTIN (0)
# define ASMJIT_CC_HAS_DECLSPEC (1)
# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_ASSUME (1)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (1)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (1)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (1)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (1)
# define ASMJIT_CC_HAS_ASSUME (1)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_MSC_GE(18, 0, 0))
# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_MSC_GE(18, 0, 0))
@@ -515,38 +544,74 @@
# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_MSC_GE(18, 0, 0))
# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_MSC_GE(19, 0, 0))
# if defined(_NATIVE_WCHAR_T_DEFINED)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# else
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (0)
# endif
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_MSC_GE(14, 0, 0))
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_MSC_GE(18, 0, 0))
#endif
#if !ASMJIT_CC_HAS_ATTRIBUTE
// Fixup some vendor specific keywords.
#if !defined(ASMJIT_CC_HAS_ASSUME)
# define ASMJIT_CC_HAS_ASSUME (0)
#endif
#if !defined(ASMJIT_CC_HAS_ASSUME_ALIGNED)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
#endif
// Fixup compilers that don't support '__attribute__'.
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE)
# define ASMJIT_CC_HAS_ATTRIBUTE (0)
#endif
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (0)
#endif
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (0)
#endif
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE)
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (0)
#endif
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_NORETURN)
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (0)
#endif
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE)
# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (0)
#endif
#if !ASMJIT_CC_HAS_BUILTIN
// Fixup compilers that don't support '__builtin?'.
#if !defined(ASMJIT_CC_HAS_BUILTIN_ASSUME)
# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0)
#endif
#if !defined(ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED)
# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (0)
#endif
#if !defined(ASMJIT_CC_HAS_BUILTIN_EXPECT)
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (0)
#endif
#if !defined(ASMJIT_CC_HAS_BUILTIN_UNREACHABLE)
# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (0)
#endif
#if !ASMJIT_CC_HAS_DECLSPEC
// Fixup compilers that don't support 'declspec'.
#if !defined(ASMJIT_CC_HAS_DECLSPEC_ALIGN)
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (0)
#endif
#if !defined(ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0)
#endif
#if !defined(ASMJIT_CC_HAS_DECLSPEC_NOINLINE)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0)
#endif
#if !defined(ASMJIT_CC_HAS_DECLSPEC_NORETURN)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (0)
#endif
// [@CC_FEATURES}@]
@@ -685,7 +750,7 @@
// [@CC_REGPARM{@]
// \def ASMJIT_REGPARM(n)
// A custom calling convention which passes n arguments in registers.
#if ASMJIT_ARCH_X86 && (ASMJIT_CC_GCC || ASMJIT_CC_CLANG)
#if ASMJIT_ARCH_X86 && ASMJIT_CC_HAS_ATTRIBUTE
# define ASMJIT_REGPARM(n) __attribute__((__regparm__(n)))
#else
# define ASMJIT_REGPARM(n)
@@ -742,7 +807,7 @@
//
// \def ASMJIT_UNLIKELY(exp)
// Expression exp is likely to be false.
#if ASMJIT_HAS_BUILTIN_EXPECT
#if ASMJIT_CC_HAS_BUILTIN_EXPECT
# define ASMJIT_LIKELY(exp) __builtin_expect(!!(exp), 1)
# define ASMJIT_UNLIKELY(exp) __builtin_expect(!!(exp), 0)
#else
@@ -810,13 +875,9 @@ typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
# endif
# endif
# define ASMJIT_INT64_C(x) (x##i64)
# define ASMJIT_UINT64_C(x) (x##ui64)
#else
# include <stdint.h>
# include <limits.h>
# define ASMJIT_INT64_C(x) (x##ll)
# define ASMJIT_UINT64_C(x) (x##ull)
#endif
// [@STDTYPES}@]
@@ -824,12 +885,14 @@ typedef unsigned __int64 uint64_t;
// [asmjit::Build - Dependencies]
// ============================================================================
#include <new>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <new>
#if ASMJIT_OS_POSIX
# include <pthread.h>
#endif // ASMJIT_OS_POSIX
@@ -840,88 +903,46 @@ typedef unsigned __int64 uint64_t;
// Build host architecture if no architecture is selected.
#if !defined(ASMJIT_BUILD_HOST) && \
!defined(ASMJIT_BUILD_X86) && \
!defined(ASMJIT_BUILD_X64)
!defined(ASMJIT_BUILD_X86) && \
!defined(ASMJIT_BUILD_ARM)
# define ASMJIT_BUILD_HOST
#endif
// Autodetect host architecture if enabled.
// Detect host architecture if building only for host.
#if defined(ASMJIT_BUILD_HOST)
# if ASMJIT_ARCH_X86 && !defined(ASMJIT_BUILD_X86)
# if (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && !defined(ASMJIT_BUILD_X86)
# define ASMJIT_BUILD_X86
# endif // ASMJIT_ARCH_X86 && !ASMJIT_BUILD_X86
# if ASMJIT_ARCH_X64 && !defined(ASMJIT_BUILD_X64)
# define ASMJIT_BUILD_X64
# endif // ASMJIT_ARCH_X64 && !ASMJIT_BUILD_X64
# endif // ASMJIT_ARCH_X86
#endif // ASMJIT_BUILD_HOST
#if defined(_MSC_VER) && _MSC_VER >= 1400
# define ASMJIT_ENUM(name) enum name : uint32_t
#if ASMJIT_CC_MSC
# define ASMJIT_UINT64_C(x) x##ui64
#else
# define ASMJIT_ENUM(name) enum name
# define ASMJIT_UINT64_C(x) x##ull
#endif
#if ASMJIT_ARCH_LE
# define _ASMJIT_ARCH_INDEX(total, index) (index)
# define ASMJIT_PACK32_4x8(A, B, C, D) ((A) + ((B) << 8) + ((C) << 16) + ((D) << 24))
#else
# define _ASMJIT_ARCH_INDEX(total, index) ((total) - 1 - (index))
# define ASMJIT_PACK32_4x8(A, B, C, D) ((D) + ((C) << 8) + ((B) << 16) + ((A) << 24))
#endif
#if !defined(ASMJIT_ALLOC) && !defined(ASMJIT_REALLOC) && !defined(ASMJIT_FREE)
# define ASMJIT_ALLOC(size) ::malloc(size)
# define ASMJIT_REALLOC(ptr, size) ::realloc(ptr, size)
# define ASMJIT_FREE(ptr) ::free(ptr)
#else
# if !defined(ASMJIT_ALLOC) || !defined(ASMJIT_REALLOC) || !defined(ASMJIT_FREE)
# error "[asmjit] You must provide ASMJIT_ALLOC, ASMJIT_REALLOC and ASMJIT_FREE."
# endif
#endif // !ASMJIT_ALLOC && !ASMJIT_REALLOC && !ASMJIT_FREE
#define ASMJIT_NO_COPY(...) \
private: \
ASMJIT_INLINE __VA_ARGS__(const __VA_ARGS__& other) ASMJIT_NOEXCEPT; \
ASMJIT_INLINE __VA_ARGS__& operator=(const __VA_ARGS__& other) ASMJIT_NOEXCEPT; \
public:
// ============================================================================
// [asmjit::Build - Relative Path]
// ============================================================================
namespace asmjit {
namespace DebugUtils {
// Workaround that is used to convert an absolute path to a relative one at
// a C macro level, used by asserts and tracing. This workaround is needed
// as some build systems always convert the source code files to use absolute
// paths. Please note that if absolute paths are used this doesn't remove them
// from the compiled binary and can be still considered a security risk.
enum {
kSourceRelativePathOffset = int(sizeof(__FILE__) - sizeof("asmjit/build.h"))
};
// ASMJIT_TRACE is only used by sources and private headers. It's safe to make
// it unavailable outside of AsmJit.
// Internal macros that are only used when building AsmJit itself.
#if defined(ASMJIT_EXPORTS)
static inline int disabledTrace(...) { return 0; }
# if defined(ASMJIT_TRACE)
# define ASMJIT_TSEC(section) section
# define ASMJIT_TLOG ::printf
# if !defined(ASMJIT_DEBUG) && ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE
# define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))
# else
# define ASMJIT_TSEC(section) ASMJIT_NOP
# define ASMJIT_TLOG 0 && ::asmjit::DebugUtils::disabledTrace
# endif // ASMJIT_TRACE
# define ASMJIT_FAVOR_SIZE
# endif
#endif // ASMJIT_EXPORTS
} // DebugUtils namespace
} // asmjit namespace
// ============================================================================
// [asmjit::Build - Test]
// ============================================================================
// Include a unit testing package if this is a `asmjit_test` build.
#if defined(ASMJIT_TEST)
# include "../test/broken.h"
# include "../../test/broken.h"
#endif // ASMJIT_TEST
// [Guard]

View File

@@ -9,27 +9,25 @@
#define _ASMJIT_BASE_H
// [Dependencies]
#include "./build.h"
#include "./base/arch.h"
#include "./base/assembler.h"
#include "./base/codebuilder.h"
#include "./base/codecompiler.h"
#include "./base/codeemitter.h"
#include "./base/codeholder.h"
#include "./base/constpool.h"
#include "./base/containers.h"
#include "./base/cpuinfo.h"
#include "./base/func.h"
#include "./base/globals.h"
#include "./base/logger.h"
#include "./base/logging.h"
#include "./base/operand.h"
#include "./base/podvector.h"
#include "./base/osutils.h"
#include "./base/runtime.h"
#include "./base/simdtypes.h"
#include "./base/string.h"
#include "./base/utils.h"
#include "./base/vectypes.h"
#include "./base/vmem.h"
#include "./base/zone.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
#include "./base/compiler.h"
#include "./base/compilerfunc.h"
#include "./base/hlstream.h"
#endif // !ASMJIT_DISABLE_COMPILER
// [Guard]
#endif // _ASMJIT_BASE_H

161
src/asmjit/base/arch.cpp Normal file
View File

@@ -0,0 +1,161 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/arch.h"
#if defined(ASMJIT_BUILD_X86)
#include "../x86/x86operand.h"
#endif // ASMJIT_BUILD_X86
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::ArchInfo]
// ============================================================================
static const uint32_t archInfoTable[] = {
// <-------------+---------------------+-----------------------+-------+
// | Type | SubType | GPInfo|
// <-------------+---------------------+-----------------------+-------+
ASMJIT_PACK32_4x8(ArchInfo::kTypeNone , ArchInfo::kSubTypeNone, 0, 0),
ASMJIT_PACK32_4x8(ArchInfo::kTypeX86 , ArchInfo::kSubTypeNone, 4, 8),
ASMJIT_PACK32_4x8(ArchInfo::kTypeX64 , ArchInfo::kSubTypeNone, 8, 16),
ASMJIT_PACK32_4x8(ArchInfo::kTypeX32 , ArchInfo::kSubTypeNone, 8, 16),
ASMJIT_PACK32_4x8(ArchInfo::kTypeA32 , ArchInfo::kSubTypeNone, 4, 16),
ASMJIT_PACK32_4x8(ArchInfo::kTypeA64 , ArchInfo::kSubTypeNone, 8, 32)
};
ASMJIT_FAVOR_SIZE void ArchInfo::init(uint32_t type, uint32_t subType) noexcept {
uint32_t index = type < ASMJIT_ARRAY_SIZE(archInfoTable) ? type : uint32_t(0);
// Make sure the `archInfoTable` array is correctly indexed.
_signature = archInfoTable[index];
ASMJIT_ASSERT(_type == index);
// Even if the architecture is not known we setup its type and sub-type,
// however, such architecture is not really useful.
_type = type;
_subType = subType;
}
// ============================================================================
// [asmjit::ArchUtils]
// ============================================================================
ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t archType, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept {
uint32_t typeId = typeIdInOut;
// Zero the signature so it's clear in case that typeId is not invalid.
regInfo._signature = 0;
#if defined(ASMJIT_BUILD_X86)
if (ArchInfo::isX86Family(archType)) {
// Passed RegType instead of TypeId?
if (typeId <= Reg::kRegMax)
typeId = x86OpData.archRegs.regTypeToTypeId[typeId];
if (ASMJIT_UNLIKELY(!TypeId::isValid(typeId)))
return DebugUtils::errored(kErrorInvalidTypeId);
// First normalize architecture dependent types.
if (TypeId::isAbstract(typeId)) {
if (typeId == TypeId::kIntPtr)
typeId = (archType == ArchInfo::kTypeX86) ? TypeId::kI32 : TypeId::kI64;
else
typeId = (archType == ArchInfo::kTypeX86) ? TypeId::kU32 : TypeId::kU64;
}
// Type size helps to construct all kinds of registers. If the size is zero
// then the TypeId is invalid.
uint32_t size = TypeId::sizeOf(typeId);
if (ASMJIT_UNLIKELY(!size))
return DebugUtils::errored(kErrorInvalidTypeId);
if (ASMJIT_UNLIKELY(typeId == TypeId::kF80))
return DebugUtils::errored(kErrorInvalidUseOfF80);
uint32_t regType = 0;
switch (typeId) {
case TypeId::kI8:
case TypeId::kU8:
regType = X86Reg::kRegGpbLo;
break;
case TypeId::kI16:
case TypeId::kU16:
regType = X86Reg::kRegGpw;
break;
case TypeId::kI32:
case TypeId::kU32:
regType = X86Reg::kRegGpd;
break;
case TypeId::kI64:
case TypeId::kU64:
if (archType == ArchInfo::kTypeX86)
return DebugUtils::errored(kErrorInvalidUseOfGpq);
regType = X86Reg::kRegGpq;
break;
// F32 and F64 are always promoted to use vector registers.
case TypeId::kF32:
typeId = TypeId::kF32x1;
regType = X86Reg::kRegXmm;
break;
case TypeId::kF64:
typeId = TypeId::kF64x1;
regType = X86Reg::kRegXmm;
break;
// Mask registers {k}.
case TypeId::kMask8:
case TypeId::kMask16:
case TypeId::kMask32:
case TypeId::kMask64:
regType = X86Reg::kRegK;
break;
// MMX registers.
case TypeId::kMmx32:
case TypeId::kMmx64:
regType = X86Reg::kRegMm;
break;
// XMM|YMM|ZMM registers.
default:
if (size <= 16)
regType = X86Reg::kRegXmm;
else if (size == 32)
regType = X86Reg::kRegYmm;
else
regType = X86Reg::kRegZmm;
break;
}
typeIdInOut = typeId;
regInfo._signature = x86OpData.archRegs.regInfo[regType].getSignature();
return kErrorOk;
}
#endif // ASMJIT_BUILD_X86
return DebugUtils::errored(kErrorInvalidArch);
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

199
src/asmjit/base/arch.h Normal file
View File

@@ -0,0 +1,199 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_ARCH_H
#define _ASMJIT_BASE_ARCH_H
// [Dependencies]
#include "../base/globals.h"
#include "../base/operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::ArchInfo]
// ============================================================================
//! Information about the target architecture.
//!
//! Stores the architecture type, its sub-type (execution mode or baseline
//! instruction-set), and the native general-purpose register size and count.
//! All four fields are 8-bit and alias `_signature`, so two ArchInfo
//! instances can be copied and compared as a single 32-bit integer.
class ArchInfo {
public:
  //! Architecture type.
  ASMJIT_ENUM(Type) {
    kTypeNone = 0, //!< No/Unknown architecture.
    // X86 architectures.
    kTypeX86 = 1, //!< X86 architecture (32-bit).
    kTypeX64 = 2, //!< X64 architecture (64-bit) (AMD64).
    kTypeX32 = 3, //!< X32 architecture (DEAD-END).
    // ARM architectures.
    kTypeA32 = 4, //!< ARM 32-bit architecture (AArch32/ARM/THUMB).
    kTypeA64 = 5, //!< ARM 64-bit architecture (AArch64).
    //! Architecture detected at compile-time (architecture of the host).
    kTypeHost = ASMJIT_ARCH_X86 ? kTypeX86 :
                ASMJIT_ARCH_X64 ? kTypeX64 :
                ASMJIT_ARCH_ARM32 ? kTypeA32 :
                ASMJIT_ARCH_ARM64 ? kTypeA64 : kTypeNone
  };
  //! Architecture sub-type or execution mode.
  ASMJIT_ENUM(SubType) {
    kSubTypeNone = 0, //!< Default mode (or no specific mode).
    // X86 sub-types.
    kSubTypeX86_AVX = 1, //!< Code generation uses AVX by default (VEC instructions).
    kSubTypeX86_AVX2 = 2, //!< Code generation uses AVX2 by default (VEC instructions).
    kSubTypeX86_AVX512 = 3, //!< Code generation uses AVX-512F by default (+32 vector regs).
    kSubTypeX86_AVX512VL = 4, //!< Code generation uses AVX-512F-VL by default (+VL extensions).
    // ARM sub-types.
    kSubTypeA32_Thumb = 8, //!< THUMB|THUMB2 sub-type (only ARM in 32-bit mode).
    // The host sub-type is picked from compiler-provided feature macros, so it
    // reflects the instruction-set the library itself was compiled with.
#if (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX512VL__)
    kSubTypeHost = kSubTypeX86_AVX512VL
#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX512F__)
    kSubTypeHost = kSubTypeX86_AVX512
#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX2__)
    kSubTypeHost = kSubTypeX86_AVX2
#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX__)
    kSubTypeHost = kSubTypeX86_AVX
#elif (ASMJIT_ARCH_ARM32) && (defined(_M_ARMT) || defined(__thumb__) || defined(__thumb2__))
    kSubTypeHost = kSubTypeA32_Thumb
#else
    kSubTypeHost = 0
#endif
  };
  // --------------------------------------------------------------------------
  // [Utilities]
  // --------------------------------------------------------------------------
  //! Get if the given `archType` is an X86-family architecture (X86|X64|X32).
  //! Relies on `kTypeX86..kTypeX32` being contiguous values.
  static ASMJIT_INLINE bool isX86Family(uint32_t archType) noexcept { return archType >= kTypeX86 && archType <= kTypeX32; }
  //! Get if the given `archType` is an ARM-family architecture (A32|A64).
  static ASMJIT_INLINE bool isArmFamily(uint32_t archType) noexcept { return archType >= kTypeA32 && archType <= kTypeA64; }
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create an uninitialized ArchInfo (type is `kTypeNone`).
  ASMJIT_INLINE ArchInfo() noexcept : _signature(0) {}
  //! Create a copy of `other`.
  ASMJIT_INLINE ArchInfo(const ArchInfo& other) noexcept : _signature(other._signature) {}
  //! Create an ArchInfo initialized to `type` and `subType` (calls `init()`).
  explicit ASMJIT_INLINE ArchInfo(uint32_t type, uint32_t subType = kSubTypeNone) noexcept { init(type, subType); }
  //! Get the ArchInfo of the host - the architecture this code was compiled for.
  ASMJIT_INLINE static ArchInfo host() noexcept { return ArchInfo(kTypeHost, kSubTypeHost); }
  // --------------------------------------------------------------------------
  // [Init / Reset]
  // --------------------------------------------------------------------------
  //! Get if this ArchInfo was initialized (its type is not `kTypeNone`).
  ASMJIT_INLINE bool isInitialized() const noexcept { return _type != kTypeNone; }
  //! Initialize to the given `type` and `subType`. Implemented out-of-line;
  //! presumably also fills `_gpSize` and `_gpCount` - see arch.cpp.
  ASMJIT_API void init(uint32_t type, uint32_t subType = kSubTypeNone) noexcept;
  //! Reset to the uninitialized (default-constructed) state.
  ASMJIT_INLINE void reset() noexcept { _signature = 0; }
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get if the architecture is 32-bit.
  ASMJIT_INLINE bool is32Bit() const noexcept { return _gpSize == 4; }
  //! Get if the architecture is 64-bit.
  ASMJIT_INLINE bool is64Bit() const noexcept { return _gpSize == 8; }
  //! Get architecture type, see \ref Type.
  ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
  //! Get architecture sub-type, see \ref SubType.
  //!
  //! X86 & X64
  //! ---------
  //!
  //! Architecture subtype describe the highest instruction-set level that can
  //! be used.
  //!
  //! ARM32
  //! -----
  //!
  //! Architecture mode means the instruction encoding to be used when generating
  //! machine code, thus mode can be used to force generation of THUMB and THUMB2
  //! encoding or regular ARM encoding.
  //!
  //! ARM64
  //! -----
  //!
  //! No meaning yet.
  ASMJIT_INLINE uint32_t getSubType() const noexcept { return _subType; }
  //! Get if the architecture is X86, X64, or X32.
  ASMJIT_INLINE bool isX86Family() const noexcept { return isX86Family(_type); }
  //! Get if the architecture is ARM32 or ARM64.
  ASMJIT_INLINE bool isArmFamily() const noexcept { return isArmFamily(_type); }
  //! Get a size of a general-purpose register.
  ASMJIT_INLINE uint32_t getGpSize() const noexcept { return _gpSize; }
  //! Get number of general-purpose registers.
  ASMJIT_INLINE uint32_t getGpCount() const noexcept { return _gpCount; }
  // --------------------------------------------------------------------------
  // [Operator Overload]
  // --------------------------------------------------------------------------
  ASMJIT_INLINE const ArchInfo& operator=(const ArchInfo& other) noexcept { _signature = other._signature; return *this; }
  // Equality is a single 32-bit compare thanks to the signature union below.
  ASMJIT_INLINE bool operator==(const ArchInfo& other) const noexcept { return _signature == other._signature; }
  ASMJIT_INLINE bool operator!=(const ArchInfo& other) const noexcept { return _signature != other._signature; }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  union {
    struct {
      uint8_t _type; //!< Architecture type.
      uint8_t _subType; //!< Architecture sub-type.
      uint8_t _gpSize; //!< Default size of a general purpose register.
      uint8_t _gpCount; //!< Count of all general purpose registers.
    };
    uint32_t _signature; //!< Architecture signature (32-bit int).
  };
};
// ============================================================================
// [asmjit::ArchRegs]
// ============================================================================
//! Information about all architecture registers.
//!
//! A set of lookup tables indexed by \ref Reg::Type so generic code can query
//! a register's signature, the register count, and the default TypeId without
//! architecture-specific logic (e.g. `archRegs.regInfo[regType]`).
struct ArchRegs {
  //! Register information and signatures indexed by \ref Reg::Type.
  RegInfo regInfo[Reg::kRegMax + 1];
  //! Count (maximum) of registers per \ref Reg::Type.
  uint8_t regCount[Reg::kRegMax + 1];
  //! Converts RegType to TypeId, see \ref TypeId::Id.
  uint8_t regTypeToTypeId[Reg::kRegMax + 1];
};
// ============================================================================
// [asmjit::ArchUtils]
// ============================================================================
//! Architecture-related utilities.
struct ArchUtils {
  //! Map an abstract TypeId to a \ref RegInfo usable on `archType`.
  //!
  //! `typeIdInOut` is in/out - it may be promoted by the implementation (for
  //! example scalar F32/F64 are promoted to vector TypeIds on X86). Returns
  //! an error when the TypeId cannot be represented on the target (e.g. F80,
  //! or 64-bit GP registers on 32-bit X86).
  ASMJIT_API static Error typeIdToRegInfo(uint32_t archType, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_ARCH_H

View File

@@ -9,495 +9,379 @@
// [Dependencies]
#include "../base/assembler.h"
#include "../base/constpool.h"
#include "../base/utils.h"
#include "../base/vmem.h"
#include <stdarg.h>
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}
ErrorHandler* ErrorHandler::addRef() const noexcept {
return const_cast<ErrorHandler*>(this);
}
void ErrorHandler::release() noexcept {}
// ============================================================================
// [asmjit::ExternalTool]
// ============================================================================
ExternalTool::ExternalTool() noexcept
: _assembler(nullptr),
_exId(0),
_arch(kArchNone),
_regSize(0),
_finalized(false),
_reserved(0),
_lastError(kErrorNotInitialized) {}
ExternalTool::~ExternalTool() noexcept {}
Error ExternalTool::setLastError(Error error, const char* message) noexcept {
// Special case, reset the last error the error is `kErrorOk`.
if (error == kErrorOk) {
_lastError = kErrorOk;
return kErrorOk;
}
// Don't do anything if the code-generator doesn't have associated assembler.
Assembler* assembler = getAssembler();
if (assembler == nullptr)
return error;
if (message == nullptr)
message = DebugUtils::errorAsString(error);
// Logging is skipped if the error is handled by `ErrorHandler.
ErrorHandler* eh = assembler->getErrorHandler();
ASMJIT_TLOG("[ERROR (ExternalTool)] %s (0x%0.8u) %s\n", message,
static_cast<unsigned int>(error),
!eh ? "(Possibly unhandled?)" : "");
if (eh != nullptr && eh->handleError(error, message, this))
return error;
#if !defined(ASMJIT_DISABLE_LOGGER)
Logger* logger = assembler->getLogger();
if (logger != nullptr)
logger->logFormat(Logger::kStyleComment,
"*** ERROR (ExternalTool): %s (0x%0.8u).\n", message,
static_cast<unsigned int>(error));
#endif // !ASMJIT_DISABLE_LOGGER
// The handler->handleError() function may throw an exception or longjmp()
// to terminate the execution of `setLastError()`. This is the reason why
// we have delayed changing the `_error` member until now.
_lastError = error;
return error;
}
// ============================================================================
// [asmjit::Assembler - Construction / Destruction]
// ============================================================================
Assembler::Assembler(Runtime* runtime) noexcept
: _runtime(runtime),
_logger(nullptr),
_errorHandler(nullptr),
_arch(kArchNone),
_regSize(0),
_reserved(0),
_asmOptions(0),
_instOptions(0),
_lastError(runtime ? kErrorOk : kErrorNotInitialized),
_exIdGenerator(0),
_exCountAttached(0),
_zoneAllocator(8192 - Zone::kZoneOverhead),
_buffer(nullptr),
_end(nullptr),
_cursor(nullptr),
_trampolinesSize(0),
_comment(nullptr),
_unusedLinks(nullptr),
_labels(),
_relocations() {}
Assembler::Assembler() noexcept
: CodeEmitter(kTypeAssembler),
_section(nullptr),
_bufferData(nullptr),
_bufferEnd(nullptr),
_bufferPtr(nullptr) {}
Assembler::~Assembler() noexcept {
reset(true);
if (_errorHandler != nullptr)
_errorHandler->release();
if (_code) sync();
}
// ============================================================================
// [asmjit::Assembler - Reset]
// [asmjit::Assembler - Events]
// ============================================================================
void Assembler::reset(bool releaseMemory) noexcept {
_asmOptions = 0;
_instOptions = 0;
_lastError = kErrorOk;
_exIdGenerator = 0;
_exCountAttached = 0;
Error Assembler::onAttach(CodeHolder* code) noexcept {
// Attach to the end of the .text section.
_section = code->_sections[0];
uint8_t* p = _section->_buffer._data;
_zoneAllocator.reset(releaseMemory);
_bufferData = p;
_bufferEnd = p + _section->_buffer._capacity;
_bufferPtr = p + _section->_buffer._length;
return Base::onAttach(code);
}
if (releaseMemory && _buffer != nullptr) {
ASMJIT_FREE(_buffer);
_buffer = nullptr;
_end = nullptr;
}
_cursor = _buffer;
_trampolinesSize = 0;
_comment = nullptr;
_unusedLinks = nullptr;
_sections.reset(releaseMemory);
_labels.reset(releaseMemory);
_relocations.reset(releaseMemory);
Error Assembler::onDetach(CodeHolder* code) noexcept {
_section = nullptr;
_bufferData = nullptr;
_bufferEnd = nullptr;
_bufferPtr = nullptr;
return Base::onDetach(code);
}
// ============================================================================
// [asmjit::Assembler - Logging & Error Handling]
// [asmjit::Assembler - Sync]
// ============================================================================
Error Assembler::setLastError(Error error, const char* message) noexcept {
// Special case, reset the last error the error is `kErrorOk`.
if (error == kErrorOk) {
_lastError = kErrorOk;
return kErrorOk;
}
void Assembler::sync() noexcept {
ASMJIT_ASSERT(_code != nullptr); // Only called by CodeHolder, so we must be attached.
ASMJIT_ASSERT(_section != nullptr); // One section must always be active, no matter what.
ASMJIT_ASSERT(_bufferData == _section->_buffer._data); // `_bufferStart` is a shortcut to `_section->buffer.data`.
if (message == nullptr)
message = DebugUtils::errorAsString(error);
// Logging is skipped if the error is handled by `ErrorHandler`.
ErrorHandler* eh = _errorHandler;
ASMJIT_TLOG("[ERROR (Assembler)] %s (0x%0.8u) %s\n", message,
static_cast<unsigned int>(error),
!eh ? "(Possibly unhandled?)" : "");
if (eh != nullptr && eh->handleError(error, message, this))
return error;
#if !defined(ASMJIT_DISABLE_LOGGER)
Logger* logger = _logger;
if (logger != nullptr)
logger->logFormat(Logger::kStyleComment,
"*** ERROR (Assembler): %s (0x%0.8u).\n", message,
static_cast<unsigned int>(error));
#endif // !ASMJIT_DISABLE_LOGGER
// The handler->handleError() function may throw an exception or longjmp()
// to terminate the execution of `setLastError()`. This is the reason why
// we have delayed changing the `_error` member until now.
_lastError = error;
return error;
// Update only if the current offset is greater than the section length.
size_t offset = (size_t)(_bufferPtr - _bufferData);
if (_section->getBuffer().getLength() < offset)
_section->_buffer._length = offset;
}
Error Assembler::setErrorHandler(ErrorHandler* handler) noexcept {
ErrorHandler* oldHandler = _errorHandler;
// ============================================================================
// [asmjit::Assembler - Code-Buffer]
// ============================================================================
if (oldHandler != nullptr)
oldHandler->release();
Error Assembler::setOffset(size_t offset) {
if (_lastError) return _lastError;
if (handler != nullptr)
handler = handler->addRef();
size_t length = std::max(_section->getBuffer().getLength(), getOffset());
if (ASMJIT_UNLIKELY(offset > length))
return setLastError(DebugUtils::errored(kErrorInvalidArgument));
_errorHandler = handler;
// If the `Assembler` generated any code the `_bufferPtr` may be higher than
// the section length stored in `CodeHolder` as it doesn't update it each
// time it generates machine code. This is the same as calling `sync()`.
if (_section->_buffer._length < length)
_section->_buffer._length = length;
_bufferPtr = _bufferData + offset;
return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Buffer]
// [asmjit::Assembler - Comment]
// ============================================================================
Error Assembler::_grow(size_t n) noexcept {
size_t capacity = getCapacity();
size_t after = getOffset() + n;
Error Assembler::comment(const char* s, size_t len) {
if (_lastError) return _lastError;
// Overflow.
if (n > IntTraits<uintptr_t>::maxValue() - capacity)
return setLastError(kErrorNoHeapMemory);
// Grow is called when allocation is needed, so it shouldn't happen, but on
// the other hand it is simple to catch and it's not an error.
if (after <= capacity)
#if !defined(ASMJIT_DISABLE_LOGGING)
if (_globalOptions & kOptionLoggingEnabled) {
Logger* logger = _code->getLogger();
logger->log(s, len);
logger->log("\n", 1);
return kErrorOk;
if (capacity < kMemAllocOverhead)
capacity = kMemAllocOverhead;
else
capacity += kMemAllocOverhead;
do {
size_t oldCapacity = capacity;
if (capacity < kMemAllocGrowMax)
capacity *= 2;
else
capacity += kMemAllocGrowMax;
// Overflow.
if (oldCapacity > capacity)
return setLastError(kErrorNoHeapMemory);
} while (capacity - kMemAllocOverhead < after);
capacity -= kMemAllocOverhead;
return _reserve(capacity);
}
Error Assembler::_reserve(size_t n) noexcept {
size_t capacity = getCapacity();
if (n <= capacity)
return kErrorOk;
uint8_t* newBuffer;
if (_buffer == nullptr)
newBuffer = static_cast<uint8_t*>(ASMJIT_ALLOC(n));
else
newBuffer = static_cast<uint8_t*>(ASMJIT_REALLOC(_buffer, n));
if (newBuffer == nullptr)
return setLastError(kErrorNoHeapMemory);
size_t offset = getOffset();
_buffer = newBuffer;
_end = _buffer + n;
_cursor = newBuffer + offset;
}
#else
ASMJIT_UNUSED(s);
ASMJIT_UNUSED(len);
#endif
return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Label]
// [asmjit::Assembler - Building Blocks]
// ============================================================================
Error Assembler::_newLabelId() noexcept {
LabelData* data = _zoneAllocator.allocT<LabelData>();
data->offset = -1;
data->links = nullptr;
data->exId = 0;
data->exData = nullptr;
uint32_t id = OperandUtil::makeLabelId(static_cast<uint32_t>(_labels.getLength()));
Error error = _labels.append(data);
if (error != kErrorOk) {
setLastError(kErrorNoHeapMemory);
return kInvalidValue;
Label Assembler::newLabel() {
uint32_t id = 0;
if (!_lastError) {
ASMJIT_ASSERT(_code != nullptr);
Error err = _code->newLabelId(id);
if (ASMJIT_UNLIKELY(err)) setLastError(err);
}
return id;
return Label(id);
}
LabelLink* Assembler::_newLabelLink() noexcept {
LabelLink* link = _unusedLinks;
if (link) {
_unusedLinks = link->prev;
Label Assembler::newNamedLabel(const char* name, size_t nameLength, uint32_t type, uint32_t parentId) {
uint32_t id = 0;
if (!_lastError) {
ASMJIT_ASSERT(_code != nullptr);
Error err = _code->newNamedLabelId(id, name, nameLength, type, parentId);
if (ASMJIT_UNLIKELY(err)) setLastError(err);
}
else {
link = _zoneAllocator.allocT<LabelLink>();
if (link == nullptr)
return nullptr;
}
link->prev = nullptr;
link->offset = 0;
link->displacement = 0;
link->relocId = -1;
return link;
return Label(id);
}
Error Assembler::bind(const Label& label) noexcept {
// Get label data based on label id.
uint32_t index = label.getId();
LabelData* data = getLabelData(index);
Error Assembler::bind(const Label& label) {
if (_lastError) return _lastError;
ASMJIT_ASSERT(_code != nullptr);
LabelEntry* le = _code->getLabelEntry(label);
if (ASMJIT_UNLIKELY(!le))
return setLastError(DebugUtils::errored(kErrorInvalidLabel));
// Label can be bound only once.
if (data->offset != -1)
return setLastError(kErrorLabelAlreadyBound);
if (ASMJIT_UNLIKELY(le->isBound()))
return setLastError(DebugUtils::errored(kErrorLabelAlreadyBound));
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger) {
#if !defined(ASMJIT_DISABLE_LOGGING)
if (_globalOptions & kOptionLoggingEnabled) {
StringBuilderTmp<256> sb;
sb.setFormat("L%u:", index);
sb.setFormat("L%u:", Operand::unpackId(label.getId()));
size_t binSize = 0;
if (!_logger->hasOption(Logger::kOptionBinaryForm))
binSize = kInvalidIndex;
if (!_code->_logger->hasOption(Logger::kOptionBinaryForm))
binSize = Globals::kInvalidIndex;
LogUtil::formatLine(sb, nullptr, binSize, 0, 0, _comment);
_logger->logString(Logger::kStyleLabel, sb.getData(), sb.getLength());
Logging::formatLine(sb, nullptr, binSize, 0, 0, getInlineComment());
_code->_logger->log(sb.getData(), sb.getLength());
}
#endif // !ASMJIT_DISABLE_LOGGER
#endif // !ASMJIT_DISABLE_LOGGING
Error error = kErrorOk;
Error err = kErrorOk;
size_t pos = getOffset();
LabelLink* link = data->links;
LabelLink* link = le->_links;
LabelLink* prev = nullptr;
while (link) {
intptr_t offset = link->offset;
uint32_t relocId = link->relocId;
if (link->relocId != -1) {
// Handle RelocData - We have to update RelocData information instead of
// patching the displacement in LabelData.
_relocations[link->relocId].data += static_cast<Ptr>(pos);
if (relocId != RelocEntry::kInvalidId) {
// Adjust relocation data.
RelocEntry* re = _code->_relocations[relocId];
re->_data += static_cast<uint64_t>(pos);
}
else {
// Not using relocId, this means that we are overwriting a real
// displacement in the binary stream.
// displacement in the CodeBuffer.
int32_t patchedValue = static_cast<int32_t>(
static_cast<intptr_t>(pos) - offset + link->displacement);
static_cast<intptr_t>(pos) - offset + link->rel);
// Size of the value we are going to patch. Only BYTE/DWORD is allowed.
uint32_t size = readU8At(offset);
ASMJIT_ASSERT(size == 1 || size == 4);
if (size == 4) {
writeI32At(offset, patchedValue);
}
else {
ASMJIT_ASSERT(size == 1);
if (Utils::isInt8(patchedValue))
writeU8At(offset, static_cast<uint32_t>(patchedValue) & 0xFF);
else
error = kErrorIllegalDisplacement;
}
uint32_t size = _bufferData[offset];
if (size == 4)
Utils::writeI32u(_bufferData + offset, static_cast<int32_t>(patchedValue));
else if (size == 1 && Utils::isInt8(patchedValue))
_bufferData[offset] = static_cast<uint8_t>(patchedValue & 0xFF);
else
err = DebugUtils::errored(kErrorInvalidDisplacement);
}
prev = link->prev;
_code->_unresolvedLabelsCount--;
_code->_baseHeap.release(link, sizeof(LabelLink));
link = prev;
}
// Chain unused links.
link = data->links;
if (link) {
if (prev == nullptr)
prev = link;
// Set as bound.
le->_sectionId = _section->getId();
le->_offset = pos;
le->_links = nullptr;
resetInlineComment();
prev->prev = _unusedLinks;
_unusedLinks = link;
}
// Set as bound (offset is zero or greater and no links).
data->offset = pos;
data->links = nullptr;
if (error != kErrorOk)
return setLastError(error);
_comment = nullptr;
return error;
}
// ============================================================================
// [asmjit::Assembler - Embed]
// ============================================================================
Error Assembler::embed(const void* data, uint32_t size) noexcept {
if (getRemainingSpace() < size) {
Error error = _grow(size);
if (error != kErrorOk)
return setLastError(error);
}
uint8_t* cursor = getCursor();
::memcpy(cursor, data, size);
setCursor(cursor + size);
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger)
_logger->logBinary(Logger::kStyleData, data, size);
#endif // !ASMJIT_DISABLE_LOGGER
if (err != kErrorOk)
return setLastError(err);
return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Reloc]
// ============================================================================
Error Assembler::embed(const void* data, uint32_t size) {
if (_lastError) return _lastError;
size_t Assembler::relocCode(void* dst, Ptr baseAddress) const noexcept {
if (baseAddress == kNoBaseAddress)
baseAddress = static_cast<Ptr>((uintptr_t)dst);
return _relocCode(dst, baseAddress);
if (getRemainingSpace() < size) {
Error err = _code->growBuffer(&_section->_buffer, size);
if (ASMJIT_UNLIKELY(err != kErrorOk)) return setLastError(err);
}
::memcpy(_bufferPtr, data, size);
_bufferPtr += size;
#if !defined(ASMJIT_DISABLE_LOGGING)
if (_globalOptions & kOptionLoggingEnabled)
_code->_logger->logBinary(data, size);
#endif // !ASMJIT_DISABLE_LOGGING
return kErrorOk;
}
// Embed a pointer-sized (gp-size) slot that will hold the absolute address of
// `label` once relocations are applied.
//
// A RelocEntry of type `kTypeRelToAbs` is always created. If the label is not
// bound yet, a LabelLink carrying the reloc id is queued so `bind()` can fix
// the entry later; the slot itself is emitted as zeros either way.
Error Assembler::embedLabel(const Label& label) {
  if (_lastError) return _lastError;
  ASMJIT_ASSERT(_code != nullptr);
  RelocEntry* re;
  LabelEntry* le = _code->getLabelEntry(label);
  if (ASMJIT_UNLIKELY(!le))
    return setLastError(DebugUtils::errored(kErrorInvalidLabel));
  Error err;
  uint32_t gpSize = getGpSize();
  // Make sure the code buffer has room for the address-sized slot.
  if (getRemainingSpace() < gpSize) {
    err = _code->growBuffer(&_section->_buffer, gpSize);
    if (ASMJIT_UNLIKELY(err)) return setLastError(err);
  }
#if !defined(ASMJIT_DISABLE_LOGGING)
  if (_globalOptions & kOptionLoggingEnabled)
    _code->_logger->logf(gpSize == 4 ? ".dd L%u\n" : ".dq L%u\n", Operand::unpackId(label.getId()));
#endif // !ASMJIT_DISABLE_LOGGING
  err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs, gpSize);
  if (ASMJIT_UNLIKELY(err)) return setLastError(err);
  re->_sourceSectionId = _section->getId();
  re->_sourceOffset = static_cast<uint64_t>(getOffset());
  if (le->isBound()) {
    // Label already bound - the relocation can target its final offset now.
    re->_targetSectionId = le->getSectionId();
    re->_data = static_cast<uint64_t>(static_cast<int64_t>(le->getOffset()));
  }
  else {
    // Not bound yet - queue a link; `bind()` adjusts `re->_data` on resolve.
    LabelLink* link = _code->newLabelLink(le, _section->getId(), getOffset(), 0);
    if (ASMJIT_UNLIKELY(!link))
      return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
    link->relocId = re->getId();
  }
  // Emit dummy DWORD/QWORD depending on the address size.
  ::memset(_bufferPtr, 0, gpSize);
  _bufferPtr += gpSize;
  return kErrorOk;
}
// Align the buffer to the pool's alignment, bind `label` at that position,
// and serialize the whole constant `pool` into the code buffer.
Error Assembler::embedConstPool(const Label& label, const ConstPool& pool) {
  if (_lastError) return _lastError;
  // NOTE(review): unlike the other paths in this file this returns the error
  // directly instead of going through `setLastError()` - confirm intentional.
  if (!isLabelValid(label))
    return DebugUtils::errored(kErrorInvalidLabel);
  ASMJIT_PROPAGATE(align(kAlignData, static_cast<uint32_t>(pool.getAlignment())));
  ASMJIT_PROPAGATE(bind(label));
  size_t size = pool.getSize();
  if (getRemainingSpace() < size) {
    Error err = _code->growBuffer(&_section->_buffer, size);
    if (ASMJIT_UNLIKELY(err)) return setLastError(err);
  }
  uint8_t* p = _bufferPtr;
  // Serialize all pooled constants at the current (aligned) position.
  pool.fill(p);
#if !defined(ASMJIT_DISABLE_LOGGING)
  if (_globalOptions & kOptionLoggingEnabled)
    _code->_logger->logBinary(p, size);
#endif // !ASMJIT_DISABLE_LOGGING
  _bufferPtr += size;
  return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Make]
// [asmjit::Assembler - Emit-Helpers]
// ============================================================================
void* Assembler::make() noexcept {
// Do nothing on error condition or if no instruction has been emitted.
if (_lastError != kErrorOk || getCodeSize() == 0)
return nullptr;
#if !defined(ASMJIT_DISABLE_LOGGING)
void Assembler::_emitLog(
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3,
uint32_t relSize, uint32_t imLen, uint8_t* afterCursor) {
void* p;
Error error = _runtime->add(&p, this);
Logger* logger = _code->getLogger();
ASMJIT_ASSERT(logger != nullptr);
ASMJIT_ASSERT(options & CodeEmitter::kOptionLoggingEnabled);
if (error != kErrorOk)
setLastError(error);
StringBuilderTmp<256> sb;
uint32_t logOptions = logger->getOptions();
return p;
uint8_t* beforeCursor = _bufferPtr;
intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
sb.appendString(logger->getIndentation());
Operand_ opArray[6];
opArray[0].copyFrom(o0);
opArray[1].copyFrom(o1);
opArray[2].copyFrom(o2);
opArray[3].copyFrom(o3);
opArray[4].copyFrom(_op4);
opArray[5].copyFrom(_op5);
if (!(options & CodeEmitter::kOptionOp4)) opArray[4].reset();
if (!(options & CodeEmitter::kOptionOp5)) opArray[5].reset();
Logging::formatInstruction(
sb, logOptions,
this, getArchType(),
instId, options, _opExtra, opArray, 6);
if ((logOptions & Logger::kOptionBinaryForm) != 0)
Logging::formatLine(sb, _bufferPtr, emittedSize, relSize, imLen, getInlineComment());
else
Logging::formatLine(sb, nullptr, Globals::kInvalidIndex, 0, 0, getInlineComment());
logger->log(sb.getData(), sb.getLength());
}
// ============================================================================
// [asmjit::Assembler - Emit (Helpers)]
// ============================================================================
Error Assembler::_emitFailed(
Error err,
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
#define NA noOperand
StringBuilderTmp<256> sb;
sb.appendString(DebugUtils::errorAsString(err));
sb.appendString(": ");
Error Assembler::emit(uint32_t code) {
return _emit(code, NA, NA, NA, NA);
Operand_ opArray[6];
opArray[0].copyFrom(o0);
opArray[1].copyFrom(o1);
opArray[2].copyFrom(o2);
opArray[3].copyFrom(o3);
opArray[4].copyFrom(_op4);
opArray[5].copyFrom(_op5);
if (!(options & CodeEmitter::kOptionOp4)) opArray[4].reset();
if (!(options & CodeEmitter::kOptionOp5)) opArray[5].reset();
Logging::formatInstruction(
sb, 0,
this, getArchType(),
instId, options, _opExtra, opArray, 6);
resetOptions();
resetInlineComment();
return setLastError(err, sb.getData());
}
Error Assembler::emit(uint32_t code, const Operand& o0) {
return _emit(code, o0, NA, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1) {
return _emit(code, o0, o1, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) {
return _emit(code, o0, o1, o2, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) {
return _emit(code, o0, o1, o2, o3);
}
Error Assembler::emit(uint32_t code, int o0) {
return _emit(code, Imm(o0), NA, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, int o1) {
return _emit(code, o0, Imm(o1), NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2) {
return _emit(code, o0, o1, Imm(o2), NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3) {
return _emit(code, o0, o1, o2, Imm(o3));
}
Error Assembler::emit(uint32_t code, int64_t o0) {
return _emit(code, Imm(o0), NA, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, int64_t o1) {
return _emit(code, o0, Imm(o1), NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, int64_t o2) {
return _emit(code, o0, o1, Imm(o2), NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int64_t o3) {
return _emit(code, o0, o1, o2, Imm(o3));
}
#undef NA
#endif
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,605 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_BUILDER)
// [Dependencies]
#include "../base/codebuilder.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CodeBuilder - Construction / Destruction]
// ============================================================================
// Create a new (unattached) CodeBuilder.
//
// The zone constructor arguments are allocation-block sizes (minus the zone's
// own per-block overhead), not hard limits - zones grow block by block.
CodeBuilder::CodeBuilder() noexcept
  : CodeEmitter(kTypeBuilder),
    _cbBaseZone(32768 - Zone::kZoneOverhead),  // Backs `_cbHeap` (nodes, tables).
    _cbDataZone(16384 - Zone::kZoneOverhead),  // Embedded data / comment strings.
    _cbPassZone(32768 - Zone::kZoneOverhead),  // Presumably pass scratch memory - see passes.
    _cbHeap(&_cbBaseZone),
    _cbPasses(),
    _cbLabels(),
    _position(0),
    _nodeFlags(0),
    _firstNode(nullptr),
    _lastNode(nullptr),
    _cursor(nullptr) {}
// Trivial - node and data memory is owned and released by the zones above.
CodeBuilder::~CodeBuilder() noexcept {}
// ============================================================================
// [asmjit::CodeBuilder - Events]
// ============================================================================
// Called when the builder is attached to `code`. There is no builder-specific
// state to set up here, so the call is forwarded to the base CodeEmitter.
Error CodeBuilder::onAttach(CodeHolder* code) noexcept {
  return Base::onAttach(code);
}
// Called when the builder is detached from `code` - drops all nodes, labels,
// and passes, and returns the builder to its default-constructed state.
Error CodeBuilder::onDetach(CodeHolder* code) noexcept {
  _cbPasses.reset();
  _cbLabels.reset();
  _cbHeap.reset(&_cbBaseZone);  // Release heap blocks before the zones reset.
  _cbBaseZone.reset(false);     // `false` - keep the first block allocated.
  _cbDataZone.reset(false);
  _cbPassZone.reset(false);
  _position = 0;
  _nodeFlags = 0;
  _firstNode = nullptr;
  _lastNode = nullptr;
  _cursor = nullptr;
  return Base::onDetach(code);
}
// ============================================================================
// [asmjit::CodeBuilder - Node-Factory]
// ============================================================================
// Get (or lazily create) the CBLabel node that corresponds to label `id` and
// store it into `pOut`. Fails if `id` is unknown to the attached CodeHolder.
Error CodeBuilder::getCBLabel(CBLabel** pOut, uint32_t id) noexcept {
  if (_lastError) return _lastError;
  ASMJIT_ASSERT(_code != nullptr);

  size_t slot = Operand::unpackId(id);
  if (ASMJIT_UNLIKELY(slot >= _code->getLabelsCount()))
    return DebugUtils::errored(kErrorInvalidLabel);

  // Grow the sparse id -> node table on demand.
  if (slot >= _cbLabels.getLength())
    ASMJIT_PROPAGATE(_cbLabels.resize(&_cbHeap, slot + 1));

  CBLabel* labelNode = _cbLabels[slot];
  if (labelNode == nullptr) {
    // First request for this id - materialize the node and cache it.
    labelNode = newNodeT<CBLabel>(id);
    if (ASMJIT_UNLIKELY(labelNode == nullptr))
      return DebugUtils::errored(kErrorNoHeapMemory);
    _cbLabels[slot] = labelNode;
  }

  *pOut = labelNode;
  return kErrorOk;
}
// Assign a fresh label id (from the attached CodeHolder) to `node` and record
// the node in the builder's label table.
Error CodeBuilder::registerLabelNode(CBLabel* node) noexcept {
  if (_lastError) return _lastError;
  ASMJIT_ASSERT(_code != nullptr);

  // NOTE: `setLastError()` is intentionally not called here - this function
  // is noexcept and is reached from noexcept factories such as
  // `newLabelNode()` and `newFuncNode()`.
  uint32_t labelId;
  ASMJIT_PROPAGATE(_code->newLabelId(labelId));

  size_t slot = Operand::unpackId(labelId);
  // The id was just created, so its slot cannot exist in the table yet.
  ASMJIT_ASSERT(_cbLabels.getLength() < slot + 1);
  ASMJIT_PROPAGATE(_cbLabels.resize(&_cbHeap, slot + 1));

  _cbLabels[slot] = node;
  node->_id = labelId;
  return kErrorOk;
}
// Create a `CBLabel` node and register it (allocates a label id for it).
// Returns null on allocation or registration failure.
CBLabel* CodeBuilder::newLabelNode() noexcept {
  CBLabel* labelNode = newNodeT<CBLabel>();
  if (labelNode != nullptr && registerLabelNode(labelNode) == kErrorOk)
    return labelNode;
  return nullptr;
}
// Create a `CBAlign` node holding the requested alignment `mode` and
// `alignment` value; returns null on allocation failure.
CBAlign* CodeBuilder::newAlignNode(uint32_t mode, uint32_t alignment) noexcept {
  return newNodeT<CBAlign>(mode, alignment);
}
// Create a `CBData` node. A payload larger than the node's inline buffer is
// copied into `_cbDataZone`, so the caller's buffer doesn't have to outlive
// the node. Returns null on allocation failure.
CBData* CodeBuilder::newDataNode(const void* data, uint32_t size) noexcept {
  if (size > CBData::kInlineBufferSize) {
    void* copy = _cbDataZone.alloc(size);
    if (copy == nullptr)
      return nullptr;

    if (data != nullptr)
      ::memcpy(copy, data, size);
    data = copy;
  }

  return newNodeT<CBData>(const_cast<void*>(data), size);
}
// Create a `CBConstPool` node and register it as a label (const pools are
// addressed through a label). Returns null on failure.
CBConstPool* CodeBuilder::newConstPool() noexcept {
  CBConstPool* poolNode = newNodeT<CBConstPool>();
  if (poolNode != nullptr && registerLabelNode(poolNode) == kErrorOk)
    return poolNode;
  return nullptr;
}
// Create a `CBComment` node. A non-empty comment is duplicated (with a null
// terminator) into `_cbDataZone`; `len == Globals::kInvalidIndex` means the
// string is null-terminated and its length is computed here.
CBComment* CodeBuilder::newCommentNode(const char* s, size_t len) noexcept {
  if (s != nullptr) {
    if (len == Globals::kInvalidIndex)
      len = ::strlen(s);

    if (len != 0) {
      s = static_cast<char*>(_cbDataZone.dup(s, len, true));
      if (s == nullptr)
        return nullptr;
    }
  }

  return newNodeT<CBComment>(s);
}
// ============================================================================
// [asmjit::CodeBuilder - Code-Emitter]
// ============================================================================
// Create and return a new anonymous label. On failure the error is recorded
// via `setLastError()` and an invalid label is returned.
Label CodeBuilder::newLabel() {
  uint32_t labelId = kInvalidValue;

  if (!_lastError) {
    CBLabel* labelNode = newNodeT<CBLabel>(labelId);
    if (ASMJIT_UNLIKELY(labelNode == nullptr)) {
      setLastError(DebugUtils::errored(kErrorNoHeapMemory));
    }
    else {
      Error err = registerLabelNode(labelNode);
      if (ASMJIT_UNLIKELY(err))
        setLastError(err);
      else
        labelId = labelNode->getId();
    }
  }

  return Label(labelId);
}
// Create and return a new named label. On failure the error is recorded via
// `setLastError()` and an invalid label is returned.
Label CodeBuilder::newNamedLabel(const char* name, size_t nameLength, uint32_t type, uint32_t parentId) {
  uint32_t id = kInvalidValue;

  if (!_lastError) {
    CBLabel* node = newNodeT<CBLabel>(id);
    if (ASMJIT_UNLIKELY(!node)) {
      setLastError(DebugUtils::errored(kErrorNoHeapMemory));
    }
    else {
      Error err = _code->newNamedLabelId(id, name, nameLength, type, parentId);
      if (ASMJIT_UNLIKELY(err)) {
        setLastError(err);
      }
      else {
        // BUG FIX: the original never linked `node` with the freshly allocated
        // id nor registered it in `_cbLabels` - it then did `id = node->getId()`,
        // returning the ctor-time `kInvalidValue` and leaving `bind()` to
        // create a duplicate CBLabel for the same id. Mirror what
        // `registerLabelNode()` does, but with the named id we just received.
        node->_id = id;

        size_t index = Operand::unpackId(id);
        err = _cbLabels.resize(&_cbHeap, index + 1);
        if (ASMJIT_UNLIKELY(err)) {
          setLastError(err);
          id = kInvalidValue;
        }
        else {
          _cbLabels[index] = node;
        }
      }
    }
  }

  return Label(id);
}
// Bind `label` at the current cursor position by inserting its `CBLabel` node.
Error CodeBuilder::bind(const Label& label) {
  if (_lastError)
    return _lastError;

  CBLabel* labelNode;
  Error err = getCBLabel(&labelNode, label);
  if (ASMJIT_UNLIKELY(err != kErrorOk))
    return setLastError(err);

  addNode(labelNode);
  return kErrorOk;
}
// Insert a `CBAlign` node at the cursor.
Error CodeBuilder::align(uint32_t mode, uint32_t alignment) {
  if (_lastError)
    return _lastError;

  CBAlign* alignNode = newAlignNode(mode, alignment);
  if (ASMJIT_UNLIKELY(alignNode == nullptr))
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  addNode(alignNode);
  return kErrorOk;
}
// Insert a `CBData` node embedding `size` bytes of `data` at the cursor.
Error CodeBuilder::embed(const void* data, uint32_t size) {
  if (_lastError)
    return _lastError;

  CBData* dataNode = newDataNode(data, size);
  if (ASMJIT_UNLIKELY(dataNode == nullptr))
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  addNode(dataNode);
  return kErrorOk;
}
// Insert a `CBLabelData` node referencing `label` at the cursor.
Error CodeBuilder::embedLabel(const Label& label) {
  if (_lastError)
    return _lastError;

  CBLabelData* labelDataNode = newNodeT<CBLabelData>(label.getId());
  if (ASMJIT_UNLIKELY(labelDataNode == nullptr))
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  addNode(labelDataNode);
  return kErrorOk;
}
// Align to the pool's alignment, bind `label`, and embed the serialized
// content of `pool` at the cursor.
Error CodeBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
  if (_lastError)
    return _lastError;

  if (!isLabelValid(label))
    return setLastError(DebugUtils::errored(kErrorInvalidLabel));

  ASMJIT_PROPAGATE(align(kAlignData, static_cast<uint32_t>(pool.getAlignment())));
  ASMJIT_PROPAGATE(bind(label));

  CBData* dataNode = newDataNode(nullptr, static_cast<uint32_t>(pool.getSize()));
  if (ASMJIT_UNLIKELY(dataNode == nullptr))
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  // The node was created empty (nullptr payload); fill it from the pool.
  pool.fill(dataNode->getData());
  addNode(dataNode);
  return kErrorOk;
}
// Insert a `CBComment` node containing `s` (length `len`, or computed via
// strlen when `len` is `Globals::kInvalidIndex`).
Error CodeBuilder::comment(const char* s, size_t len) {
  if (_lastError)
    return _lastError;

  CBComment* commentNode = newCommentNode(s, len);
  if (ASMJIT_UNLIKELY(commentNode == nullptr))
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  addNode(commentNode);
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeBuilder - Node-Management]
// ============================================================================
// Add `node` after the cursor and make it the new cursor. When there is no
// cursor yet the node is prepended at the beginning of the list. The node
// must not be linked already (asserted).
CBNode* CodeBuilder::addNode(CBNode* node) noexcept {
  ASMJIT_ASSERT(node);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);

  if (!_cursor) {
    // No cursor - the node either starts the list or becomes its new head.
    if (!_firstNode) {
      _firstNode = node;
      _lastNode = node;
    }
    else {
      node->_next = _firstNode;
      _firstNode->_prev = node;
      _firstNode = node;
    }
  }
  else {
    // Link the node between the cursor and the cursor's successor.
    CBNode* prev = _cursor;
    CBNode* next = _cursor->_next;

    node->_prev = prev;
    node->_next = next;

    prev->_next = node;
    if (next)
      next->_prev = node;
    else
      _lastNode = node; // The cursor was the tail.
  }

  _cursor = node;
  return node;
}
// Insert `node` immediately after `ref`; updates `_lastNode` when `ref` was
// the tail. Does not move the cursor. `node` must not be linked already.
CBNode* CodeBuilder::addAfter(CBNode* node, CBNode* ref) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(ref != nullptr);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);

  CBNode* before = ref;
  CBNode* after = ref->_next;

  node->_prev = before;
  node->_next = after;

  before->_next = node;
  if (after == nullptr)
    _lastNode = node;
  else
    after->_prev = node;

  return node;
}
// Insert `node` immediately before `ref`; updates `_firstNode` when `ref` was
// the head. Does not move the cursor. `node` must not be linked already.
CBNode* CodeBuilder::addBefore(CBNode* node, CBNode* ref) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(ref != nullptr);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);

  CBNode* before = ref->_prev;
  CBNode* after = ref;

  node->_prev = before;
  node->_next = after;

  after->_prev = node;
  if (before == nullptr)
    _firstNode = node;
  else
    before->_next = node;

  return node;
}
// Called whenever a node is unlinked. If the node is a (conditional) jump it
// is removed from its target label's `_from` chain and the label's reference
// count is decremented. `self` is unused here but kept for interface parity.
static ASMJIT_INLINE void CodeBuilder_nodeRemoved(CodeBuilder* self, CBNode* node_) noexcept {
  if (!node_->isJmpOrJcc())
    return;

  CBJump* jumpNode = static_cast<CBJump*>(node_);
  CBLabel* target = jumpNode->getTarget();
  if (target == nullptr)
    return;

  // Unlink `jumpNode` from the target's singly-linked `_from` chain. The jump
  // is expected to be present (asserted in debug builds).
  CBJump** link = &target->_from;
  for (;;) {
    ASMJIT_ASSERT(*link != nullptr);
    CBJump* candidate = *link;

    if (candidate == nullptr)
      break;

    if (candidate == jumpNode) {
      *link = jumpNode->_jumpNext;
      break;
    }

    link = &candidate->_jumpNext;
  }

  target->subNumRefs();
}
// Unlink `node` from the list and clear its links. If the cursor pointed at
// `node` it is moved to the node's predecessor. The node must currently be
// part of the list.
CBNode* CodeBuilder::removeNode(CBNode* node) noexcept {
  CBNode* prev = node->_prev;
  CBNode* next = node->_next;

  if (_firstNode == node)
    _firstNode = next;
  else
    prev->_next = next;

  if (_lastNode == node)
    _lastNode = prev;
  else
    next->_prev = prev;

  node->_prev = nullptr;
  node->_next = nullptr;

  if (_cursor == node)
    _cursor = prev;
  // Keep label cross-references consistent (jump nodes reference labels).
  CodeBuilder_nodeRemoved(this, node);

  return node;
}
// Unlink all nodes in the inclusive range [first, last] and clear their links.
// The nodes must form a contiguous chain that is part of the builder's list.
// If the cursor pointed into the range it is moved to `first`'s predecessor.
void CodeBuilder::removeNodes(CBNode* first, CBNode* last) noexcept {
  if (first == last) {
    removeNode(first);
    return;
  }

  CBNode* prev = first->_prev;
  CBNode* next = last->_next;

  // Detach the whole [first, last] range from the list.
  if (_firstNode == first)
    _firstNode = next;
  else
    prev->_next = next;

  if (_lastNode == last)
    _lastNode = prev;
  else
    next->_prev = prev;

  // Clear the links of every removed node, fix the cursor, and notify about
  // each removal (keeps label cross-references consistent).
  CBNode* node = first;
  for (;;) {
    // NOTE: interior links of the range are still intact at this point, so
    // `getNext()` is valid until the node's links are cleared below.
    CBNode* following = node->getNext();

    node->_prev = nullptr;
    node->_next = nullptr;

    if (_cursor == node)
      _cursor = prev;
    CodeBuilder_nodeRemoved(this, node);

    if (node == last)
      break;

    // BUG FIX: the original asserted `next != nullptr` *before* checking for
    // `last`, which fired in debug builds whenever `last` was the list tail
    // (its successor is null). Only interior nodes must have a successor, so
    // the assert now runs after the `last` check. (It also shadowed the outer
    // `next` variable; renamed to `following`.)
    ASMJIT_ASSERT(following != nullptr);
    node = following;
  }
}
// Move the cursor to `node` (may be null) and return the previous cursor.
CBNode* CodeBuilder::setCursor(CBNode* node) noexcept {
  CBNode* previous = _cursor;
  _cursor = node;
  return previous;
}
// ============================================================================
// [asmjit::CodeBuilder - Passes]
// ============================================================================
// Linear search for a registered pass whose name matches `name` exactly;
// returns null when no such pass exists.
ASMJIT_FAVOR_SIZE CBPass* CodeBuilder::getPassByName(const char* name) const noexcept {
  size_t count = _cbPasses.getLength();
  for (size_t i = 0; i < count; i++) {
    CBPass* candidate = _cbPasses[i];
    if (::strcmp(candidate->getName(), name) == 0)
      return candidate;
  }
  return nullptr;
}
// Append `pass` to the pass list and take ownership of it.
ASMJIT_FAVOR_SIZE Error CodeBuilder::addPass(CBPass* pass) noexcept {
  // Since this is directly called by `addPassT()` we treat `null` argument
  // as out-of-memory condition. Otherwise it would be API misuse.
  if (ASMJIT_UNLIKELY(pass == nullptr))
    return DebugUtils::errored(kErrorNoHeapMemory);

  if (ASMJIT_UNLIKELY(pass->_cb != nullptr)) {
    // Kind of weird, but okay - adding the same pass twice is a no-op...
    if (pass->_cb == this)
      return kErrorOk;
    // ...while a pass owned by another CodeBuilder is an error.
    return DebugUtils::errored(kErrorInvalidState);
  }

  ASMJIT_PROPAGATE(_cbPasses.append(&_cbHeap, pass));
  pass->_cb = this;
  return kErrorOk;
}
// Remove `pass` from the pass list (when owned by this builder) and destroy
// it. An unattached pass is destroyed as well.
ASMJIT_FAVOR_SIZE Error CodeBuilder::deletePass(CBPass* pass) noexcept {
  if (ASMJIT_UNLIKELY(pass == nullptr))
    return DebugUtils::errored(kErrorInvalidArgument);

  if (pass->_cb != nullptr) {
    // A pass owned by another CodeBuilder cannot be deleted here.
    if (pass->_cb != this)
      return DebugUtils::errored(kErrorInvalidState);

    size_t index = _cbPasses.indexOf(pass);
    ASMJIT_ASSERT(index != Globals::kInvalidIndex);

    pass->_cb = nullptr;
    _cbPasses.removeAt(index);
  }

  // Only the destructor is invoked - passes are allocated from a zone
  // (see `newPassT()`), so there is no per-object memory to free.
  pass->~CBPass();
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeBuilder - Serialization]
// ============================================================================
// Re-emit the whole node list into another CodeEmitter `dst` (an assembler or
// another builder). Iteration stops at the first error reported by `dst`,
// which is also the return value.
Error CodeBuilder::serialize(CodeEmitter* dst) {
  Error err = kErrorOk;
  CBNode* node_ = getFirstNode();

  do {
    // Propagate the node's inline comment so `dst` can annotate its output.
    dst->setInlineComment(node_->getInlineComment());

    switch (node_->getType()) {
      case CBNode::kNodeAlign: {
        CBAlign* node = static_cast<CBAlign*>(node_);
        err = dst->align(node->getMode(), node->getAlignment());
        break;
      }

      case CBNode::kNodeData: {
        CBData* node = static_cast<CBData*>(node_);
        err = dst->embed(node->getData(), node->getSize());
        break;
      }

      // A function node is serialized as a plain label bind.
      case CBNode::kNodeFunc:
      case CBNode::kNodeLabel: {
        CBLabel* node = static_cast<CBLabel*>(node_);
        err = dst->bind(node->getLabel());
        break;
      }

      case CBNode::kNodeLabelData: {
        CBLabelData* node = static_cast<CBLabelData*>(node_);
        err = dst->embedLabel(node->getLabel());
        break;
      }

      case CBNode::kNodeConstPool: {
        CBConstPool* node = static_cast<CBConstPool*>(node_);
        err = dst->embedConstPool(node->getLabel(), node->getConstPool());
        break;
      }

      case CBNode::kNodeInst:
      case CBNode::kNodeFuncCall: {
        CBInst* node = static_cast<CBInst*>(node_);

        uint32_t instId = node->getInstId();
        uint32_t options = node->getOptions();

        const Operand* opArray = node->getOpArray();
        uint32_t opCount = node->getOpCount();

        // Only four operands go to `_emit()` directly; the 5th and 6th are
        // passed through `_op4`/`_op5` and signalled via emit options.
        const Operand_* o0 = &dst->_none;
        const Operand_* o1 = &dst->_none;
        const Operand_* o2 = &dst->_none;
        const Operand_* o3 = &dst->_none;

        // Intentional fall-through on every case - each count also assigns
        // all lower-indexed operands.
        switch (opCount) {
          case 6: dst->_op5 = opArray[5]; options |= CodeEmitter::kOptionOp5; ASMJIT_FALLTHROUGH;
          case 5: dst->_op4 = opArray[4]; options |= CodeEmitter::kOptionOp4; ASMJIT_FALLTHROUGH;
          case 4: o3 = &opArray[3]; ASMJIT_FALLTHROUGH;
          case 3: o2 = &opArray[2]; ASMJIT_FALLTHROUGH;
          case 2: o1 = &opArray[1]; ASMJIT_FALLTHROUGH;
          case 1: o0 = &opArray[0]; ASMJIT_FALLTHROUGH;
          case 0: break;
        }

        dst->setOptions(options);
        err = dst->_emit(instId, *o0, *o1, *o2, *o3);
        break;
      }

      case CBNode::kNodeComment: {
        CBComment* node = static_cast<CBComment*>(node_);
        err = dst->comment(node->getInlineComment());
        break;
      }

      // Unknown/custom nodes are skipped; subclasses are expected to lower
      // them before serialization.
      default:
        break;
    }

    if (err) break;
    node_ = node_->getNext();
  } while (node_);

  return err;
}
// ============================================================================
// [asmjit::CBPass]
// ============================================================================
// Create a pass named `name`; the pass is unattached until added to a
// CodeBuilder via `addPass()`. NOTE: only the name pointer is stored, the
// string itself is not copied.
CBPass::CBPass(const char* name) noexcept
  : _cb(nullptr),
    _name(name) {}

CBPass::~CBPass() noexcept {}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_BUILDER

View File

@@ -0,0 +1,915 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CODEBUILDER_H
#define _ASMJIT_BASE_CODEBUILDER_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_BUILDER)
// [Dependencies]
#include "../base/assembler.h"
#include "../base/codeholder.h"
#include "../base/constpool.h"
#include "../base/operand.h"
#include "../base/utils.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [Forward Declarations]
// ============================================================================
class CBNode;
class CBPass;
class CBAlign;
class CBComment;
class CBConstPool;
class CBData;
class CBInst;
class CBJump;
class CBLabel;
class CBLabelData;
class CBSentinel;
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::CodeBuilder]
// ============================================================================
//! Code builder - stores emitted instructions, labels, and directives as a
//! doubly-linked list of nodes that can be inspected, transformed by passes,
//! and finally serialized into another `CodeEmitter`.
class ASMJIT_VIRTAPI CodeBuilder : public CodeEmitter {
public:
  ASMJIT_NONCOPYABLE(CodeBuilder)
  typedef CodeEmitter Base;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CodeBuilder` instance.
  ASMJIT_API CodeBuilder() noexcept;
  //! Destroy the `CodeBuilder` instance.
  ASMJIT_API virtual ~CodeBuilder() noexcept;

  // --------------------------------------------------------------------------
  // [Events]
  // --------------------------------------------------------------------------

  ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override;
  ASMJIT_API virtual Error onDetach(CodeHolder* code) noexcept override;

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get a vector of CBPass objects that will be executed by `process()`.
  ASMJIT_INLINE const ZoneVector<CBPass*>& getPasses() const noexcept { return _cbPasses; }

  //! Get a vector of CBLabel nodes.
  //!
  //! NOTE: If a label of some index is not associated with `CodeBuilder` it
  //! would be null, so always check for nulls if you iterate over the vector.
  ASMJIT_INLINE const ZoneVector<CBLabel*>& getLabels() const noexcept { return _cbLabels; }

  //! Get the first node.
  ASMJIT_INLINE CBNode* getFirstNode() const noexcept { return _firstNode; }
  //! Get the last node.
  ASMJIT_INLINE CBNode* getLastNode() const noexcept { return _lastNode; }

  // --------------------------------------------------------------------------
  // [Node-Management]
  // --------------------------------------------------------------------------

  //! \internal
  template<typename T>
  ASMJIT_INLINE T* newNodeT() noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this); }

  //! \internal
  template<typename T, typename P0>
  ASMJIT_INLINE T* newNodeT(P0 p0) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0); }

  //! \internal
  template<typename T, typename P0, typename P1>
  ASMJIT_INLINE T* newNodeT(P0 p0, P1 p1) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0, p1); }

  //! \internal
  template<typename T, typename P0, typename P1, typename P2>
  ASMJIT_INLINE T* newNodeT(P0 p0, P1 p1, P2 p2) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0, p1, p2); }

  //! Register `node` - allocates a fresh label id and associates it with the node.
  ASMJIT_API Error registerLabelNode(CBLabel* node) noexcept;

  //! Get `CBLabel` by `id`.
  ASMJIT_API Error getCBLabel(CBLabel** pOut, uint32_t id) noexcept;
  //! Get `CBLabel` by `label`.
  ASMJIT_INLINE Error getCBLabel(CBLabel** pOut, const Label& label) noexcept { return getCBLabel(pOut, label.getId()); }

  //! Create a new \ref CBLabel node.
  ASMJIT_API CBLabel* newLabelNode() noexcept;
  //! Create a new \ref CBAlign node.
  ASMJIT_API CBAlign* newAlignNode(uint32_t mode, uint32_t alignment) noexcept;
  //! Create a new \ref CBData node.
  ASMJIT_API CBData* newDataNode(const void* data, uint32_t size) noexcept;
  //! Create a new \ref CBConstPool node.
  ASMJIT_API CBConstPool* newConstPool() noexcept;
  //! Create a new \ref CBComment node.
  ASMJIT_API CBComment* newCommentNode(const char* s, size_t len) noexcept;

  // --------------------------------------------------------------------------
  // [Code-Emitter]
  // --------------------------------------------------------------------------

  ASMJIT_API virtual Label newLabel() override;
  ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameLength = Globals::kInvalidIndex, uint32_t type = Label::kTypeGlobal, uint32_t parentId = kInvalidValue) override;
  ASMJIT_API virtual Error bind(const Label& label) override;
  ASMJIT_API virtual Error align(uint32_t mode, uint32_t alignment) override;
  ASMJIT_API virtual Error embed(const void* data, uint32_t size) override;
  ASMJIT_API virtual Error embedLabel(const Label& label) override;
  ASMJIT_API virtual Error embedConstPool(const Label& label, const ConstPool& pool) override;
  ASMJIT_API virtual Error comment(const char* s, size_t len = Globals::kInvalidIndex) override;

  // --------------------------------------------------------------------------
  // [Node-Management]
  // --------------------------------------------------------------------------

  //! Add `node` after the current and set current to `node`.
  ASMJIT_API CBNode* addNode(CBNode* node) noexcept;
  //! Insert `node` after `ref`.
  ASMJIT_API CBNode* addAfter(CBNode* node, CBNode* ref) noexcept;
  //! Insert `node` before `ref`.
  ASMJIT_API CBNode* addBefore(CBNode* node, CBNode* ref) noexcept;
  //! Remove `node`.
  ASMJIT_API CBNode* removeNode(CBNode* node) noexcept;
  //! Remove multiple nodes.
  ASMJIT_API void removeNodes(CBNode* first, CBNode* last) noexcept;

  //! Get current node.
  //!
  //! \note If this method returns null it means that nothing has been
  //! emitted yet.
  ASMJIT_INLINE CBNode* getCursor() const noexcept { return _cursor; }
  //! Set the current node without returning the previous node.
  ASMJIT_INLINE void _setCursor(CBNode* node) noexcept { _cursor = node; }
  //! Set the current node to `node` and return the previous one.
  ASMJIT_API CBNode* setCursor(CBNode* node) noexcept;

  // --------------------------------------------------------------------------
  // [Passes]
  // --------------------------------------------------------------------------

  //! \internal - Create a pass of type `T` in `_cbBaseZone`.
  template<typename T>
  ASMJIT_INLINE T* newPassT() noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(); }

  //! \internal
  template<typename T, typename P0>
  ASMJIT_INLINE T* newPassT(P0 p0) noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(p0); }

  //! \internal
  template<typename T, typename P0, typename P1>
  ASMJIT_INLINE T* newPassT(P0 p0, P1 p1) noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(p0, p1); }

  //! Create a pass of type `T` and add it to the pass list.
  template<typename T>
  ASMJIT_INLINE Error addPassT() noexcept { return addPass(newPassT<T>()); }

  //! \overload - forwards `p0` to `T`'s constructor.
  //!
  //! BUG FIX: these overloads previously instantiated `newPassT<P0>(p0)` and
  //! `newPassT<P0, P1>(p0, p1)` respectively, which constructed the
  //! *argument* type instead of the requested pass type `T`. They now
  //! construct `T` and let the argument types be deduced.
  template<typename T, typename P0>
  ASMJIT_INLINE Error addPassT(P0 p0) noexcept { return addPass(newPassT<T>(p0)); }

  //! \overload - forwards `p0` and `p1` to `T`'s constructor.
  template<typename T, typename P0, typename P1>
  ASMJIT_INLINE Error addPassT(P0 p0, P1 p1) noexcept { return addPass(newPassT<T>(p0, p1)); }

  //! Get a `CBPass` by name.
  ASMJIT_API CBPass* getPassByName(const char* name) const noexcept;
  //! Add `pass` to the list of passes.
  ASMJIT_API Error addPass(CBPass* pass) noexcept;
  //! Remove `pass` from the list of passes and delete it.
  ASMJIT_API Error deletePass(CBPass* pass) noexcept;

  // --------------------------------------------------------------------------
  // [Serialization]
  // --------------------------------------------------------------------------

  ASMJIT_API virtual Error serialize(CodeEmitter* dst);

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  Zone _cbBaseZone;                      //!< Base zone used to allocate nodes and `CBPass`.
  Zone _cbDataZone;                      //!< Data zone used to allocate data and names.
  Zone _cbPassZone;                      //!< Zone passed to `CBPass::process()`.
  ZoneHeap _cbHeap;                      //!< ZoneHeap that uses `_cbBaseZone`.

  ZoneVector<CBPass*> _cbPasses;         //!< Array of `CBPass` objects.
  ZoneVector<CBLabel*> _cbLabels;        //!< Maps label indexes to `CBLabel` nodes.

  CBNode* _firstNode;                    //!< First node of the current section.
  CBNode* _lastNode;                     //!< Last node of the current section.
  CBNode* _cursor;                       //!< Current node (cursor).

  uint32_t _position;                    //!< Flow-id assigned to each new node.
  uint32_t _nodeFlags;                   //!< Flags assigned to each new node.
};
// ============================================================================
// [asmjit::CBPass]
// ============================================================================
//! `CodeBuilder` pass used to code transformations, analysis, and lowering.
class ASMJIT_VIRTAPI CBPass {
public:
  ASMJIT_NONCOPYABLE(CBPass);

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create an unattached pass named `name` (only the pointer is stored).
  ASMJIT_API CBPass(const char* name) noexcept;
  ASMJIT_API virtual ~CBPass() noexcept;

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  //! Process the code stored in CodeBuffer `cb`.
  //!
  //! This is the only function that is called by the `CodeBuilder` to process
  //! the code. It passes the CodeBuilder itself (`cb`) and also a zone memory
  //! allocator `zone`, which will be reset after the `process()` returns. The
  //! allocator should be used for all allocations as it's fast and everything
  //! it allocates will be released at once when `process()` returns.
  virtual Error process(Zone* zone) noexcept = 0;

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the `CodeBuilder` this pass is attached to (null when unattached).
  ASMJIT_INLINE const CodeBuilder* cb() const noexcept { return _cb; }
  //! Get the name the pass was constructed with.
  ASMJIT_INLINE const char* getName() const noexcept { return _name; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  CodeBuilder* _cb;                      //!< CodeBuilder this pass is assigned to.
  const char* _name;                     //!< Name of the pass.
};
// ============================================================================
// [asmjit::CBNode]
// ============================================================================
//! Node (CodeBuilder).
//!
//! Every node represents a building-block used by \ref CodeBuilder. It can be
//! instruction, data, label, comment, directive, or any other high-level
//! representation that can be transformed to the building blocks mentioned.
//! Every class that inherits \ref CodeBuilder can define its own nodes that it
//! can lower to basic nodes.
class CBNode {
public:
  ASMJIT_NONCOPYABLE(CBNode)

  // --------------------------------------------------------------------------
  // [Type]
  // --------------------------------------------------------------------------

  //! Type of \ref CBNode.
  ASMJIT_ENUM(NodeType) {
    kNodeNone = 0,                       //!< Invalid node (internal, don't use).

    // [CodeBuilder]
    kNodeInst = 1,                       //!< Node is \ref CBInst or \ref CBJump.
    kNodeData = 2,                       //!< Node is \ref CBData.
    kNodeAlign = 3,                      //!< Node is \ref CBAlign.
    kNodeLabel = 4,                      //!< Node is \ref CBLabel.
    kNodeLabelData = 5,                  //!< Node is \ref CBLabelData.
    kNodeConstPool = 6,                  //!< Node is \ref CBConstPool.
    kNodeComment = 7,                    //!< Node is \ref CBComment.
    kNodeSentinel = 8,                   //!< Node is \ref CBSentinel.

    // [CodeCompiler]
    kNodeFunc = 16,                      //!< Node is \ref CCFunc (considered as \ref CBLabel by \ref CodeBuilder).
    kNodeFuncExit = 17,                  //!< Node is \ref CCFuncRet.
    kNodeFuncCall = 18,                  //!< Node is \ref CCFuncCall.
    kNodePushArg = 19,                   //!< Node is \ref CCPushArg.
    kNodeHint = 20,                      //!< Node is \ref CCHint.

    // [UserDefined]
    kNodeUser = 32                       //!< First id of a user-defined node.
  };

  // --------------------------------------------------------------------------
  // [Flags]
  // --------------------------------------------------------------------------

  // NOTE: 0x0002 is currently unassigned.
  ASMJIT_ENUM(Flags) {
    //! The node has been translated by the CodeCompiler.
    kFlagIsTranslated = 0x0001,
    //! If the node can be safely removed (has no effect).
    kFlagIsRemovable = 0x0004,
    //! If the node is informative only and can be safely removed.
    kFlagIsInformative = 0x0008,

    //! If the `CBInst` is a jump.
    kFlagIsJmp = 0x0010,
    //! If the `CBInst` is a conditional jump.
    kFlagIsJcc = 0x0020,
    //! If the `CBInst` is an unconditional jump or conditional jump that is
    //! likely to be taken.
    kFlagIsTaken = 0x0040,

    //! If the `CBNode` will return from a function.
    //!
    //! This flag is used by both `CBSentinel` and `CCFuncRet`.
    kFlagIsRet = 0x0080,

    //! Whether the instruction is special.
    kFlagIsSpecial = 0x0100,
    //! Whether the instruction is an FPU instruction.
    kFlagIsFp = 0x0200
  };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new \ref CBNode - always use \ref CodeBuilder to allocate nodes.
  //!
  //! Flags and position are inherited from the builder's current
  //! `_nodeFlags` / `_position` state.
  ASMJIT_INLINE CBNode(CodeBuilder* cb, uint32_t type) noexcept {
    _prev = nullptr;
    _next = nullptr;
    _type = static_cast<uint8_t>(type);
    _opCount = 0;
    _flags = static_cast<uint16_t>(cb->_nodeFlags);
    _position = cb->_position;
    _inlineComment = nullptr;
    _passData = nullptr;
  }
  //! Destroy the `CBNode` instance (NEVER CALLED).
  ASMJIT_INLINE ~CBNode() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Unchecked downcast to a concrete node type.
  template<typename T>
  ASMJIT_INLINE T* as() noexcept { return static_cast<T*>(this); }
  //! \overload
  template<typename T>
  ASMJIT_INLINE const T* as() const noexcept { return static_cast<const T*>(this); }

  //! Get previous node in the compiler stream.
  ASMJIT_INLINE CBNode* getPrev() const noexcept { return _prev; }
  //! Get next node in the compiler stream.
  ASMJIT_INLINE CBNode* getNext() const noexcept { return _next; }

  //! Get the node type, see \ref Type.
  ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }

  //! Get the node flags.
  ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
  //! Get whether the instruction has flag `flag`.
  ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (static_cast<uint32_t>(_flags) & flag) != 0; }
  //! Set node flags to `flags`.
  ASMJIT_INLINE void setFlags(uint32_t flags) noexcept { _flags = static_cast<uint16_t>(flags); }
  //! Add instruction `flags`.
  ASMJIT_INLINE void orFlags(uint32_t flags) noexcept { _flags |= static_cast<uint16_t>(flags); }
  //! And instruction `flags`.
  ASMJIT_INLINE void andFlags(uint32_t flags) noexcept { _flags &= static_cast<uint16_t>(flags); }
  //! Clear instruction `flags`.
  ASMJIT_INLINE void andNotFlags(uint32_t flags) noexcept { _flags &= ~static_cast<uint16_t>(flags); }

  //! Get whether the node has been translated.
  ASMJIT_INLINE bool isTranslated() const noexcept { return hasFlag(kFlagIsTranslated); }

  //! Get whether the node is removable if it's in unreachable code block.
  ASMJIT_INLINE bool isRemovable() const noexcept { return hasFlag(kFlagIsRemovable); }
  //! Get whether the node is informative only (comment, hint).
  ASMJIT_INLINE bool isInformative() const noexcept { return hasFlag(kFlagIsInformative); }

  //! Whether the node is `CBLabel`.
  ASMJIT_INLINE bool isLabel() const noexcept { return _type == kNodeLabel; }
  //! Whether the `CBInst` node is an unconditional jump.
  ASMJIT_INLINE bool isJmp() const noexcept { return hasFlag(kFlagIsJmp); }
  //! Whether the `CBInst` node is a conditional jump.
  ASMJIT_INLINE bool isJcc() const noexcept { return hasFlag(kFlagIsJcc); }
  //! Whether the `CBInst` node is a conditional/unconditional jump.
  ASMJIT_INLINE bool isJmpOrJcc() const noexcept { return hasFlag(kFlagIsJmp | kFlagIsJcc); }
  //! Whether the `CBInst` node is a return.
  ASMJIT_INLINE bool isRet() const noexcept { return hasFlag(kFlagIsRet); }

  //! Get whether the node is `CBInst` and the instruction is special.
  ASMJIT_INLINE bool isSpecial() const noexcept { return hasFlag(kFlagIsSpecial); }
  //! Get whether the node is `CBInst` and the instruction uses x87-FPU.
  ASMJIT_INLINE bool isFp() const noexcept { return hasFlag(kFlagIsFp); }

  //! Get whether the node has a non-zero flow index.
  ASMJIT_INLINE bool hasPosition() const noexcept { return _position != 0; }
  //! Get flow index.
  ASMJIT_INLINE uint32_t getPosition() const noexcept { return _position; }
  //! Set flow index.
  ASMJIT_INLINE void setPosition(uint32_t position) noexcept { _position = position; }

  //! Get if the node has an inline comment.
  ASMJIT_INLINE bool hasInlineComment() const noexcept { return _inlineComment != nullptr; }
  //! Get an inline comment string.
  ASMJIT_INLINE const char* getInlineComment() const noexcept { return _inlineComment; }
  //! Set an inline comment string to `s`.
  //!
  //! NOTE: only the pointer is stored - `s` must outlive the node.
  ASMJIT_INLINE void setInlineComment(const char* s) noexcept { _inlineComment = s; }
  //! Set an inline comment string to null.
  ASMJIT_INLINE void resetInlineComment() noexcept { _inlineComment = nullptr; }

  //! Get if the node has associated work-data.
  ASMJIT_INLINE bool hasPassData() const noexcept { return _passData != nullptr; }
  //! Get work-data - data used during processing & transformations.
  template<typename T>
  ASMJIT_INLINE T* getPassData() const noexcept { return (T*)_passData; }
  //! Set work-data to `data`.
  template<typename T>
  ASMJIT_INLINE void setPassData(T* data) noexcept { _passData = (void*)data; }
  //! Reset work-data to null.
  ASMJIT_INLINE void resetPassData() noexcept { _passData = nullptr; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  CBNode* _prev;                         //!< Previous node.
  CBNode* _next;                         //!< Next node.

  uint8_t _type;                         //!< Node type, see \ref NodeType.
  uint8_t _opCount;                      //!< Count of operands or zero.
  uint16_t _flags;                       //!< Flags, different meaning for every type of the node.
  uint32_t _position;                    //!< Flow index.

  const char* _inlineComment;            //!< Inline comment or null if not used.
  void* _passData;                       //!< Data used exclusively by the current `CBPass`.
};
// ============================================================================
// [asmjit::CBInst]
// ============================================================================
//! Instruction (CodeBuilder).
//!
//! Wraps an instruction with its options and operands.
class CBInst : public CBNode {
public:
ASMJIT_NONCOPYABLE(CBInst)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `CBInst` instance.
ASMJIT_INLINE CBInst(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept
: CBNode(cb, kNodeInst) {
orFlags(kFlagIsRemovable);
_instId = static_cast<uint16_t>(instId);
_reserved = 0;
_options = options;
_opCount = static_cast<uint8_t>(opCount);
_opArray = opArray;
_updateMemOp();
}
//! Destroy the `CBInst` instance (NEVER CALLED).
ASMJIT_INLINE ~CBInst() noexcept {}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the instruction id, see \ref X86Inst::Id.
ASMJIT_INLINE uint32_t getInstId() const noexcept { return _instId; }
//! Set the instruction id to `instId`.
//!
//! NOTE: Please do not modify instruction code if you don't know what you
//! are doing. Incorrect instruction code and/or operands can cause random
//! errors in production builds and will most probably trigger assertion
//! failures in debug builds.
ASMJIT_INLINE void setInstId(uint32_t instId) noexcept { _instId = static_cast<uint16_t>(instId); }
//! Whether the instruction is either a jump or a conditional jump likely to
//! be taken.
ASMJIT_INLINE bool isTaken() const noexcept { return hasFlag(kFlagIsTaken); }
//! Get emit options.
ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; }
//! Set emit options.
ASMJIT_INLINE void setOptions(uint32_t options) noexcept { _options = options; }
//! Add emit options.
ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; }
//! Mask emit options.
ASMJIT_INLINE void andOptions(uint32_t options) noexcept { _options &= options; }
//! Clear emit options.
ASMJIT_INLINE void delOptions(uint32_t options) noexcept { _options &= ~options; }
//! Get op-mask operand (used to represent AVX-512 op-mask selector).
ASMJIT_INLINE Operand& getOpExtra() noexcept { return _opExtra; }
//! \overload
ASMJIT_INLINE const Operand& getOpExtra() const noexcept { return _opExtra; }
//! Set op-mask operand (used to represent AVX-512 op-mask selector).
ASMJIT_INLINE void setOpExtra(const Operand& opExtra) noexcept { _opExtra = opExtra; }
//! Get operands count.
ASMJIT_INLINE uint32_t getOpCount() const noexcept { return _opCount; }
//! Get operands list.
ASMJIT_INLINE Operand* getOpArray() noexcept { return _opArray; }
//! \overload
ASMJIT_INLINE const Operand* getOpArray() const noexcept { return _opArray; }
//! Get whether the instruction contains a memory operand.
ASMJIT_INLINE bool hasMemOp() const noexcept { return _memOpIndex != 0xFF; }
//! Get memory operand.
//!
//! NOTE: Can only be called if the instruction has such operand,
//! see `hasMemOp()`.
ASMJIT_INLINE Mem* getMemOp() const noexcept {
  ASMJIT_ASSERT(hasMemOp());
  return static_cast<Mem*>(&_opArray[_memOpIndex]);
}
//! \overload
template<typename T>
ASMJIT_INLINE T* getMemOp() const noexcept {
  ASMJIT_ASSERT(hasMemOp());
  return static_cast<T*>(&_opArray[_memOpIndex]);
}
//! Set memory operand index, `0xFF` means no memory operand.
ASMJIT_INLINE void setMemOpIndex(uint32_t index) noexcept { _memOpIndex = static_cast<uint8_t>(index); }
//! Reset memory operand index to `0xFF` (no operand).
ASMJIT_INLINE void resetMemOpIndex() noexcept { _memOpIndex = 0xFF; }
// --------------------------------------------------------------------------
// [Utils]
// --------------------------------------------------------------------------
//! Scan the operands and cache the index of the first memory operand into
//! `_memOpIndex` (0xFF when the instruction has no memory operand).
ASMJIT_INLINE void _updateMemOp() noexcept {
  Operand* ops = getOpArray();
  uint32_t count = getOpCount();

  uint32_t index = 0xFF;
  for (uint32_t i = 0; i < count; i++) {
    if (ops[i].isMem()) {
      index = i;
      break;
    }
  }
  setMemOpIndex(index);
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
uint16_t _instId; //!< Instruction id (architecture dependent).
uint8_t _memOpIndex; //!< \internal
uint8_t _reserved; //!< \internal
uint32_t _options; //!< Instruction options.
Operand _opExtra; //!< Extra operand (op-mask {k} on AVX-512).
Operand* _opArray; //!< Instruction operands.
};
// ============================================================================
// [asmjit::CBInstEx]
// ============================================================================
//! Extended instruction node able to hold up to six operands.
struct CBInstEx : public CBInst {
  Operand _op4;                          //!< Fifth operand.
  Operand _op5;                          //!< Sixth operand.
  // NOTE(review): this redeclares `_opExtra`, shadowing `CBInst::_opExtra`
  // (declared in the base class) - confirm the duplication is intentional.
  Operand _opExtra;
};
// ============================================================================
// [asmjit::CBJump]
// ============================================================================
//! Asm jump (conditional or direct).
//!
//! Extension of `CBInst` node, which stores more information about the jump.
class CBJump : public CBInst {
public:
  ASMJIT_NONCOPYABLE(CBJump)
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create a new `CBJump` instance, initially not linked to any target.
  ASMJIT_INLINE CBJump(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept
    : CBInst(cb, instId, options, opArray, opCount),
      _target(nullptr),
      _jumpNext(nullptr) {}
  //! Destroy the `CBJump` instance (NEVER CALLED).
  ASMJIT_INLINE ~CBJump() noexcept {}
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get the label node this jump targets (null if not linked yet).
  ASMJIT_INLINE CBLabel* getTarget() const noexcept { return _target; }
  //! Get the next jump to the same target (singly-linked list of jumps).
  ASMJIT_INLINE CBJump* getJumpNext() const noexcept { return _jumpNext; }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  CBLabel* _target;                      //!< Target node.
  CBJump* _jumpNext;                     //!< Next jump to the same target in a single linked-list.
};
// ============================================================================
// [asmjit::CBData]
// ============================================================================
//! Asm data (CodeBuilder).
//!
//! Wraps `.data` directive. The node contains data that will be placed at the
//! node's position in the assembler stream. The data is considered to be RAW;
//! no analysis nor byte-order conversion is performed on RAW data.
class CBData : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBData)
  //! Inline capacity chosen so the whole node occupies 64 bytes
  //! (CBNode header + inline buffer + 4-byte `_size`).
  enum { kInlineBufferSize = static_cast<int>(64 - sizeof(CBNode) - 4) };
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create a new `CBData` instance.
  //!
  //! Small payloads (<= `kInlineBufferSize`) are copied into the node's
  //! inline buffer; larger payloads are NOT copied - only the pointer is
  //! stored, so the external data must outlive the node (TODO confirm the
  //! intended ownership contract with callers).
  ASMJIT_INLINE CBData(CodeBuilder* cb, void* data, uint32_t size) noexcept : CBNode(cb, kNodeData) {
    if (size <= kInlineBufferSize) {
      // `data` may be null, in which case the buffer stays uninitialized.
      if (data) ::memcpy(_buf, data, size);
    }
    else {
      _externalPtr = static_cast<uint8_t*>(data);
    }
    // `_size` lives past the inline buffer, so it is valid in both cases.
    _size = size;
  }
  //! Destroy the `CBData` instance (NEVER CALLED).
  ASMJIT_INLINE ~CBData() noexcept {}
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get size of the data.
  uint32_t getSize() const noexcept { return _size; }
  //! Get pointer to the data (inline buffer or external, depending on size).
  uint8_t* getData() const noexcept { return _size <= kInlineBufferSize ? const_cast<uint8_t*>(_buf) : _externalPtr; }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  // Layout-sensitive union: `_size` occupies the bytes after the inline
  // buffer, `_externalPtr` aliases only the first pointer-sized bytes.
  union {
    struct {
      uint8_t _buf[kInlineBufferSize];   //!< Embedded data buffer.
      uint32_t _size;                    //!< Size of the data.
    };
    struct {
      uint8_t* _externalPtr;             //!< Pointer to external data.
    };
  };
};
// ============================================================================
// [asmjit::CBAlign]
// ============================================================================
//! Align directive (CodeBuilder).
//!
//! Wraps `.align` directive.
class CBAlign : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBAlign)
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create a new `CBAlign` instance.
  ASMJIT_INLINE CBAlign(CodeBuilder* cb, uint32_t mode, uint32_t alignment) noexcept
    : CBNode(cb, kNodeAlign),
      _mode(mode),
      _alignment(alignment) {}
  //! Destroy the `CBAlign` instance (NEVER CALLED).
  ASMJIT_INLINE ~CBAlign() noexcept {}
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get align mode.
  ASMJIT_INLINE uint32_t getMode() const noexcept { return _mode; }
  //! Set align mode.
  ASMJIT_INLINE void setMode(uint32_t mode) noexcept { _mode = mode; }
  //! Get alignment in bytes.
  ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; }
  //! Set alignment in bytes to `alignment`.
  ASMJIT_INLINE void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  uint32_t _mode;                        //!< Align mode, see \ref AlignMode.
  uint32_t _alignment;                   //!< Alignment (in bytes).
};
// ============================================================================
// [asmjit::CBLabel]
// ============================================================================
//! Label (CodeBuilder).
class CBLabel : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBLabel)
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create a new `CBLabel` instance with label id `id`.
  ASMJIT_INLINE CBLabel(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept
    : CBNode(cb, kNodeLabel),
      _id(id),
      _numRefs(0),
      _from(nullptr) {}
  //! Destroy the `CBLabel` instance (NEVER CALLED).
  ASMJIT_INLINE ~CBLabel() noexcept {}
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get the label id.
  ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
  //! Get the label as `Label` operand.
  ASMJIT_INLINE Label getLabel() const noexcept { return Label(_id); }
  //! Get first jmp instruction (head of the linked-list of jumps here).
  ASMJIT_INLINE CBJump* getFrom() const noexcept { return _from; }
  //! Get number of jumps to this target.
  ASMJIT_INLINE uint32_t getNumRefs() const noexcept { return _numRefs; }
  //! Set number of jumps to this target.
  ASMJIT_INLINE void setNumRefs(uint32_t i) noexcept { _numRefs = i; }
  //! Add number of jumps to this target.
  ASMJIT_INLINE void addNumRefs(uint32_t i = 1) noexcept { _numRefs += i; }
  //! Subtract number of jumps to this target (no underflow check).
  ASMJIT_INLINE void subNumRefs(uint32_t i = 1) noexcept { _numRefs -= i; }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  uint32_t _id;                          //!< Label id.
  uint32_t _numRefs;                     //!< Count of jumps here.
  CBJump* _from;                         //!< Linked-list of nodes that can jump here.
};
// ============================================================================
// [asmjit::CBLabelData]
// ============================================================================
//! Data node that embeds the address of a label, see `CodeBuilder`.
class CBLabelData : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBLabelData)
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create a new `CBLabelData` instance.
  ASMJIT_INLINE CBLabelData(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept
    : CBNode(cb, kNodeLabelData),
      _id(id) {}
  //! Destroy the `CBLabelData` instance (NEVER CALLED).
  ASMJIT_INLINE ~CBLabelData() noexcept {}
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get the label id.
  ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
  //! Get the label as `Label` operand.
  ASMJIT_INLINE Label getLabel() const noexcept { return Label(_id); }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  uint32_t _id;                          //!< Label id.
};
// ============================================================================
// [asmjit::CBConstPool]
// ============================================================================
//! Constant pool node bound to a label (the label marks the pool's position).
class CBConstPool : public CBLabel {
public:
  ASMJIT_NONCOPYABLE(CBConstPool)
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create a new `CBConstPool` instance.
  //!
  //! Constructs as a `CBLabel` and then overrides the node type, so the pool
  //! can still be addressed through its label id.
  ASMJIT_INLINE CBConstPool(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept
    : CBLabel(cb, id),
      _constPool(&cb->_cbBaseZone) { _type = kNodeConstPool; }
  //! Destroy the `CBConstPool` instance (NEVER CALLED).
  ASMJIT_INLINE ~CBConstPool() noexcept {}
  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------
  //! Get the underlying constant pool.
  ASMJIT_INLINE ConstPool& getConstPool() noexcept { return _constPool; }
  //! \overload
  ASMJIT_INLINE const ConstPool& getConstPool() const noexcept { return _constPool; }
  //! Get whether the constant-pool is empty.
  ASMJIT_INLINE bool isEmpty() const noexcept { return _constPool.isEmpty(); }
  //! Get the size of the constant-pool in bytes.
  ASMJIT_INLINE size_t getSize() const noexcept { return _constPool.getSize(); }
  //! Get minimum alignment.
  ASMJIT_INLINE size_t getAlignment() const noexcept { return _constPool.getAlignment(); }
  //! See \ref ConstPool::add().
  ASMJIT_INLINE Error add(const void* data, size_t size, size_t& dstOffset) noexcept {
    return _constPool.add(data, size, dstOffset);
  }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  ConstPool _constPool;                  //!< The constant pool.
};
// ============================================================================
// [asmjit::CBComment]
// ============================================================================
//! Comment (CodeBuilder).
class CBComment : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBComment)
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create a new `CBComment` instance.
  //!
  //! NOTE(review): only the pointer is stored, the string is not copied -
  //! `comment` presumably must outlive the node; confirm with callers.
  ASMJIT_INLINE CBComment(CodeBuilder* cb, const char* comment) noexcept : CBNode(cb, kNodeComment) {
    orFlags(kFlagIsRemovable | kFlagIsInformative);
    _inlineComment = comment;
  }
  //! Destroy the `CBComment` instance (NEVER CALLED).
  ASMJIT_INLINE ~CBComment() noexcept {}
};
// ============================================================================
// [asmjit::CBSentinel]
// ============================================================================
//! Sentinel (CodeBuilder).
//!
//! Sentinel is a marker that is completely ignored by the code builder. It's
//! used to remember a position in a code as it never gets removed by any pass.
class CBSentinel : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBSentinel)
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create a new `CBSentinel` instance (emits nothing, marks a position).
  ASMJIT_INLINE CBSentinel(CodeBuilder* cb) noexcept : CBNode(cb, kNodeSentinel) {}
  //! Destroy the `CBSentinel` instance (NEVER CALLED).
  ASMJIT_INLINE ~CBSentinel() noexcept {}
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_BUILDER
#endif // _ASMJIT_BASE_CODEBUILDER_H

View File

@@ -0,0 +1,571 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/assembler.h"
#include "../base/codecompiler.h"
#include "../base/cpuinfo.h"
#include "../base/logging.h"
#include "../base/regalloc_p.h"
#include "../base/utils.h"
#include <stdarg.h>
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [Constants]
// ============================================================================
static const char noName[1] = { '\0' };
// ============================================================================
// [asmjit::CCFuncCall - Arg / Ret]
// ============================================================================
//! Assign operand `op` as argument `i` of the call.
//!
//! Returns false if the (masked) index is out of the function's argument
//! range. NOTE(review): the bounds check masks off `kFuncArgHi`, but `_args`
//! is indexed with the unmasked `i` - this assumes `_args` is sized to hold
//! both lo/hi slots per argument; confirm against the `_args` allocation.
bool CCFuncCall::_setArg(uint32_t i, const Operand_& op) noexcept {
  if ((i & ~kFuncArgHi) >= _funcDetail.getArgCount())
    return false;
  _args[i] = op;
  return true;
}
//! Assign operand `op` as return value `i` of the call.
//!
//! Only two return slots exist (e.g. a 64-bit value returned in a register
//! pair); any other index is rejected.
bool CCFuncCall::_setRet(uint32_t i, const Operand_& op) noexcept {
  if (i < 2) {
    _ret[i] = op;
    return true;
  }
  return false;
}
// ============================================================================
// [asmjit::CodeCompiler - Construction / Destruction]
// ============================================================================
//! Create a new `CodeCompiler`, initially not attached to any `CodeHolder`.
CodeCompiler::CodeCompiler() noexcept
  : CodeBuilder(),
    _func(nullptr),
    _vRegZone(4096 - Zone::kZoneOverhead),  // Zone block size minus bookkeeping.
    _vRegArray(),
    _localConstPool(nullptr),
    _globalConstPool(nullptr) {
  _type = kTypeCompiler;
}
CodeCompiler::~CodeCompiler() noexcept {}
// ============================================================================
// [asmjit::CodeCompiler - Events]
// ============================================================================
//! Called when the compiler is attached to `code`; no extra state to set up,
//! simply delegates to the base class.
Error CodeCompiler::onAttach(CodeHolder* code) noexcept {
  return Base::onAttach(code);
}
//! Called when the compiler is detached from `code`; drops all per-code
//! state (current function, const pools, virtual registers) before
//! delegating to the base class.
Error CodeCompiler::onDetach(CodeHolder* code) noexcept {
  _func = nullptr;
  _localConstPool = nullptr;
  _globalConstPool = nullptr;
  _vRegArray.reset();
  _vRegZone.reset(false);  // `false` presumably means release memory - TODO confirm Zone::reset semantics.
  return Base::onDetach(code);
}
// ============================================================================
// [asmjit::CodeCompiler - Node-Factory]
// ============================================================================
//! Create a register-allocator hint node for virtual register `r`.
//!
//! Returns null when `r` is not a virtual register (physical registers
//! cannot carry hints) or when the node cannot be allocated.
CCHint* CodeCompiler::newHintNode(Reg& r, uint32_t hint, uint32_t value) noexcept {
  if (!r.isVirtReg())
    return nullptr;
  return newNodeT<CCHint>(getVirtReg(r), hint, value);
}
// ============================================================================
// [asmjit::CodeCompiler - Func]
// ============================================================================
//! Create a new `CCFunc` for signature `sign` (does not insert it).
//!
//! On any failure the last error is set (see the failure paths below) and
//! null is returned; callers should not overwrite that error.
CCFunc* CodeCompiler::newFunc(const FuncSignature& sign) noexcept {
  Error err;
  CCFunc* func = newNodeT<CCFunc>();
  if (!func) goto _NoMemory;
  // A function is also a label, register it so it can be jump/call target.
  err = registerLabelNode(func);
  if (ASMJIT_UNLIKELY(err)) {
    // TODO: Calls setLastError, maybe rethink noexcept?
    setLastError(err);
    return nullptr;
  }
  // Create helper nodes.
  func->_end = newNodeT<CBSentinel>();
  func->_exitNode = newLabelNode();
  if (!func->_exitNode || !func->_end) goto _NoMemory;
  // Function prototype.
  err = func->getDetail().init(sign);
  if (err != kErrorOk) {
    setLastError(err);
    return nullptr;
  }
  // Override the natural stack alignment of the calling convention to what's
  // specified by CodeInfo.
  func->_funcDetail._callConv.setNaturalStackAlignment(_codeInfo.getStackAlignment());
  // Allocate space for function arguments (zeroed VirtReg* slots, filled
  // later by setArg()).
  func->_args = nullptr;
  if (func->getArgCount() != 0) {
    func->_args = _cbHeap.allocT<VirtReg*>(func->getArgCount() * sizeof(VirtReg*));
    if (!func->_args) goto _NoMemory;
    ::memset(func->_args, 0, func->getArgCount() * sizeof(VirtReg*));
  }
  return func;
_NoMemory:
  setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  return nullptr;
}
//! Insert `func` (and its exit-label / end-sentinel helper nodes) into the
//! node list and make it the current function.
//!
//! The cursor is intentionally restored to just after the function node, so
//! subsequently emitted code lands between the function node and its exit
//! label.
CCFunc* CodeCompiler::addFunc(CCFunc* func) {
  ASMJIT_ASSERT(_func == nullptr);
  _func = func;
  addNode(func);                 // Function node.
  CBNode* cursor = getCursor();  // {CURSOR}.
  addNode(func->getExitNode());  // Function exit label.
  addNode(func->getEnd());       // Function end marker.
  _setCursor(cursor);
  return func;
}
//! Create a function for `sign` and insert it into the node list.
//!
//! Returns null on failure. `newFunc()` already records the precise failure
//! reason via `setLastError()` (invalid signature, out of memory, label
//! registration error), so it must NOT be overwritten here with a generic
//! `kErrorNoHeapMemory` - doing so masked the real error.
CCFunc* CodeCompiler::addFunc(const FuncSignature& sign) {
  CCFunc* func = newFunc(sign);
  if (!func)
    return nullptr;
  return addFunc(func);
}
//! Finish the current function and return its end sentinel.
//!
//! Flushes the function-local constant pool (if any) right after the exit
//! label and leaves the cursor positioned at the function's end marker.
CBSentinel* CodeCompiler::endFunc() {
  CCFunc* func = getFunc();
  if (!func) {
    // TODO:
    return nullptr;
  }
  // Add the local constant pool at the end of the function (if exist).
  setCursor(func->getExitNode());
  if (_localConstPool) {
    addNode(_localConstPool);
    _localConstPool = nullptr;  // Next function gets a fresh local pool.
  }
  // Mark as finished.
  func->_isFinished = true;
  _func = nullptr;
  setCursor(func->getEnd());
  return func->getEnd();
}
// ============================================================================
// [asmjit::CodeCompiler - Ret]
// ============================================================================
//! Create a function-return node carrying up to two return operands
//! (does not insert it). Sets the last error and returns null on OOM.
CCFuncRet* CodeCompiler::newRet(const Operand_& o0, const Operand_& o1) noexcept {
  CCFuncRet* node = newNodeT<CCFuncRet>(o0, o1);
  if (node)
    return node;

  setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  return nullptr;
}
//! Create a function-return node and insert it at the cursor.
//! Returns null on failure (the last error was set by `newRet()`).
CCFuncRet* CodeCompiler::addRet(const Operand_& o0, const Operand_& o1) noexcept {
  CCFuncRet* node = newRet(o0, o1);
  return node ? static_cast<CCFuncRet*>(addNode(node)) : nullptr;
}
// ============================================================================
// [asmjit::CodeCompiler - Call]
// ============================================================================
//! Create a function-call node for target `o0` with signature `sign`
//! (does not insert it). Returns null and sets the last error on failure.
CCFuncCall* CodeCompiler::newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept {
  Error err;
  uint32_t nArgs;

  // The node and its single operand (the call target) are allocated as one
  // block; the operand lives right after the node.
  CCFuncCall* node = _cbHeap.allocT<CCFuncCall>(sizeof(CCFuncCall) + sizeof(Operand));
  if (ASMJIT_UNLIKELY(!node))
    goto _NoMemory;

  // Compute the trailing operand's address only AFTER the null check -
  // pointer arithmetic on a null pointer is undefined behavior (the
  // original code computed it before checking the allocation).
  {
    Operand* opArray = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(node) + sizeof(CCFuncCall));
    opArray[0].copyFrom(o0);
    new (node) CCFuncCall(this, instId, 0, opArray, 1);
  }

  if ((err = node->getDetail().init(sign)) != kErrorOk) {
    setLastError(err);
    return nullptr;
  }

  // If there are no arguments skip the allocation.
  if ((nArgs = sign.getArgCount()) == 0)
    return node;

  // Zeroed argument slots, filled later via _setArg().
  node->_args = static_cast<Operand*>(_cbHeap.alloc(nArgs * sizeof(Operand)));
  if (!node->_args) goto _NoMemory;
  ::memset(node->_args, 0, nArgs * sizeof(Operand));
  return node;

_NoMemory:
  setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  return nullptr;
}
//! Create a function-call node and insert it at the cursor.
//! Returns null on failure (the last error was set by `newCall()`).
CCFuncCall* CodeCompiler::addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept {
  CCFuncCall* node = newCall(instId, o0, sign);
  return node ? static_cast<CCFuncCall*>(addNode(node)) : nullptr;
}
// ============================================================================
// [asmjit::CodeCompiler - Vars]
// ============================================================================
//! Bind virtual register `r` to argument `argIndex` of the current function.
//!
//! Fails if there is no current function or `r` is not a valid virtual
//! register known to this compiler.
Error CodeCompiler::setArg(uint32_t argIndex, const Reg& r) {
  CCFunc* func = getFunc();
  if (!func)
    return setLastError(DebugUtils::errored(kErrorInvalidState));
  if (!isVirtRegValid(r))
    return setLastError(DebugUtils::errored(kErrorInvalidVirtId));
  VirtReg* vr = getVirtReg(r);
  func->setArg(argIndex, vr);
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeCompiler - Hint]
// ============================================================================
//! Emit a register-allocator hint node for `r` at the current cursor.
//! Physical registers are silently ignored - hints only make sense for
//! virtual registers.
Error CodeCompiler::_hint(Reg& r, uint32_t hint, uint32_t value) {
  if (!r.isVirtReg())
    return kErrorOk;

  CCHint* node = newHintNode(r, hint, value);
  if (!node)
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  addNode(node);
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeCompiler - Vars]
// ============================================================================
//! Allocate a new `VirtReg` with `typeId`/`signature` and an optional name.
//!
//! Returns null when the id space is exhausted or on allocation failure;
//! the register is zone-allocated (zeroed) and registered in `_vRegArray`.
VirtReg* CodeCompiler::newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept {
  size_t index = _vRegArray.getLength();
  // NOTE(review): this uses `>`; if `kPackedIdCount` is the number of
  // packable ids then valid indexes are [0, count) and the check looks like
  // it should be `>=` - confirm against Operand::packId().
  if (ASMJIT_UNLIKELY(index > Operand::kPackedIdCount))
    return nullptr;
  VirtReg* vreg;
  if (_vRegArray.willGrow(&_cbHeap, 1) != kErrorOk || !(vreg = _vRegZone.allocZeroedT<VirtReg>()))
    return nullptr;
  vreg->_id = Operand::packId(static_cast<uint32_t>(index));
  vreg->_regInfo._signature = signature;
  vreg->_name = noName;
#if !defined(ASMJIT_DISABLE_LOGGING)
  // The name is duplicated into the data zone so it lives as long as the
  // compiler; it is only kept when logging support is compiled in.
  if (name && name[0] != '\0')
    vreg->_name = static_cast<char*>(_cbDataZone.dup(name, ::strlen(name), true));
#endif // !ASMJIT_DISABLE_LOGGING
  vreg->_size = TypeId::sizeOf(typeId);
  vreg->_typeId = typeId;
  vreg->_alignment = static_cast<uint8_t>(std::min<uint32_t>(vreg->_size, 64));
  vreg->_priority = 10;
  // The following are only used by `RAPass`.
  vreg->_raId = kInvalidValue;
  vreg->_state = VirtReg::kStateNone;
  vreg->_physId = Globals::kInvalidRegId;
  _vRegArray.appendUnsafe(vreg);
  return vreg;
}
//! Initialize `out` as a new virtual register of type `typeId`.
//!
//! On failure `out` is reset and the last error is set.
Error CodeCompiler::_newReg(Reg& out, uint32_t typeId, const char* name) {
  RegInfo regInfo;
  // Map the abstract TypeId to an architecture-specific register signature.
  Error err = ArchUtils::typeIdToRegInfo(getArchType(), typeId, regInfo);
  if (ASMJIT_UNLIKELY(err)) return setLastError(err);
  VirtReg* vReg = newVirtReg(typeId, regInfo.getSignature(), name);
  if (ASMJIT_UNLIKELY(!vReg)) {
    out.reset();
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  }
  out._initReg(regInfo.getSignature(), vReg->getId());
  return kErrorOk;
}
//! \overload - formats the register name from `nameFmt`/`ap` and delegates.
Error CodeCompiler::_newReg(Reg& out, uint32_t typeId, const char* nameFmt, va_list ap) {
  StringBuilderTmp<256> sb;
  sb.appendFormatVA(nameFmt, ap);
  return _newReg(out, typeId, sb.getData());
}
//! Initialize `out` as a new virtual register compatible with `ref`.
//!
//! If `ref` is itself a virtual register its TypeId is reused, possibly
//! adjusted so the new register's size matches `ref` (see the NOTE below);
//! otherwise the physical register type of `ref` is used directly.
Error CodeCompiler::_newReg(Reg& out, const Reg& ref, const char* name) {
  RegInfo regInfo;
  uint32_t typeId;
  if (isVirtRegValid(ref)) {
    VirtReg* vRef = getVirtReg(ref);
    typeId = vRef->getTypeId();
    // NOTE: It's possible to cast one register type to another if it's the
    // same register kind. However, VirtReg always contains the TypeId that
    // was used to create the register. This means that in some cases we may
    // end up having different size of `ref` and `vRef`. In such case we
    // adjust the TypeId to match the `ref` register type instead of the
    // original register type, which should be the expected behavior.
    uint32_t typeSize = TypeId::sizeOf(typeId);
    uint32_t refSize = ref.getSize();
    if (typeSize != refSize) {
      if (TypeId::isInt(typeId)) {
        // GP register - change TypeId to match `ref`, but keep sign of `vRef`.
        // (`typeId & 1` extracts the signed/unsigned bit of the int TypeId.)
        switch (refSize) {
          case 1: typeId = TypeId::kI8 | (typeId & 1); break;
          case 2: typeId = TypeId::kI16 | (typeId & 1); break;
          case 4: typeId = TypeId::kI32 | (typeId & 1); break;
          case 8: typeId = TypeId::kI64 | (typeId & 1); break;
          default: typeId = TypeId::kVoid; break;
        }
      }
      else if (TypeId::isMmx(typeId)) {
        // MMX register - always use 64-bit.
        typeId = TypeId::kMmx64;
      }
      else if (TypeId::isMask(typeId)) {
        // Mask register - change TypeId to match `ref` size.
        switch (refSize) {
          case 1: typeId = TypeId::kMask8; break;
          case 2: typeId = TypeId::kMask16; break;
          case 4: typeId = TypeId::kMask32; break;
          case 8: typeId = TypeId::kMask64; break;
          default: typeId = TypeId::kVoid; break;
        }
      }
      else {
        // VEC register - change TypeId to match `ref` size, keep vector metadata.
        uint32_t elementTypeId = TypeId::elementOf(typeId);
        switch (refSize) {
          case 16: typeId = TypeId::_kVec128Start + (elementTypeId - TypeId::kI8); break;
          case 32: typeId = TypeId::_kVec256Start + (elementTypeId - TypeId::kI8); break;
          case 64: typeId = TypeId::_kVec512Start + (elementTypeId - TypeId::kI8); break;
          default: typeId = TypeId::kVoid; break;
        }
      }
      // kVoid marks an unsupported size combination.
      if (typeId == TypeId::kVoid)
        return setLastError(DebugUtils::errored(kErrorInvalidState));
    }
  }
  else {
    typeId = ref.getType();
  }
  Error err = ArchUtils::typeIdToRegInfo(getArchType(), typeId, regInfo);
  if (ASMJIT_UNLIKELY(err)) return setLastError(err);
  VirtReg* vReg = newVirtReg(typeId, regInfo.getSignature(), name);
  if (ASMJIT_UNLIKELY(!vReg)) {
    out.reset();
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  }
  out._initReg(regInfo.getSignature(), vReg->getId());
  return kErrorOk;
}
//! \overload - formats the register name from `nameFmt`/`ap` and delegates.
Error CodeCompiler::_newReg(Reg& out, const Reg& ref, const char* nameFmt, va_list ap) {
  StringBuilderTmp<256> sb;
  sb.appendFormatVA(nameFmt, ap);
  return _newReg(out, ref, sb.getData());
}
//! Initialize `out` as a memory operand referring to a new stack slot of
//! `size` bytes.
//!
//! `alignment` must be a power of two and is clamped to the range [1, 64].
//! The slot is backed by a special VirtReg marked `_isStack`.
Error CodeCompiler::_newStack(Mem& out, uint32_t size, uint32_t alignment, const char* name) {
  if (size == 0)
    return setLastError(DebugUtils::errored(kErrorInvalidArgument));
  if (alignment == 0) alignment = 1;
  if (!Utils::isPowerOf2(alignment))
    return setLastError(DebugUtils::errored(kErrorInvalidArgument));
  if (alignment > 64) alignment = 64;
  VirtReg* vReg = newVirtReg(0, 0, name);
  if (ASMJIT_UNLIKELY(!vReg)) {
    out.reset();
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  }
  vReg->_size = size;
  vReg->_isStack = true;
  vReg->_alignment = static_cast<uint8_t>(alignment);
  // Set the memory operand to GPD/GPQ and its id to VirtReg.
  out = Mem(Init, _nativeGpReg.getType(), vReg->getId(), Reg::kRegNone, kInvalidValue, 0, 0, Mem::kSignatureMemRegHomeFlag);
  return kErrorOk;
}
//! Initialize `out` as a memory operand referring to `data` embedded in a
//! constant pool.
//!
//! `scope` selects the function-local or the global pool (see `ConstScope`);
//! the pool (and its label) is created lazily on first use. The resulting
//! operand is label-relative with the offset returned by `ConstPool::add()`.
Error CodeCompiler::_newConst(Mem& out, uint32_t scope, const void* data, size_t size) {
  CBConstPool** pPool;
  if (scope == kConstScopeLocal)
    pPool = &_localConstPool;
  else if (scope == kConstScopeGlobal)
    pPool = &_globalConstPool;
  else
    return setLastError(DebugUtils::errored(kErrorInvalidArgument));
  if (!*pPool && !(*pPool = newConstPool()))
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  CBConstPool* pool = *pPool;
  size_t off;
  Error err = pool->add(data, size, off);
  if (ASMJIT_UNLIKELY(err)) return setLastError(err);
  out = Mem(Init,
    Label::kLabelTag,          // Base type.
    pool->getId(),             // Base id.
    0,                         // Index type.
    kInvalidValue,             // Index id.
    static_cast<int32_t>(off), // Offset.
    static_cast<uint32_t>(size), // Size.
    0);                        // Flags.
  return kErrorOk;
}
//! Hint the register allocator to allocate `reg` (any physical register).
Error CodeCompiler::alloc(Reg& reg) {
  return reg.isVirtReg() ? _hint(reg, CCHint::kHintAlloc, kInvalidValue) : kErrorOk;
}
//! Hint the register allocator to allocate `reg` into physical id `physId`.
Error CodeCompiler::alloc(Reg& reg, uint32_t physId) {
  return reg.isVirtReg() ? _hint(reg, CCHint::kHintAlloc, physId) : kErrorOk;
}
//! Hint the register allocator to allocate `reg` into `physReg`'s id.
Error CodeCompiler::alloc(Reg& reg, const Reg& physReg) {
  return reg.isVirtReg() ? _hint(reg, CCHint::kHintAlloc, physReg.getId()) : kErrorOk;
}
//! Hint the register allocator to save `reg` to its home memory slot.
Error CodeCompiler::save(Reg& reg) {
  return reg.isVirtReg() ? _hint(reg, CCHint::kHintSave, kInvalidValue) : kErrorOk;
}
//! Hint the register allocator to spill `reg`.
Error CodeCompiler::spill(Reg& reg) {
  return reg.isVirtReg() ? _hint(reg, CCHint::kHintSpill, kInvalidValue) : kErrorOk;
}
//! Hint the register allocator that `reg` is no longer used.
Error CodeCompiler::unuse(Reg& reg) {
  return reg.isVirtReg() ? _hint(reg, CCHint::kHintUnuse, kInvalidValue) : kErrorOk;
}
//! Get the spill priority of `reg` (0 for physical or unknown registers).
//!
//! Null-checks the lookup result for consistency with `setPriority()`,
//! `setSaveOnUnuse()` and `rename()`, which all guard against a null
//! `getVirtRegById()` result; the original dereferenced it unchecked.
uint32_t CodeCompiler::getPriority(Reg& reg) const {
  if (!reg.isVirtReg()) return 0;
  VirtReg* vreg = getVirtRegById(reg.getId());
  return vreg ? vreg->getPriority() : 0;
}
//! Set the spill priority of `reg`, saturating at 255.
//! Physical and unknown registers are ignored.
void CodeCompiler::setPriority(Reg& reg, uint32_t priority) {
  if (!reg.isVirtReg()) return;

  VirtReg* vreg = getVirtRegById(reg.getId());
  if (!vreg) return;

  if (priority > 255)
    priority = 255;
  vreg->_priority = static_cast<uint8_t>(priority);
}
//! Get whether `reg` is saved when unused (false for physical or unknown
//! registers).
//!
//! Null-checks the lookup result for consistency with `setSaveOnUnuse()`,
//! which guards against a null `getVirtRegById()` result; the original
//! dereferenced it unchecked.
bool CodeCompiler::getSaveOnUnuse(Reg& reg) const {
  if (!reg.isVirtReg()) return false;
  VirtReg* vreg = getVirtRegById(reg.getId());
  return vreg != nullptr && static_cast<bool>(vreg->_saveOnUnuse);
}
//! Set whether `reg` is saved when unused.
//! Physical and unknown registers are ignored.
void CodeCompiler::setSaveOnUnuse(Reg& reg, bool value) {
  if (!reg.isVirtReg()) return;

  VirtReg* vreg = getVirtRegById(reg.getId());
  if (vreg)
    vreg->_saveOnUnuse = value;
}
//! Rename virtual register `reg` using a printf-style format string.
//!
//! An empty or null format resets the name to the shared empty string. The
//! formatted name is truncated to 63 characters and duplicated into the
//! compiler's data zone, so it lives as long as the compiler.
void CodeCompiler::rename(Reg& reg, const char* fmt, ...) {
  if (!reg.isVirtReg()) return;
  VirtReg* vreg = getVirtRegById(reg.getId());
  if (!vreg) return;
  vreg->_name = noName;
  if (fmt && fmt[0] != '\0') {
    char buf[64];
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
    buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0';  // Defensive: guarantee termination.
    vreg->_name = static_cast<char*>(_cbDataZone.dup(buf, ::strlen(buf), true));
    va_end(ap);
  }
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER

View File

@@ -0,0 +1,738 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CODECOMPILER_H
#define _ASMJIT_BASE_CODECOMPILER_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/assembler.h"
#include "../base/codebuilder.h"
#include "../base/constpool.h"
#include "../base/func.h"
#include "../base/operand.h"
#include "../base/utils.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [Forward Declarations]
// ============================================================================
struct VirtReg;
struct TiedReg;
struct RAState;
struct RACell;
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::ConstScope]
// ============================================================================
//! Scope of the constant.
//! Scope of the constant, see `CodeCompiler::_newConst()`.
ASMJIT_ENUM(ConstScope) {
  //! Local constant, always embedded right after the current function.
  kConstScopeLocal = 0,
  //! Global constant, embedded at the end of the currently compiled code.
  kConstScopeGlobal = 1
};
// ============================================================================
// [asmjit::VirtReg]
// ============================================================================
//! Virtual register data (CodeCompiler).
struct VirtReg {
  //! A state of a virtual register (used during register allocation).
  ASMJIT_ENUM(State) {
    kStateNone = 0,                      //!< Not allocated, not used.
    kStateReg = 1,                       //!< Allocated in register.
    kStateMem = 2                        //!< Allocated in memory or spilled.
  };

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  // NOTE: All accessors are `noexcept` — previously `setState()`, `setPhysId()`,
  // `resetPhysId()`, `getHomeMask()`, and `addHomeId()` were missing the
  // qualifier, inconsistently with their siblings.

  //! Get the virtual-register id.
  ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
  //! Get virtual-register's name.
  ASMJIT_INLINE const char* getName() const noexcept { return _name; }

  //! Get a physical register type.
  ASMJIT_INLINE uint32_t getType() const noexcept { return _regInfo.getType(); }
  //! Get a physical register kind.
  ASMJIT_INLINE uint32_t getKind() const noexcept { return _regInfo.getKind(); }
  //! Get a physical register size.
  ASMJIT_INLINE uint32_t getRegSize() const noexcept { return _regInfo.getSize(); }
  //! Get a register signature of this virtual register.
  ASMJIT_INLINE uint32_t getSignature() const noexcept { return _regInfo.getSignature(); }

  //! Get a register's type-id, see \ref TypeId.
  ASMJIT_INLINE uint32_t getTypeId() const noexcept { return _typeId; }
  //! Get virtual-register's size.
  ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; }
  //! Get virtual-register's alignment.
  ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; }

  //! Get the virtual-register priority, used by compiler to decide which variable to spill.
  ASMJIT_INLINE uint32_t getPriority() const noexcept { return _priority; }
  //! Set the virtual-register priority (must fit into 8 bits).
  ASMJIT_INLINE void setPriority(uint32_t priority) noexcept {
    ASMJIT_ASSERT(priority <= 0xFF);
    _priority = static_cast<uint8_t>(priority);
  }

  //! Get variable state, only used by `RAPass`.
  ASMJIT_INLINE uint32_t getState() const noexcept { return _state; }
  //! Set variable state, only used by `RAPass` (must fit into 8 bits).
  ASMJIT_INLINE void setState(uint32_t state) noexcept {
    ASMJIT_ASSERT(state <= 0xFF);
    _state = static_cast<uint8_t>(state);
  }

  //! Get register index.
  ASMJIT_INLINE uint32_t getPhysId() const noexcept { return _physId; }
  //! Set register index.
  ASMJIT_INLINE void setPhysId(uint32_t physId) noexcept {
    ASMJIT_ASSERT(physId <= Globals::kInvalidRegId);
    _physId = static_cast<uint8_t>(physId);
  }
  //! Reset register index back to `Globals::kInvalidRegId`.
  ASMJIT_INLINE void resetPhysId() noexcept {
    _physId = static_cast<uint8_t>(Globals::kInvalidRegId);
  }

  //! Get home registers mask.
  ASMJIT_INLINE uint32_t getHomeMask() const noexcept { return _homeMask; }
  //! Add a home register index to the home registers mask.
  ASMJIT_INLINE void addHomeId(uint32_t physId) noexcept { _homeMask |= Utils::mask(physId); }

  //! Get whether this is a fixed register (never reallocated by `RAPass`).
  ASMJIT_INLINE bool isFixed() const noexcept { return static_cast<bool>(_isFixed); }
  //! Get whether the VirtReg is only memory allocated on the stack.
  ASMJIT_INLINE bool isStack() const noexcept { return static_cast<bool>(_isStack); }
  //! Get whether to save variable when it's unused (spill).
  ASMJIT_INLINE bool saveOnUnuse() const noexcept { return static_cast<bool>(_saveOnUnuse); }

  //! Get whether the variable was changed.
  ASMJIT_INLINE bool isModified() const noexcept { return static_cast<bool>(_modified); }
  //! Set whether the variable was changed.
  ASMJIT_INLINE void setModified(bool modified) noexcept { _modified = modified; }

  //! Get home memory offset.
  ASMJIT_INLINE int32_t getMemOffset() const noexcept { return _memOffset; }
  //! Set home memory offset.
  ASMJIT_INLINE void setMemOffset(int32_t offset) noexcept { _memOffset = offset; }

  //! Get home memory cell.
  ASMJIT_INLINE RACell* getMemCell() const noexcept { return _memCell; }
  //! Set home memory cell.
  ASMJIT_INLINE void setMemCell(RACell* cell) noexcept { _memCell = cell; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  uint32_t _id;                          //!< Virtual register id.
  RegInfo _regInfo;                      //!< Physical register info & signature.
  const char* _name;                     //!< Virtual name (user provided).
  uint32_t _size;                        //!< Virtual size (can be smaller than `regInfo._size`).
  uint8_t _typeId;                       //!< Type-id.
  uint8_t _alignment;                    //!< Register's natural alignment (for spilling).
  uint8_t _priority;                     //!< Allocation priority (hint for RAPass that can be ignored).
  uint8_t _isFixed : 1;                  //!< True if this is a fixed register, never reallocated.
  uint8_t _isStack : 1;                  //!< True if the virtual register is only used as a stack.
  uint8_t _isMaterialized : 1;           //!< Register is constant that is easily created by a single instruction.
  uint8_t _saveOnUnuse : 1;              //!< Save on unuse (at end of the variable scope).

  // -------------------------------------------------------------------------
  // The following members are used exclusively by RAPass. They are initialized
  // when the VirtReg is created and then changed during RAPass.
  // -------------------------------------------------------------------------

  uint32_t _raId;                        //!< Register allocator work-id (used by RAPass).
  int32_t _memOffset;                    //!< Home memory offset.
  uint32_t _homeMask;                    //!< Mask of all registers variable has been allocated to.

  uint8_t _state;                        //!< Variable state (connected with actual `RAState)`.
  uint8_t _physId;                       //!< Actual register index (only used by `RAPass)`, during translate.
  uint8_t _modified;                     //!< Whether variable was changed (connected with actual `RAState)`.

  RACell* _memCell;                      //!< Home memory cell, used by `RAPass` (initially nullptr).

  //! Temporary link to TiedReg* used by the `RAPass` used in
  //! various phases, but always set back to nullptr when finished.
  //!
  //! This temporary data is designed to be used by algorithms that need to
  //! store some data into variables themselves during compilation. But it's
  //! expected that after variable is compiled & translated the data is set
  //! back to zero/null. Initial value is nullptr.
  TiedReg* _tied;
};
// ============================================================================
// [asmjit::CCHint]
// ============================================================================
//! Hint for register allocator (CodeCompiler).
class CCHint : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CCHint)

  //! Hint type.
  ASMJIT_ENUM(Hint) {
    kHintAlloc = 0,                      //!< Alloc to physical reg.
    kHintSpill = 1,                      //!< Spill to memory.
    kHintSave = 2,                       //!< Save if modified.
    kHintSaveAndUnuse = 3,               //!< Save if modified and mark it as unused.
    kHintUnuse = 4                       //!< Mark as unused.
  };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CCHint` instance attached to virtual register `vreg`.
  ASMJIT_INLINE CCHint(CodeBuilder* cb, VirtReg* vreg, uint32_t hint, uint32_t value) noexcept
    : CBNode(cb, kNodeHint),
      _vreg(vreg),
      _hint(hint),
      _value(value) {
    orFlags(kFlagIsRemovable | kFlagIsInformative);
  }

  //! Destroy the `CCHint` instance (NEVER CALLED).
  ASMJIT_INLINE ~CCHint() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the virtual register this hint applies to.
  ASMJIT_INLINE VirtReg* getVReg() const noexcept { return _vreg; }

  //! Get hint id, see \ref Hint.
  ASMJIT_INLINE uint32_t getHint() const noexcept { return _hint; }
  //! Set hint id, see \ref Hint.
  ASMJIT_INLINE void setHint(uint32_t hint) noexcept { _hint = hint; }

  //! Get hint value.
  ASMJIT_INLINE uint32_t getValue() const noexcept { return _value; }
  //! Set hint value.
  ASMJIT_INLINE void setValue(uint32_t value) noexcept { _value = value; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  VirtReg* _vreg;                        //!< Virtual register this hint applies to.
  uint32_t _hint;                        //!< Hint id, see \ref Hint.
  uint32_t _value;                       //!< Hint value.
};
// ============================================================================
// [asmjit::CCFunc]
// ============================================================================
//! Function entry (CodeCompiler).
class CCFunc : public CBLabel {
public:
  ASMJIT_NONCOPYABLE(CCFunc)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CCFunc` instance.
  //!
  //! Always use `CodeCompiler::addFunc()` to create \ref CCFunc.
  //!
  //! NOTE: The mem-initializer list follows the member declaration order
  //! (`_funcDetail`, `_frameInfo`, `_exitNode`, `_end`, `_args`,
  //! `_isFinished`) — initializers always run in declaration order, and a
  //! mismatched list triggers `-Wreorder`.
  ASMJIT_INLINE CCFunc(CodeBuilder* cb) noexcept
    : CBLabel(cb),
      _funcDetail(),
      _frameInfo(),
      _exitNode(nullptr),
      _end(nullptr),
      _args(nullptr),
      _isFinished(false) {
    _type = kNodeFunc;
  }

  //! Destroy the `CCFunc` instance (NEVER CALLED).
  ASMJIT_INLINE ~CCFunc() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get function exit `CBLabel`.
  ASMJIT_INLINE CBLabel* getExitNode() const noexcept { return _exitNode; }
  //! Get function exit label.
  ASMJIT_INLINE Label getExitLabel() const noexcept { return _exitNode->getLabel(); }

  //! Get the function end sentinel.
  ASMJIT_INLINE CBSentinel* getEnd() const noexcept { return _end; }

  //! Get function declaration.
  ASMJIT_INLINE FuncDetail& getDetail() noexcept { return _funcDetail; }
  //! Get function declaration.
  ASMJIT_INLINE const FuncDetail& getDetail() const noexcept { return _funcDetail; }

  //! Get function frame information.
  ASMJIT_INLINE FuncFrameInfo& getFrameInfo() noexcept { return _frameInfo; }
  //! Get function frame information.
  ASMJIT_INLINE const FuncFrameInfo& getFrameInfo() const noexcept { return _frameInfo; }

  //! Get arguments count.
  ASMJIT_INLINE uint32_t getArgCount() const noexcept { return _funcDetail.getArgCount(); }
  //! Get returns count.
  ASMJIT_INLINE uint32_t getRetCount() const noexcept { return _funcDetail.getRetCount(); }

  //! Get arguments list.
  ASMJIT_INLINE VirtReg** getArgs() const noexcept { return _args; }

  //! Get argument at `i`.
  ASMJIT_INLINE VirtReg* getArg(uint32_t i) const noexcept {
    ASMJIT_ASSERT(i < getArgCount());
    return _args[i];
  }

  //! Set argument at `i`.
  ASMJIT_INLINE void setArg(uint32_t i, VirtReg* vreg) noexcept {
    ASMJIT_ASSERT(i < getArgCount());
    _args[i] = vreg;
  }

  //! Reset argument at `i`.
  ASMJIT_INLINE void resetArg(uint32_t i) noexcept {
    ASMJIT_ASSERT(i < getArgCount());
    _args[i] = nullptr;
  }

  //! Get frame-info attributes, see `FuncFrameInfo`.
  ASMJIT_INLINE uint32_t getAttributes() const noexcept { return _frameInfo.getAttributes(); }
  //! Add frame-info attributes, see `FuncFrameInfo`.
  ASMJIT_INLINE void addAttributes(uint32_t attrs) noexcept { _frameInfo.addAttributes(attrs); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  FuncDetail _funcDetail;                //!< Function detail.
  FuncFrameInfo _frameInfo;              //!< Function frame information.

  CBLabel* _exitNode;                    //!< Function exit.
  CBSentinel* _end;                      //!< Function end.

  VirtReg** _args;                       //!< Arguments array as `VirtReg`.

  //! Function was finished by `Compiler::endFunc()`.
  uint8_t _isFinished;
};
// ============================================================================
// [asmjit::CCFuncRet]
// ============================================================================
//! Function return (CodeCompiler).
class CCFuncRet : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CCFuncRet)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CCFuncRet` instance.
  //!
  //! Both operands are copied into the node's two return slots (`_ret[0]`
  //! and `_ret[1]`) and the node is flagged with `kFlagIsRet`.
  ASMJIT_INLINE CCFuncRet(CodeBuilder* cb, const Operand_& o0, const Operand_& o1) noexcept : CBNode(cb, kNodeFuncExit) {
    orFlags(kFlagIsRet);
    _ret[0].copyFrom(o0);
    _ret[1].copyFrom(o1);
  }

  //! Destroy the `CCFuncRet` instance (NEVER CALLED).
  ASMJIT_INLINE ~CCFuncRet() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the first return operand.
  ASMJIT_INLINE Operand& getFirst() noexcept { return static_cast<Operand&>(_ret[0]); }
  //! \overload
  ASMJIT_INLINE const Operand& getFirst() const noexcept { return static_cast<const Operand&>(_ret[0]); }

  //! Get the second return operand.
  ASMJIT_INLINE Operand& getSecond() noexcept { return static_cast<Operand&>(_ret[1]); }
  //! \overload
  ASMJIT_INLINE const Operand& getSecond() const noexcept { return static_cast<const Operand&>(_ret[1]); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Return operands (two slots).
  Operand_ _ret[2];
};
// ============================================================================
// [asmjit::CCFuncCall]
// ============================================================================
//! Function call (CodeCompiler).
class CCFuncCall : public CBInst {
public:
  ASMJIT_NONCOPYABLE(CCFuncCall)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CCFuncCall` instance.
  //!
  //! The call target lives in `opArray[0]` (see `getTarget()`); both return
  //! slots start as none and `_args` starts as nullptr.
  ASMJIT_INLINE CCFuncCall(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept
    : CBInst(cb, instId, options, opArray, opCount),
      _funcDetail(),
      _args(nullptr) {
    _type = kNodeFuncCall;
    _ret[0].reset();
    _ret[1].reset();
    orFlags(kFlagIsRemovable);
  }

  //! Destroy the `CCFuncCall` instance (NEVER CALLED).
  ASMJIT_INLINE ~CCFuncCall() noexcept {}

  // --------------------------------------------------------------------------
  // [Signature]
  // --------------------------------------------------------------------------

  //! Set function signature (initializes `_funcDetail`).
  ASMJIT_INLINE Error setSignature(const FuncSignature& sign) noexcept {
    return _funcDetail.init(sign);
  }

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get function declaration.
  ASMJIT_INLINE FuncDetail& getDetail() noexcept { return _funcDetail; }
  //! Get function declaration.
  ASMJIT_INLINE const FuncDetail& getDetail() const noexcept { return _funcDetail; }

  //! Get target operand (operand 0 of the call instruction).
  ASMJIT_INLINE Operand& getTarget() noexcept { return static_cast<Operand&>(_opArray[0]); }
  //! \overload
  ASMJIT_INLINE const Operand& getTarget() const noexcept { return static_cast<const Operand&>(_opArray[0]); }

  //! Get return at `i` (two return slots are available).
  ASMJIT_INLINE Operand& getRet(uint32_t i = 0) noexcept {
    ASMJIT_ASSERT(i < 2);
    return static_cast<Operand&>(_ret[i]);
  }
  //! \overload
  ASMJIT_INLINE const Operand& getRet(uint32_t i = 0) const noexcept {
    ASMJIT_ASSERT(i < 2);
    return static_cast<const Operand&>(_ret[i]);
  }

  //! Get argument at `i`; `i` must be less than `kFuncArgCountLoHi`.
  ASMJIT_INLINE Operand& getArg(uint32_t i) noexcept {
    ASMJIT_ASSERT(i < kFuncArgCountLoHi);
    return static_cast<Operand&>(_args[i]);
  }
  //! \overload
  ASMJIT_INLINE const Operand& getArg(uint32_t i) const noexcept {
    ASMJIT_ASSERT(i < kFuncArgCountLoHi);
    return static_cast<const Operand&>(_args[i]);
  }

  //! Set argument at `i` to `op` (out-of-line implementation).
  ASMJIT_API bool _setArg(uint32_t i, const Operand_& op) noexcept;
  //! Set return at `i` to `op` (out-of-line implementation).
  ASMJIT_API bool _setRet(uint32_t i, const Operand_& op) noexcept;

  //! Set argument at `i` to `reg`.
  ASMJIT_INLINE bool setArg(uint32_t i, const Reg& reg) noexcept { return _setArg(i, reg); }
  //! Set argument at `i` to `imm`.
  ASMJIT_INLINE bool setArg(uint32_t i, const Imm& imm) noexcept { return _setArg(i, imm); }
  //! Set return at `i` to `reg`.
  ASMJIT_INLINE bool setRet(uint32_t i, const Reg& reg) noexcept { return _setRet(i, reg); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  FuncDetail _funcDetail;                //!< Function detail.
  Operand_ _ret[2];                      //!< Return operand slots.
  Operand_* _args;                       //!< Arguments (nullptr after construction).
};
// ============================================================================
// [asmjit::CCPushArg]
// ============================================================================
//! Push argument before a function call (CodeCompiler).
class CCPushArg : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CCPushArg)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CCPushArg` instance linked to the function-call `call`.
  //!
  //! `src` is the argument's source register; `cvt` is a temporary register
  //! used for conversion (or null).
  ASMJIT_INLINE CCPushArg(CodeBuilder* cb, CCFuncCall* call, VirtReg* src, VirtReg* cvt) noexcept
    : CBNode(cb, kNodePushArg),
      _call(call),
      _src(src),
      _cvt(cvt),
      _args(0) {
    orFlags(kFlagIsRemovable);
  }

  //! Destroy the `CCPushArg` instance.
  ASMJIT_INLINE ~CCPushArg() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the associated function-call.
  ASMJIT_INLINE CCFuncCall* getCall() const noexcept { return _call; }
  //! Get source variable.
  ASMJIT_INLINE VirtReg* getSrcReg() const noexcept { return _src; }
  //! Get conversion variable.
  ASMJIT_INLINE VirtReg* getCvtReg() const noexcept { return _cvt; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  CCFuncCall* _call;                     //!< Associated `CCFuncCall`.
  VirtReg* _src;                         //!< Source variable.
  VirtReg* _cvt;                         //!< Temporary variable used for conversion (or null).
  uint32_t _args;                        //!< Affected arguments bit-array.
};
// ============================================================================
// [asmjit::CodeCompiler]
// ============================================================================
//! Code emitter that uses virtual registers and performs register allocation.
//!
//! Compiler is a high-level code-generation tool that provides register
//! allocation and automatic handling of function calling conventions. It was
//! primarily designed for merging multiple parts of code into a function
//! without worrying about registers and function calling conventions.
//!
//! CodeCompiler can be used, with a minimum effort, to handle 32-bit and 64-bit
//! code at the same time.
//!
//! CodeCompiler is based on CodeBuilder and contains all the features it
//! provides. It means that the code it stores can be modified (removed, added,
//! injected) and analyzed. When the code is finalized the compiler can emit
//! the code into an Assembler to translate the abstract representation into a
//! machine code.
class ASMJIT_VIRTAPI CodeCompiler : public CodeBuilder {
public:
  ASMJIT_NONCOPYABLE(CodeCompiler)
  typedef CodeBuilder Base;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CodeCompiler` instance.
  ASMJIT_API CodeCompiler() noexcept;
  //! Destroy the `CodeCompiler` instance.
  ASMJIT_API virtual ~CodeCompiler() noexcept;

  // --------------------------------------------------------------------------
  // [Events]
  // --------------------------------------------------------------------------

  //! Called when the compiler is attached to a `CodeHolder`.
  ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override;
  //! Called when the compiler is detached from a `CodeHolder`.
  ASMJIT_API virtual Error onDetach(CodeHolder* code) noexcept override;

  // --------------------------------------------------------------------------
  // [Node-Factory]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Create a new `CCHint`.
  ASMJIT_API CCHint* newHintNode(Reg& reg, uint32_t hint, uint32_t value) noexcept;

  // --------------------------------------------------------------------------
  // [Func]
  // --------------------------------------------------------------------------

  //! Get the current function.
  ASMJIT_INLINE CCFunc* getFunc() const noexcept { return _func; }

  //! Create a new `CCFunc` (without adding it to the stream).
  ASMJIT_API CCFunc* newFunc(const FuncSignature& sign) noexcept;
  //! Add a function `node` to the stream.
  ASMJIT_API CCFunc* addFunc(CCFunc* func);
  //! Add a new function.
  ASMJIT_API CCFunc* addFunc(const FuncSignature& sign);
  //! Emit a sentinel that marks the end of the current function.
  ASMJIT_API CBSentinel* endFunc();

  // --------------------------------------------------------------------------
  // [Ret]
  // --------------------------------------------------------------------------

  //! Create a new `CCFuncRet` (without adding it to the stream).
  ASMJIT_API CCFuncRet* newRet(const Operand_& o0, const Operand_& o1) noexcept;
  //! Add a new `CCFuncRet`.
  ASMJIT_API CCFuncRet* addRet(const Operand_& o0, const Operand_& o1) noexcept;

  // --------------------------------------------------------------------------
  // [Call]
  // --------------------------------------------------------------------------

  //! Create a new `CCFuncCall` (without adding it to the stream).
  ASMJIT_API CCFuncCall* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept;
  //! Add a new `CCFuncCall`.
  ASMJIT_API CCFuncCall* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept;

  // --------------------------------------------------------------------------
  // [Args]
  // --------------------------------------------------------------------------

  //! Set a function argument at `argIndex` to `reg`.
  ASMJIT_API Error setArg(uint32_t argIndex, const Reg& reg);

  // --------------------------------------------------------------------------
  // [Hint]
  // --------------------------------------------------------------------------

  //! Emit a new hint (purely informational node), see \ref CCHint::Hint.
  ASMJIT_API Error _hint(Reg& reg, uint32_t hint, uint32_t value);

  // --------------------------------------------------------------------------
  // [VirtReg / Stack]
  // --------------------------------------------------------------------------

  //! Create a new virtual register representing the given `vti` and `signature`.
  //!
  //! This function accepts either register type representing a machine-specific
  //! register, like `X86Reg`, or RegTag representation, which represents
  //! machine independent register, and from the machine-specific register
  //! is deduced.
  ASMJIT_API VirtReg* newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept;

  //! \internal Create a virtual register `out` of `typeId` named `name`.
  ASMJIT_API Error _newReg(Reg& out, uint32_t typeId, const char* name);
  //! \internal \overload (printf-style name formatting).
  ASMJIT_API Error _newReg(Reg& out, uint32_t typeId, const char* nameFmt, va_list ap);
  //! \internal Create a virtual register `out` compatible with `ref`.
  ASMJIT_API Error _newReg(Reg& out, const Reg& ref, const char* name);
  //! \internal \overload (printf-style name formatting).
  ASMJIT_API Error _newReg(Reg& out, const Reg& ref, const char* nameFmt, va_list ap);
  //! \internal Create a stack slot of `size` bytes aligned to `alignment`.
  ASMJIT_API Error _newStack(Mem& out, uint32_t size, uint32_t alignment, const char* name);
  //! \internal Embed a constant; `scope` selects local/global, see \ref ConstScope.
  ASMJIT_API Error _newConst(Mem& out, uint32_t scope, const void* data, size_t size);

  // --------------------------------------------------------------------------
  // [VirtReg]
  // --------------------------------------------------------------------------

  //! Get whether the virtual register `reg` is valid.
  ASMJIT_INLINE bool isVirtRegValid(const Reg& reg) const noexcept {
    return isVirtRegValid(reg.getId());
  }
  //! \overload
  ASMJIT_INLINE bool isVirtRegValid(uint32_t id) const noexcept {
    // Packed operand ids are unpacked into `_vRegArray` indexes.
    size_t index = Operand::unpackId(id);
    return index < _vRegArray.getLength();
  }

  //! Get \ref VirtReg associated with the given `reg`.
  ASMJIT_INLINE VirtReg* getVirtReg(const Reg& reg) const noexcept {
    return getVirtRegById(reg.getId());
  }
  //! Get \ref VirtReg associated with the given `id` (must be valid).
  ASMJIT_INLINE VirtReg* getVirtRegById(uint32_t id) const noexcept {
    ASMJIT_ASSERT(id != kInvalidValue);
    size_t index = Operand::unpackId(id);

    ASMJIT_ASSERT(index < _vRegArray.getLength());
    return _vRegArray[index];
  }

  //! Get an array of all virtual registers managed by CodeCompiler.
  ASMJIT_INLINE const ZoneVector<VirtReg*>& getVirtRegArray() const noexcept { return _vRegArray; }

  //! Alloc a virtual register `reg`.
  ASMJIT_API Error alloc(Reg& reg);
  //! Alloc a virtual register `reg` using `physId` as a register id.
  ASMJIT_API Error alloc(Reg& reg, uint32_t physId);
  //! Alloc a virtual register `reg` using `ref` as a register operand.
  ASMJIT_API Error alloc(Reg& reg, const Reg& ref);
  //! Spill a virtual register `reg`.
  ASMJIT_API Error spill(Reg& reg);
  //! Save a virtual register `reg` if the status is `modified` at this point.
  ASMJIT_API Error save(Reg& reg);
  //! Unuse a virtual register `reg`.
  ASMJIT_API Error unuse(Reg& reg);

  //! Get priority of a virtual register `reg`.
  ASMJIT_API uint32_t getPriority(Reg& reg) const;
  //! Set priority of variable `reg` to `priority`.
  ASMJIT_API void setPriority(Reg& reg, uint32_t priority);

  //! Get save-on-unuse `reg` property.
  ASMJIT_API bool getSaveOnUnuse(Reg& reg) const;
  //! Set save-on-unuse `reg` property to `value`.
  ASMJIT_API void setSaveOnUnuse(Reg& reg, bool value);

  //! Rename variable `reg` to `name`.
  //!
  //! NOTE: Only new name will appear in the logger.
  ASMJIT_API void rename(Reg& reg, const char* fmt, ...);

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  CCFunc* _func;                         //!< Current function.

  Zone _vRegZone;                        //!< Allocates \ref VirtReg objects.
  ZoneVector<VirtReg*> _vRegArray;       //!< Stores array of \ref VirtReg pointers.

  CBConstPool* _localConstPool;          //!< Local constant pool, flushed at the end of each function.
  CBConstPool* _globalConstPool;         //!< Global constant pool, flushed at the end of the compilation.
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_BASE_CODECOMPILER_H

View File

@@ -0,0 +1,292 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/assembler.h"
#include "../base/utils.h"
#include "../base/vmem.h"
#if defined(ASMJIT_BUILD_X86)
#include "../x86/x86inst.h"
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
#include "../arm/arminst.h"
#endif // ASMJIT_BUILD_ARM
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CodeEmitter - Construction / Destruction]
// ============================================================================
// Construct a detached emitter of the given `type`. The emitter starts with
// `_lastError = kErrorNotInitialized`, which is cleared by `onAttach()` once
// it's attached to a `CodeHolder`.
CodeEmitter::CodeEmitter(uint32_t type) noexcept
  : _codeInfo(),
    _code(nullptr),                      // Not attached to a CodeHolder yet.
    _nextEmitter(nullptr),
    _type(static_cast<uint8_t>(type)),
    _destroyed(false),
    _finalized(false),
    _reserved(false),
    _lastError(kErrorNotInitialized),    // Cleared by `onAttach()`.
    _privateData(0),
    _globalHints(0),
    _globalOptions(kOptionMaybeFailureCase),
    _options(0),
    _inlineComment(nullptr),
    _op4(),
    _op5(),
    _opExtra(),
    _none(),
    _nativeGpReg(),
    _nativeGpArray(nullptr) {}
CodeEmitter::~CodeEmitter() noexcept {
  // Nothing to do unless we are still attached to a CodeHolder.
  if (!_code) return;

  // Let `detach()` know it's being called from the destructor.
  _destroyed = true;
  _code->detach(this);
}
// ============================================================================
// [asmjit::CodeEmitter - Events]
// ============================================================================
Error CodeEmitter::onAttach(CodeHolder* code) noexcept {
  // Inherit the global state from the CodeHolder this emitter was attached to.
  _globalHints = code->getGlobalHints();
  _globalOptions = code->getGlobalOptions();
  _codeInfo = code->getCodeInfo();

  // Attaching clears the `kErrorNotInitialized` state set by the constructor.
  _lastError = kErrorOk;
  return kErrorOk;
}
// Reset the emitter back to the same state the constructor establishes
// (`kErrorNotInitialized`, `kOptionMaybeFailureCase`, everything else zeroed)
// so it can be re-attached later.
Error CodeEmitter::onDetach(CodeHolder* code) noexcept {
  _codeInfo.reset();
  _finalized = false;
  _lastError = kErrorNotInitialized;

  _privateData = 0;
  _globalHints = 0;
  _globalOptions = kOptionMaybeFailureCase;

  _options = 0;
  _inlineComment = nullptr;
  _op4.reset();
  _op5.reset();
  _opExtra.reset();

  _nativeGpReg.reset();
  _nativeGpArray = nullptr;

  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeEmitter - Label Management]
// ============================================================================
Label CodeEmitter::getLabelByName(const char* name, size_t nameLength, uint32_t parentId) noexcept {
  // A detached emitter cannot resolve names; return a label with a zero id.
  uint32_t id = 0;
  if (_code)
    id = _code->getLabelIdByName(name, nameLength, parentId);
  return Label(id);
}
// ============================================================================
// [asmjit::CodeEmitter - Finalize]
// ============================================================================
// NOTE: The base implementation only returns `kErrorOk`.
Error CodeEmitter::finalize() {
  // Finalization does nothing by default, overridden by `CodeBuilder`.
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeEmitter - Error Handling]
// ============================================================================
// Set (or, with `kErrorOk`, clear) the emitter's sticky error. When an
// `ErrorHandler` is installed on the CodeHolder it gets the first chance to
// consume the error; only if it doesn't is the error stored in `_lastError`.
Error CodeEmitter::setLastError(Error error, const char* message) {
  // This is fatal, CodeEmitter can't set error without being attached to `CodeHolder`.
  ASMJIT_ASSERT(_code != nullptr);

  // Special case used to reset the last error.
  if (error == kErrorOk) {
    _lastError = kErrorOk;
    _globalOptions &= ~kOptionMaybeFailureCase;
    return kErrorOk;
  }

  // Fall back to the stock message for this error code.
  if (!message)
    message = DebugUtils::errorAsString(error);

  // Logging is skipped if the error is handled by `ErrorHandler`.
  ErrorHandler* handler = _code->_errorHandler;
  if (handler && handler->handleError(error, message, this))
    return error;

  // The handler->handleError() function may throw an exception or longjmp()
  // to terminate the execution of `setLastError()`. This is the reason why
  // we have delayed changing the `_error` member until now.
  _lastError = error;
  return error;
}
// ============================================================================
// [asmjit::CodeEmitter - Helpers]
// ============================================================================
bool CodeEmitter::isLabelValid(uint32_t id) const noexcept {
size_t index = Operand::unpackId(id);
return _code && index < _code->_labels.getLength();
}
// Add a comment formatted printf-style. Returns the sticky error if one is
// set, otherwise the result of logging the comment (or `kErrorOk` when
// logging is disabled or not compiled in).
Error CodeEmitter::commentf(const char* fmt, ...) {
  Error err = _lastError;
  if (err) return err;

#if !defined(ASMJIT_DISABLE_LOGGING)
  if (_globalOptions & kOptionLoggingEnabled) {
    va_list ap;
    va_start(ap, fmt);
    // BUG FIX: assign to the outer `err` instead of shadowing it with a new
    // `Error err` declaration, so a logging failure is actually propagated
    // to the caller (matches `commentv()` below).
    err = _code->_logger->logv(fmt, ap);
    va_end(ap);
  }
#else
  ASMJIT_UNUSED(fmt);
#endif

  return err;
}
// Add a comment using a `va_list` already started by the caller.
Error CodeEmitter::commentv(const char* fmt, va_list ap) {
  // Propagate the sticky error, if any.
  if (_lastError)
    return _lastError;

#if !defined(ASMJIT_DISABLE_LOGGING)
  if (_globalOptions & kOptionLoggingEnabled)
    return _code->_logger->logv(fmt, ap);
#else
  ASMJIT_UNUSED(fmt);
  ASMJIT_UNUSED(ap);
#endif

  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeEmitter - Emit]
// ============================================================================
#define OP const Operand_&
#define NO _none

// Operand-only overloads (0..5 operands). All funnel into `_emit()`, padding
// missing operands with `_none`; the 5th operand is stashed into `_op4` and
// advertised via `kOptionOp4`.
Error CodeEmitter::emit(uint32_t instId) { return _emit(instId, NO, NO, NO, NO); }
Error CodeEmitter::emit(uint32_t instId, OP o0) { return _emit(instId, o0, NO, NO, NO); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1) { return _emit(instId, o0, o1, NO, NO); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2) { return _emit(instId, o0, o1, o2, NO); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3) { return _emit(instId, o0, o1, o2, o3); }

Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4) {
  _op4 = o4;
  if (!o4.isNone()) _options |= kOptionOp4;
  return _emit(instId, o0, o1, o2, o3);
}
// Six-operand overload — `o4`/`o5` go through `_op4`/`_op5` and are flagged
// only when non-none.
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, OP o5) {
  _op4 = o4;
  _op5 = o5;

  if (!o4.isNone()) _options |= kOptionOp4;
  if (!o5.isNone()) _options |= kOptionOp5;
  return _emit(instId, o0, o1, o2, o3);
}

// Overloads taking a trailing `int` immediate (wrapped into `Imm`).
Error CodeEmitter::emit(uint32_t instId, int o0) { return _emit(instId, Imm(o0), NO, NO, NO); }
Error CodeEmitter::emit(uint32_t instId, OP o0, int o1) { return _emit(instId, o0, Imm(o1), NO, NO); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, int o2) { return _emit(instId, o0, o1, Imm(o2), NO); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, int o3) { return _emit(instId, o0, o1, o2, Imm(o3)); }

Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int o4) {
  // `Imm(o4)` is never a none operand, so `kOptionOp4` is set unconditionally.
  _options |= kOptionOp4;
  _op4 = Imm(o4);
  return _emit(instId, o0, o1, o2, o3);
}

Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int o5) {
  _op4 = o4;
  _op5 = Imm(o5);

  // NOTE(review): `kOptionOp4` is set here even when `o4` is none, unlike the
  // all-operand overload above which checks `o4.isNone()` — confirm this
  // asymmetry is intentional.
  _options |= kOptionOp4 | kOptionOp5;
  return _emit(instId, o0, o1, o2, o3);
}
// Overloads taking a trailing `int64_t` immediate (wrapped into `Imm`).
Error CodeEmitter::emit(uint32_t instId, int64_t o0) { return _emit(instId, Imm(o0), NO, NO, NO); }
Error CodeEmitter::emit(uint32_t instId, OP o0, int64_t o1) { return _emit(instId, o0, Imm(o1), NO, NO); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, int64_t o2) { return _emit(instId, o0, o1, Imm(o2), NO); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, int64_t o3) { return _emit(instId, o0, o1, o2, Imm(o3)); }

Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int64_t o4) {
  // `Imm(o4)` is never a none operand, so `kOptionOp4` is set unconditionally.
  _options |= kOptionOp4;
  _op4 = Imm(o4);
  return _emit(instId, o0, o1, o2, o3);
}

Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int64_t o5) {
  _op4 = o4;
  _op5 = Imm(o5);

  // NOTE(review): `kOptionOp4` is set here even when `o4` is none, unlike the
  // six-operand overload which checks `o4.isNone()` — confirm this asymmetry
  // is intentional.
  _options |= kOptionOp4 | kOptionOp5;
  return _emit(instId, o0, o1, o2, o3);
}

#undef NO
#undef OP
// ============================================================================
// [asmjit::CodeEmitter - Validation]
// ============================================================================
Error CodeEmitter::_validate(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) const noexcept {
#if !defined(ASMJIT_DISABLE_VALIDATION)
  // Merge global options with per-instruction options; they decide whether
  // the 5th/6th operand slots are actually in use.
  uint32_t options = getGlobalOptions() | getOptions();
  uint32_t archType = getArchType();

  // Build the full 6-operand array the validator expects.
  Operand_ ops[6];
  ops[0].copyFrom(o0);
  ops[1].copyFrom(o1);
  ops[2].copyFrom(o2);
  ops[3].copyFrom(o3);

  if (options & CodeEmitter::kOptionOp4)
    ops[4].copyFrom(_op4);
  else
    ops[4].reset();

  if (options & CodeEmitter::kOptionOp5)
    ops[5].copyFrom(_op5);
  else
    ops[5].reset();

  // Dispatch to the architecture-specific validator.
#if defined(ASMJIT_BUILD_X86)
  if (ArchInfo::isX86Family(archType))
    return X86Inst::validate(archType, instId, options, _opExtra, ops, 6);
#endif

#if defined(ASMJIT_BUILD_ARM)
  if (ArchInfo::isArmFamily(archType))
    return ArmInst::validate(archType, instId, options, _opExtra, ops, 6);
#endif

  return DebugUtils::errored(kErrorInvalidArch);
#else
  return DebugUtils::errored(kErrorFeatureNotEnabled);
#endif // !ASMJIT_DISABLE_VALIDATION
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View File

@@ -0,0 +1,508 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CODEEMITTER_H
#define _ASMJIT_BASE_CODEEMITTER_H
// [Dependencies]
#include "../base/arch.h"
#include "../base/codeholder.h"
#include "../base/operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class ConstPool;
// ============================================================================
// [asmjit::CodeEmitter]
// ============================================================================
//! Provides a base foundation to emit code - specialized by \ref Assembler and
//! \ref CodeBuilder.
class ASMJIT_VIRTAPI CodeEmitter {
public:
  //! CodeEmitter type.
  ASMJIT_ENUM(Type) {
    kTypeNone      = 0,
    kTypeAssembler = 1,
    kTypeBuilder   = 2,
    kTypeCompiler  = 3,
    kTypeCount     = 4
  };

  //! CodeEmitter hints - global settings that affect machine-code generation.
  ASMJIT_ENUM(Hints) {
    //! Emit optimized code-alignment sequences.
    //!
    //! Default `true`.
    //!
    //! X86/X64 Specific
    //! ----------------
    //!
    //! Default align sequence used by X86/X64 architecture is one-byte (0x90)
    //! opcode that is often shown by disassemblers as nop. However there are
    //! more optimized align sequences for 2-11 bytes that may execute faster.
    //! If this feature is enabled AsmJit will generate specialized sequences
    //! for alignment between 2 to 11 bytes.
    kHintOptimizedAlign = 0x00000001U,

    //! Emit jump-prediction hints.
    //!
    //! Default `false`.
    //!
    //! X86/X64 Specific
    //! ----------------
    //!
    //! Jump prediction is usually based on the direction of the jump. If the
    //! jump is backward it is usually predicted as taken; and if the jump is
    //! forward it is usually predicted as not-taken. The reason is that loops
    //! generally use backward jumps and conditions usually use forward jumps.
    //! However this behavior can be overridden by using instruction prefixes.
    //! If this option is enabled these hints will be emitted.
    //!
    //! This feature is disabled by default, because the only processor that
    //! used to take into consideration prediction hints was P4. Newer processors
    //! implement heuristics for branch prediction that ignores any static hints.
    kHintPredictedJumps = 0x00000002U
  };

  //! CodeEmitter options that are merged with instruction options.
  ASMJIT_ENUM(Options) {
    //! Reserved, used to check for errors in `Assembler::_emit()`. In addition,
    //! if an emitter is in error state it will have `kOptionMaybeFailureCase`
    //! set.
    kOptionMaybeFailureCase = 0x00000001U,

    //! Perform a strict validation before the instruction is emitted.
    kOptionStrictValidation = 0x00000002U,

    //! Logging is enabled and `CodeHolder::getLogger()` should return a valid
    //! \ref Logger pointer.
    kOptionLoggingEnabled = 0x00000004U,

    //! Mask of all internal options that are not used to represent instruction
    //! options, but are used to instrument Assembler and CodeBuilder. These
    //! options are internal and should not be used outside of AsmJit itself.
    //!
    //! NOTE: Reserved options should never appear in `CBInst` options.
    kOptionReservedMask = 0x00000007U,

    //! Instruction has `_op4` (5th operand, indexed from zero).
    kOptionOp4 = 0x00000008U,
    //! Instruction has `_op5` (6th operand, indexed from zero).
    kOptionOp5 = 0x00000010U,
    //! Instruction has `_opExtra` operand (mask-op {k} operand when using AVX-512).
    kOptionOpExtra = 0x00000020U,

    //! Prevents following a jump during compilation (CodeCompiler).
    kOptionUnfollow = 0x00000040U,

    //! Overwrite the destination operand (CodeCompiler).
    //!
    //! Hint that is important for register liveness analysis. It tells the
    //! compiler that the destination operand will be overwritten now or by
    //! adjacent instructions. CodeCompiler knows when a register is completely
    //! overwritten by a single instruction, for example you don't have to
    //! mark "movaps" or "pxor x, x", however, if a pair of instructions is
    //! used and the first of them doesn't completely overwrite the content
    //! of the destination, CodeCompiler fails to mark that register as dead.
    //!
    //! X86/X64 Specific
    //! ----------------
    //!
    //!   - All instructions that always overwrite at least the size of the
    //!     register the virtual-register uses, for example "mov", "movq",
    //!     "movaps" don't need the overwrite option to be used - conversion,
    //!     shuffle, and other miscellaneous instructions included.
    //!
    //!   - All instructions that clear the destination register if all operands
    //!     are the same, for example "xor x, x", "pcmpeqb x, x", etc...
    //!
    //!   - Consecutive instructions that partially overwrite the variable until
    //!     there is no old content require the `overwrite()` to be used. Some
    //!     examples (not always the best use cases though):
    //!
    //!     - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
    //!     - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
    //!     - `mov al, ?` followed by `and ax, 0xFF`
    //!     - `mov al, ?` followed by `mov ah, al`
    //!     - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
    //!
    //!   - If allocated variable is used temporarily for scalar operations. For
    //!     example if you allocate a full vector like `X86Compiler::newXmm()`
    //!     and then use that vector for scalar operations you should use
    //!     `overwrite()` directive:
    //!
    //!     - `sqrtss x, y` - only LO element of `x` is changed, if you don't use
    //!       HI elements, use `X86Compiler.overwrite().sqrtss(x, y)`.
    kOptionOverwrite = 0x00000080U
  };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  ASMJIT_API CodeEmitter(uint32_t type) noexcept;
  ASMJIT_API virtual ~CodeEmitter() noexcept;

  // --------------------------------------------------------------------------
  // [Events]
  // --------------------------------------------------------------------------

  //! Called after the \ref CodeEmitter was attached to the \ref CodeHolder.
  virtual Error onAttach(CodeHolder* code) noexcept = 0;
  //! Called after the \ref CodeEmitter was detached from the \ref CodeHolder.
  virtual Error onDetach(CodeHolder* code) noexcept = 0;

  // --------------------------------------------------------------------------
  // [Code-Generation]
  // --------------------------------------------------------------------------

  //! Emit instruction.
  virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) = 0;
  //! Create a new label.
  virtual Label newLabel() = 0;
  //! Create a new named label.
  virtual Label newNamedLabel(
    const char* name,
    size_t nameLength = Globals::kInvalidIndex,
    uint32_t type = Label::kTypeGlobal,
    uint32_t parentId = 0) = 0;

  //! Get a label by name.
  //!
  //! Returns invalid Label in case that the name is invalid or label was not found.
  //!
  //! NOTE: This function doesn't trigger ErrorHandler in case the name is
  //! invalid or no such label exist. You must always check the validity of the
  //! \ref Label returned.
  ASMJIT_API Label getLabelByName(
    const char* name,
    size_t nameLength = Globals::kInvalidIndex,
    uint32_t parentId = 0) noexcept;

  //! Bind the `label` to the current position of the current section.
  //!
  //! NOTE: Attempt to bind the same label multiple times will return an error.
  virtual Error bind(const Label& label) = 0;

  //! Align to the `alignment` specified.
  //!
  //! The sequence that is used to fill the gap between the aligned location
  //! and the current location depends on the align `mode`, see \ref AlignMode.
  virtual Error align(uint32_t mode, uint32_t alignment) = 0;

  //! Embed raw data into the code-buffer.
  virtual Error embed(const void* data, uint32_t size) = 0;

  //! Embed absolute label address as data (4 or 8 bytes).
  virtual Error embedLabel(const Label& label) = 0;

  //! Embed a constant pool into the code-buffer in the following steps:
  //!   1. Align by using kAlignData to the minimum `pool` alignment.
  //!   2. Bind `label` so it's bound to an aligned location.
  //!   3. Emit constant pool data.
  virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0;

  //! Emit a comment string `s` with an optional `len` parameter.
  virtual Error comment(const char* s, size_t len = Globals::kInvalidIndex) = 0;

  // --------------------------------------------------------------------------
  // [Code-Generation Status]
  // --------------------------------------------------------------------------

  //! Get if the CodeEmitter is initialized (i.e. attached to a \ref CodeHolder).
  ASMJIT_INLINE bool isInitialized() const noexcept { return _code != nullptr; }

  //! Finalize the code-generation (used by CodeBuilder / CodeCompiler).
  ASMJIT_API virtual Error finalize();

  // --------------------------------------------------------------------------
  // [Code Information]
  // --------------------------------------------------------------------------

  //! Get information about the code, see \ref CodeInfo.
  ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; }
  //! Get \ref CodeHolder this CodeEmitter is attached to.
  ASMJIT_INLINE CodeHolder* getCode() const noexcept { return _code; }

  //! Get information about the architecture, see \ref ArchInfo.
  ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _codeInfo.getArchInfo(); }

  //! Get if the target architecture is 32-bit.
  ASMJIT_INLINE bool is32Bit() const noexcept { return getArchInfo().is32Bit(); }
  //! Get if the target architecture is 64-bit.
  ASMJIT_INLINE bool is64Bit() const noexcept { return getArchInfo().is64Bit(); }

  //! Get the target architecture type.
  ASMJIT_INLINE uint32_t getArchType() const noexcept { return getArchInfo().getType(); }
  //! Get the target architecture sub-type.
  ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return getArchInfo().getSubType(); }
  //! Get the target architecture's GP register size (4 or 8 bytes).
  ASMJIT_INLINE uint32_t getGpSize() const noexcept { return getArchInfo().getGpSize(); }
  //! Get the number of target GP registers.
  ASMJIT_INLINE uint32_t getGpCount() const noexcept { return getArchInfo().getGpCount(); }

  // --------------------------------------------------------------------------
  // [Code-Emitter Type]
  // --------------------------------------------------------------------------

  //! Get the type of this CodeEmitter, see \ref Type.
  ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
  //! Get if this emitter is an \ref Assembler.
  ASMJIT_INLINE bool isAssembler() const noexcept { return _type == kTypeAssembler; }
  //! Get if this emitter is a \ref CodeBuilder.
  ASMJIT_INLINE bool isCodeBuilder() const noexcept { return _type == kTypeBuilder; }
  //! Get if this emitter is a \ref CodeCompiler.
  ASMJIT_INLINE bool isCodeCompiler() const noexcept { return _type == kTypeCompiler; }

  // --------------------------------------------------------------------------
  // [Global Information]
  // --------------------------------------------------------------------------

  //! Get global hints.
  ASMJIT_INLINE uint32_t getGlobalHints() const noexcept { return _globalHints; }

  //! Get global options.
  //!
  //! Global options are merged with instruction options before the instruction
  //! is encoded. These options have some bits reserved that are used for error
  //! checking, logging, and strict validation. Other options are globals that
  //! affect each instruction, for example if VEX3 is set globally, it will
  //! affect all instructions, even those that don't have such option set.
  ASMJIT_INLINE uint32_t getGlobalOptions() const noexcept { return _globalOptions; }

  // --------------------------------------------------------------------------
  // [Error Handling]
  // --------------------------------------------------------------------------

  //! Get if the object is in error state.
  //!
  //! Error state means that it does not consume anything unless the error
  //! state is reset by calling `resetLastError()`. Use `getLastError()` to
  //! get the last error that put the object into the error state.
  ASMJIT_INLINE bool isInErrorState() const noexcept { return _lastError != kErrorOk; }

  //! Get the last error code.
  ASMJIT_INLINE Error getLastError() const noexcept { return _lastError; }
  //! Set the last error code and propagate it through the error handler.
  ASMJIT_API Error setLastError(Error error, const char* message = nullptr);
  //! Clear the last error code and return `kErrorOk`.
  ASMJIT_INLINE Error resetLastError() noexcept { return setLastError(kErrorOk); }

  // --------------------------------------------------------------------------
  // [Accessors That Affect the Next Instruction]
  // --------------------------------------------------------------------------

  //! Get options of the next instruction.
  ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; }
  //! Set options of the next instruction.
  ASMJIT_INLINE void setOptions(uint32_t options) noexcept { _options = options; }
  //! Add options of the next instruction.
  ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; }
  //! Reset options of the next instruction.
  ASMJIT_INLINE void resetOptions() noexcept { _options = 0; }

  //! Get if the 5th operand (indexed from zero) of the next instruction is used.
  ASMJIT_INLINE bool hasOp4() const noexcept { return (_options & kOptionOp4) != 0; }
  //! Get if the 6th operand (indexed from zero) of the next instruction is used.
  ASMJIT_INLINE bool hasOp5() const noexcept { return (_options & kOptionOp5) != 0; }
  //! Get if the op-mask operand of the next instruction is used.
  ASMJIT_INLINE bool hasOpExtra() const noexcept { return (_options & kOptionOpExtra) != 0; }

  //! Get the 5th operand (indexed from zero) of the next instruction.
  ASMJIT_INLINE const Operand& getOp4() const noexcept { return static_cast<const Operand&>(_op4); }
  //! Get the 6th operand (indexed from zero) of the next instruction.
  ASMJIT_INLINE const Operand& getOp5() const noexcept { return static_cast<const Operand&>(_op5); }
  //! Get the extra (op-mask) operand of the next instruction.
  ASMJIT_INLINE const Operand& getOpExtra() const noexcept { return static_cast<const Operand&>(_opExtra); }

  //! Set the 5th operand (indexed from zero) of the next instruction.
  ASMJIT_INLINE void setOp4(const Operand_& op4) noexcept { _options |= kOptionOp4; _op4 = op4; }
  //! Set the 6th operand (indexed from zero) of the next instruction.
  ASMJIT_INLINE void setOp5(const Operand_& op5) noexcept { _options |= kOptionOp5; _op5 = op5; }
  //! Set the extra (op-mask) operand of the next instruction.
  ASMJIT_INLINE void setOpExtra(const Operand_& opExtra) noexcept { _options |= kOptionOpExtra; _opExtra = opExtra; }

  //! Get annotation of the next instruction.
  ASMJIT_INLINE const char* getInlineComment() const noexcept { return _inlineComment; }
  //! Set annotation of the next instruction.
  //!
  //! NOTE: This string is set back to null by `_emit()`, but until that it has
  //! to remain valid as `CodeEmitter` is not required to make a copy of it (and
  //! it would be slow to do that for each instruction).
  ASMJIT_INLINE void setInlineComment(const char* s) noexcept { _inlineComment = s; }
  //! Reset annotation of the next instruction to null.
  ASMJIT_INLINE void resetInlineComment() noexcept { _inlineComment = nullptr; }

  // --------------------------------------------------------------------------
  // [Helpers]
  // --------------------------------------------------------------------------

  //! Get if the `label` is valid (i.e. registered).
  ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept {
    return isLabelValid(label.getId());
  }
  //! Get if the label `id` is valid (i.e. registered).
  ASMJIT_API bool isLabelValid(uint32_t id) const noexcept;

  //! Emit a formatted string `fmt`.
  ASMJIT_API Error commentf(const char* fmt, ...);
  //! Emit a formatted string `fmt` (va_list version).
  ASMJIT_API Error commentv(const char* fmt, va_list ap);

  // --------------------------------------------------------------------------
  // [Emit]
  // --------------------------------------------------------------------------

  // NOTE: These `emit()` helpers are designed to address a code-bloat generated
  // by C++ compilers to call a function having many arguments. Each parameter to
  // `_emit()` requires code to pass it, which means that if we default to 4
  // operand parameters in `_emit()` and instId the C++ compiler would have to
  // generate a virtual function call having 5 parameters, which is quite a lot.
  // Since by default asm instructions have 2 to 3 operands it's better to
  // introduce helpers that pass those and fill all the remaining with `_none`.

  //! Emit an instruction.
  ASMJIT_API Error emit(uint32_t instId);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);

  //! Emit an instruction that has a 32-bit signed immediate operand.
  ASMJIT_API Error emit(uint32_t instId, int o0);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, int o1);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, int o2);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, int o3);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, int o4);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, int o5);

  //! Emit an instruction that has a 64-bit signed immediate operand.
  ASMJIT_API Error emit(uint32_t instId, int64_t o0);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, int64_t o1);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, int64_t o2);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, int64_t o3);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, int64_t o4);
  //! \overload
  ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, int64_t o5);

  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, unsigned int o0) {
    return emit(instId, static_cast<int64_t>(o0));
  }
  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, unsigned int o1) {
    return emit(instId, o0, static_cast<int64_t>(o1));
  }
  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, unsigned int o2) {
    return emit(instId, o0, o1, static_cast<int64_t>(o2));
  }
  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, unsigned int o3) {
    return emit(instId, o0, o1, o2, static_cast<int64_t>(o3));
  }
  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, unsigned int o4) {
    return emit(instId, o0, o1, o2, o3, static_cast<int64_t>(o4));
  }
  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, unsigned int o5) {
    return emit(instId, o0, o1, o2, o3, o4, static_cast<int64_t>(o5));
  }

  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, uint64_t o0) {
    return emit(instId, static_cast<int64_t>(o0));
  }
  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, uint64_t o1) {
    return emit(instId, o0, static_cast<int64_t>(o1));
  }
  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, uint64_t o2) {
    return emit(instId, o0, o1, static_cast<int64_t>(o2));
  }
  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, uint64_t o3) {
    return emit(instId, o0, o1, o2, static_cast<int64_t>(o3));
  }
  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, uint64_t o4) {
    return emit(instId, o0, o1, o2, o3, static_cast<int64_t>(o4));
  }
  //! \overload
  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, uint64_t o5) {
    return emit(instId, o0, o1, o2, o3, o4, static_cast<int64_t>(o5));
  }

  // --------------------------------------------------------------------------
  // [Validation]
  // --------------------------------------------------------------------------

  //! Validate instruction with current options, called by `_emit()` if validation is enabled.
  ASMJIT_API Error _validate(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) const noexcept;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  CodeInfo _codeInfo;                    //!< Basic information about the code (matches CodeHolder::_codeInfo).
  CodeHolder* _code;                     //!< CodeHolder the CodeEmitter is attached to.
  CodeEmitter* _nextEmitter;             //!< Linked list of `CodeEmitter`s attached to the same \ref CodeHolder.

  uint8_t _type;                         //!< See CodeEmitter::Type.
  uint8_t _destroyed;                    //!< Set by ~CodeEmitter() before calling `_code->detach()`.
  uint8_t _finalized;                    //!< True if the CodeEmitter is finalized (CodeBuilder & CodeCompiler).
  uint8_t _reserved;                     //!< \internal
  Error _lastError;                      //!< Last error code.

  uint32_t _privateData;                 //!< Internal private data used freely by any CodeEmitter.
  uint32_t _globalHints;                 //!< Global hints, always in sync with CodeHolder.
  uint32_t _globalOptions;               //!< Global options, combined with `_options` before used by each instruction.

  uint32_t _options;                     //!< Used to pass instruction options (affects the next instruction).
  const char* _inlineComment;            //!< Inline comment of the next instruction (affects the next instruction).
  Operand_ _op4;                         //!< 5th operand data (indexed from zero) (affects the next instruction).
  Operand_ _op5;                         //!< 6th operand data (indexed from zero) (affects the next instruction).
  Operand_ _opExtra;                     //!< Extra operand (op-mask {k} on AVX-512) (affects the next instruction).

  Operand_ _none;                        //!< Used to pass unused operands to `_emit()` instead of passing null.
  Reg _nativeGpReg;                      //!< Native GP register with zero id.
  const Reg* _nativeGpArray;             //!< Array of native registers indexed from zero.
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CODEEMITTER_H

View File

@@ -0,0 +1,696 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/assembler.h"
#include "../base/utils.h"
#include "../base/vmem.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
// ErrorHandler is an abstract callback interface with no state of its own;
// the out-of-line ctor/dtor only anchor its vtable in this translation unit.
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}
// ============================================================================
// [asmjit::CodeHolder - Utilities]
// ============================================================================
//! \internal
//!
//! Clears the `clear` bits and sets the `add` bits in the global options of
//! `self` and of every `CodeEmitter` currently attached to it, keeping all
//! of them in sync.
static void CodeHolder_setGlobalOption(CodeHolder* self, uint32_t clear, uint32_t add) noexcept {
  self->_globalOptions = (self->_globalOptions & ~clear) | add;

  for (CodeEmitter* emitter = self->_emitters; emitter; emitter = emitter->_nextEmitter)
    emitter->_globalOptions = (emitter->_globalOptions & ~clear) | add;
}
//! \internal
//!
//! Returns `self` to its just-constructed state: detaches all emitters,
//! releases section buffers, and resets all zone-backed containers. The
//! teardown order below is load-bearing - see inline notes.
static void CodeHolder_resetInternal(CodeHolder* self, bool releaseMemory) noexcept {
  // Detach all `CodeEmitter`s.
  while (self->_emitters)
    self->detach(self->_emitters);

  // Reset everything into its construction state.
  self->_codeInfo.reset();
  self->_globalHints = 0;
  self->_globalOptions = 0;
  self->_logger = nullptr;
  self->_errorHandler = nullptr;

  self->_unresolvedLabelsCount = 0;
  self->_trampolinesSize = 0;

  // Reset all sections - release each section's buffer unless the memory is
  // externally owned (external buffers are never freed here).
  size_t numSections = self->_sections.getLength();
  for (size_t i = 0; i < numSections; i++) {
    SectionEntry* section = self->_sections[i];
    if (section->_buffer.hasData() && !section->_buffer.isExternal())
      Internal::releaseMemory(section->_buffer._data);
    section->_buffer._data = nullptr;
    section->_buffer._capacity = 0;
  }

  // Reset zone allocator and all containers using it.
  // NOTE: The containers must be reset before the heap, and the heap before
  // `_baseZone`, because each borrows memory from the next.
  ZoneHeap* heap = &self->_baseHeap;

  self->_namedLabels.reset(heap);
  self->_relocations.reset();
  self->_labels.reset();
  self->_sections.reset();

  heap->reset(&self->_baseZone);
  self->_baseZone.reset(releaseMemory);
}
// ============================================================================
// [asmjit::CodeHolder - Construction / Destruction]
// ============================================================================
//! Constructs an empty, uninitialized CodeHolder; `init()` must be called
//! before it can hold code.
CodeHolder::CodeHolder() noexcept
  : _codeInfo(),
    _globalHints(0),
    _globalOptions(0),
    _emitters(nullptr),
    _cgAsm(nullptr),
    _logger(nullptr),
    _errorHandler(nullptr),
    // FIX: `_unresolvedLabelsCount` was missing from the initializer list and
    // stayed indeterminate until the first reset (CodeHolder_resetInternal is
    // the only other place that clears it).
    _unresolvedLabelsCount(0),
    _trampolinesSize(0),
    _baseZone(16384 - Zone::kZoneOverhead),
    _dataZone(16384 - Zone::kZoneOverhead),
    _baseHeap(&_baseZone),
    _labels(),
    _sections(),
    _relocations() {
}
// Detaches any remaining emitters and releases all owned memory.
CodeHolder::~CodeHolder() noexcept {
  CodeHolder_resetInternal(this, true);
}
// ============================================================================
// [asmjit::CodeHolder - Init / Reset]
// ============================================================================
//! Initializes the holder for the architecture/environment described by
//! `info` and creates the default ".text" section. Fails with
//! `kErrorAlreadyInitialized` when called twice without a `reset()`.
Error CodeHolder::init(const CodeInfo& info) noexcept {
  // Cannot reinitialize if it's locked or there is one or more CodeEmitter
  // attached.
  if (isInitialized())
    return DebugUtils::errored(kErrorAlreadyInitialized);

  // If we are just initializing there should be no emitters attached.
  ASMJIT_ASSERT(_emitters == nullptr);

  // Create the default section and insert it to the `_sections` array.
  Error err = _sections.willGrow(&_baseHeap);
  SectionEntry* se = nullptr;

  if (err == kErrorOk) {
    se = _baseZone.allocZeroedT<SectionEntry>();
    if (ASMJIT_UNLIKELY(!se))
      err = DebugUtils::errored(kErrorNoHeapMemory);
  }

  if (ASMJIT_UNLIKELY(err)) {
    // Roll back the zone so a failed init leaves no allocations behind.
    _baseZone.reset(false);
    return err;
  }

  se->_flags = SectionEntry::kFlagExec | SectionEntry::kFlagConst;
  se->_setDefaultName('.', 't', 'e', 'x', 't');
  _sections.appendUnsafe(se);

  _codeInfo = info;
  return kErrorOk;
}
// Resets the holder to its construction state; `releaseMemory` controls
// whether the backing zone memory is freed or kept for reuse.
void CodeHolder::reset(bool releaseMemory) noexcept {
  CodeHolder_resetInternal(this, releaseMemory);
}
// ============================================================================
// [asmjit::CodeHolder - Attach / Detach]
// ============================================================================
//! Attaches `emitter` to this holder. At most one `Assembler` may be attached
//! at a time; attaching an emitter that is already attached to this holder is
//! a no-op, attaching one owned by another holder is an error.
Error CodeHolder::attach(CodeEmitter* emitter) noexcept {
  // Catch a possible misuse of the API.
  if (emitter == nullptr)
    return DebugUtils::errored(kErrorInvalidArgument);

  uint32_t type = emitter->getType();
  if (type == CodeEmitter::kTypeNone || type >= CodeEmitter::kTypeCount)
    return DebugUtils::errored(kErrorInvalidState);

  // This is suspicious, but don't fail if `emitter` matches.
  if (emitter->_code != nullptr)
    return emitter->_code == this ? kErrorOk : DebugUtils::errored(kErrorInvalidState);

  // Special case - attach `Assembler`.
  CodeEmitter** pSlot = nullptr;
  if (type == CodeEmitter::kTypeAssembler) {
    if (_cgAsm)
      return DebugUtils::errored(kErrorSlotOccupied);
    pSlot = reinterpret_cast<CodeEmitter**>(&_cgAsm);
  }

  // Let the emitter prepare itself; nothing is linked if this fails.
  Error err = emitter->onAttach(this);
  if (err != kErrorOk)
    return err;

  // Add to a single-linked list of `CodeEmitter`s.
  emitter->_nextEmitter = _emitters;
  _emitters = emitter;
  if (pSlot)
    *pSlot = emitter;

  // Establish the connection.
  emitter->_code = this;
  return kErrorOk;
}
//! Detaches `emitter` from this holder. The emitter is always unlinked; a
//! failure reported by `onDetach()` is propagated but does not prevent the
//! detachment itself.
Error CodeHolder::detach(CodeEmitter* emitter) noexcept {
  if (emitter == nullptr)
    return DebugUtils::errored(kErrorInvalidArgument);

  if (emitter->_code != this)
    return DebugUtils::errored(kErrorInvalidState);

  uint32_t type = emitter->getType();
  Error err = kErrorOk;

  // NOTE: We always detach if we were asked to, if error happens during
  // `emitter->onDetach()` we just propagate it, but the CodeEmitter will
  // be detached.
  if (!emitter->_destroyed)
    err = emitter->onDetach(this);

  // Special case - detach `Assembler`.
  if (type == CodeEmitter::kTypeAssembler)
    _cgAsm = nullptr;

  // Remove from a single-linked list of `CodeEmitter`s; the emitter is
  // guaranteed to be in the list because `_code == this`.
  CodeEmitter** pPrev = &_emitters;
  while (*pPrev != emitter) {
    ASMJIT_ASSERT(*pPrev != nullptr);
    pPrev = &(*pPrev)->_nextEmitter;
  }
  *pPrev = emitter->_nextEmitter;

  emitter->_code = nullptr;
  emitter->_nextEmitter = nullptr;
  return err;
}
// ============================================================================
// [asmjit::CodeHolder - Sync]
// ============================================================================
// Flushes pending state from the attached Assembler (if any) back into the
// holder so section lengths reflect all emitted code.
void CodeHolder::sync() noexcept {
  if (_cgAsm) _cgAsm->sync();
}
// ============================================================================
// [asmjit::CodeHolder - Result Information]
// ============================================================================
// Returns the total generated code size including trampolines.
// NOTE(review): `sync()` mutates cached lengths, hence the const_cast on a
// logically-const query.
size_t CodeHolder::getCodeSize() const noexcept {
  // Reflect all changes first.
  const_cast<CodeHolder*>(this)->sync();

  // TODO: Support sections.
  return _sections[0]->_buffer._length + getTrampolinesSize();
}
// ============================================================================
// [asmjit::CodeHolder - Logging & Error Handling]
// ============================================================================
#if !defined(ASMJIT_DISABLE_LOGGING)
//! Installs `logger` (or removes it when null) and toggles the
//! logging-enabled option on this holder and every attached emitter.
void CodeHolder::setLogger(Logger* logger) noexcept {
  _logger = logger;
  CodeHolder_setGlobalOption(this, CodeEmitter::kOptionLoggingEnabled,
                             logger ? CodeEmitter::kOptionLoggingEnabled : 0U);
}
#endif // !ASMJIT_DISABLE_LOGGING
// Installs `handler` as the error callback; a null handler disables error
// callbacks. Always succeeds.
Error CodeHolder::setErrorHandler(ErrorHandler* handler) noexcept {
  _errorHandler = handler;
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeHolder - Sections]
// ============================================================================
//! \internal
//!
//! Resize the backing storage of `cb` to exactly `n` bytes and patch the
//! attached `Assembler` (if any) so its buffer pointers and cursor keep
//! pointing into the new storage.
//!
//! NOTE(review): when `cb` uses an external (user-provided) buffer, a fresh
//! internal buffer is allocated but the existing content is NOT copied into
//! it - presumably external buffers are always fixed-size so callers never
//! reach this path (both callers bail out on `isFixedSize()`); confirm.
static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t n) noexcept {
  uint8_t* oldData = cb->_data;
  uint8_t* newData;
  // Only internally-allocated memory may be realloc'ed; external memory is
  // replaced by a new internal allocation. On failure `realloc` leaves the
  // old block intact, so `cb` is not modified on the error path.
  if (oldData && !cb->isExternal())
    newData = static_cast<uint8_t*>(Internal::reallocMemory(oldData, n));
  else
    newData = static_cast<uint8_t*>(Internal::allocMemory(n));
  if (ASMJIT_UNLIKELY(!newData))
    return DebugUtils::errored(kErrorNoHeapMemory);
  cb->_data = newData;
  cb->_capacity = n;
  // Update the `Assembler` pointers if attached. Maybe we should introduce an
  // event for this, but since only one Assembler can be attached at a time it
  // should not matter how these pointers are updated.
  Assembler* a = self->_cgAsm;
  if (a && &a->_section->_buffer == cb) {
    // Preserve the current write position relative to the buffer start.
    size_t offset = a->getOffset();
    a->_bufferData = newData;
    a->_bufferEnd = newData + n;
    a->_bufferPtr = newData + offset;
  }
  return kErrorOk;
}
//! Grow `cb` so that at least `n` more bytes fit, using an exponential
//! growth policy (doubling below `Globals::kAllocThreshold`, linear above).
//!
//! Returns `kErrorCodeTooLarge` for fixed-size buffers and
//! `kErrorNoHeapMemory` on allocation failure or size overflow.
Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept {
  // This is most likely called by `Assembler` so `sync()` shouldn't be needed,
  // however, if this is called by the user and the currently attached Assembler
  // did generate some code we could lose that, so sync now and make sure the
  // section length is updated.
  if (_cgAsm) _cgAsm->sync();
  // Now the length of the section must be valid.
  size_t length = cb->getLength();
  if (ASMJIT_UNLIKELY(n > IntTraits<uintptr_t>::maxValue() - length))
    return DebugUtils::errored(kErrorNoHeapMemory);
  // We can now check if growing the buffer is really necessary. It's unlikely
  // that this function is called while there is still room for `n` bytes.
  size_t capacity = cb->getCapacity();
  size_t required = cb->getLength() + n;
  if (ASMJIT_UNLIKELY(required <= capacity)) return kErrorOk;
  if (cb->isFixedSize())
    return DebugUtils::errored(kErrorCodeTooLarge);
  if (capacity < 8096)
    capacity = 8096;
  else
    capacity += Globals::kAllocOverhead;
  do {
    size_t old = capacity;
    // FIX: This growth step was accidentally duplicated in the original code,
    // causing each loop iteration to grow twice (up to 4x) and over-allocate.
    // A single step per iteration is enough; the loop condition guarantees
    // the final capacity covers `required`.
    if (capacity < Globals::kAllocThreshold)
      capacity *= 2;
    else
      capacity += Globals::kAllocThreshold;
    // Overflow.
    if (ASMJIT_UNLIKELY(old > capacity))
      return DebugUtils::errored(kErrorNoHeapMemory);
  } while (capacity - Globals::kAllocOverhead < required);
  return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead);
}
//! Ensure `cb` has capacity for at least `n` bytes total (no growth policy,
//! the buffer is resized to exactly `n` when it's smaller).
Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
  if (n <= cb->getCapacity())
    return kErrorOk;
  if (cb->isFixedSize())
    return DebugUtils::errored(kErrorCodeTooLarge);
  // We must sync, as mentioned in `growBuffer()` as well.
  if (_cgAsm)
    _cgAsm->sync();
  return CodeHolder_reserveInternal(this, cb, n);
}
// ============================================================================
// [asmjit::CodeHolder - Labels & Symbols]
// ============================================================================
namespace {
//! \internal
//!
//! Only used to lookup a label from `_namedLabels`.
class LabelByName {
public:
  ASMJIT_INLINE LabelByName(const char* name, size_t nameLength, uint32_t hVal) noexcept
    : name(name),
      nameLength(static_cast<uint32_t>(nameLength)),
      hVal(hVal) {} // FIX: `hVal` was never initialized, breaking hash lookup.
  //! Return true if `entry` has exactly the same name (length + bytes).
  ASMJIT_INLINE bool matches(const LabelEntry* entry) const noexcept {
    return static_cast<uint32_t>(entry->getNameLength()) == nameLength &&
           ::memcmp(entry->getName(), name, nameLength) == 0;
  }
  const char* name;     //!< Label name (not null-terminated when explicit length given).
  uint32_t nameLength;  //!< Length of `name` in bytes.
  uint32_t hVal;        //!< Precomputed hash used by the hash-table lookup.
};
// Returns a hash of `name` and fixes `nameLength` if it's `Globals::kInvalidIndex`.
//
// An embedded NUL inside an explicitly sized name is invalid; it's reported
// by setting `nameLength` to zero (all callers already treat a zero length
// as an invalid label name).
static uint32_t CodeHolder_hashNameAndFixLen(const char* name, size_t& nameLength) noexcept {
  uint32_t hVal = 0;
  if (nameLength == Globals::kInvalidIndex) {
    // Unknown length - hash up to the terminating NUL and record the length.
    size_t i = 0;
    for (;;) {
      uint8_t c = static_cast<uint8_t>(name[i]);
      if (!c) break;
      hVal = Utils::hashRound(hVal, c);
      i++;
    }
    nameLength = i;
  }
  else {
    for (size_t i = 0; i < nameLength; i++) {
      uint8_t c = static_cast<uint8_t>(name[i]);
      if (ASMJIT_UNLIKELY(!c)) {
        // FIX: The original returned `DebugUtils::errored(kErrorInvalidLabelName)`
        // here, silently reinterpreting an error code as a hash value.
        nameLength = 0;
        return 0;
      }
      hVal = Utils::hashRound(hVal, c);
    }
  }
  return hVal;
}
} // anonymous namespace
//! Allocate a new `LabelLink` recording one unresolved use of label `le`
//! at `sectionId`:`offset` (with inlined displacement `rel`). Returns null
//! on allocation failure.
LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept {
  LabelLink* link = _baseHeap.allocT<LabelLink>();
  if (ASMJIT_UNLIKELY(link == nullptr))
    return nullptr;
  // Fill the link and prepend it to the label's single-linked list.
  link->prev = le->_links;
  link->sectionId = sectionId;
  link->relocId = RelocEntry::kInvalidId;
  link->offset = offset;
  link->rel = rel;
  le->_links = link;
  _unresolvedLabelsCount++;
  return link;
}
//! Create a new anonymous label and return its packed id in `idOut`
//! (zero on failure).
Error CodeHolder::newLabelId(uint32_t& idOut) noexcept {
  idOut = 0;
  size_t index = _labels.getLength();
  // FIX: The branch hint was `ASMJIT_LIKELY`, marking the index-overflow
  // error-path as the expected path; `newNamedLabelId()` correctly uses
  // `ASMJIT_UNLIKELY` for the same check.
  if (ASMJIT_UNLIKELY(index >= Operand::kPackedIdCount))
    return DebugUtils::errored(kErrorLabelIndexOverflow);
  ASMJIT_PROPAGATE(_labels.willGrow(&_baseHeap));
  LabelEntry* le = _baseHeap.allocZeroedT<LabelEntry>();
  if (ASMJIT_UNLIKELY(!le))
    return DebugUtils::errored(kErrorNoHeapMemory); // FIX: stray `;;` removed.
  uint32_t id = Operand::packId(static_cast<uint32_t>(index));
  le->_setId(id);
  le->_parentId = 0;
  le->_sectionId = SectionEntry::kInvalidId; // Not bound yet.
  le->_offset = 0;
  _labels.appendUnsafe(le); // Capacity guaranteed by `willGrow()` above.
  idOut = id;
  return kErrorOk;
}
//! Create a named label and return its packed id in `idOut` (zero on failure).
//!
//! `type` must be `Label::kTypeLocal` (requires a valid `parentId`) or
//! `Label::kTypeGlobal` (requires `parentId == 0`). Fails with
//! `kErrorLabelAlreadyDefined` when a label of the same name (and, for local
//! labels, the same parent) already exists.
Error CodeHolder::newNamedLabelId(uint32_t& idOut, const char* name, size_t nameLength, uint32_t type, uint32_t parentId) noexcept {
  idOut = 0;
  // Hashing also resolves `nameLength` when it's `Globals::kInvalidIndex`.
  uint32_t hVal = CodeHolder_hashNameAndFixLen(name, nameLength);
  if (ASMJIT_UNLIKELY(nameLength == 0))
    return DebugUtils::errored(kErrorInvalidLabelName);
  if (ASMJIT_UNLIKELY(nameLength > Globals::kMaxLabelLength))
    return DebugUtils::errored(kErrorLabelNameTooLong);
  switch (type) {
    case Label::kTypeLocal:
      if (ASMJIT_UNLIKELY(Operand::unpackId(parentId) >= _labels.getLength()))
        return DebugUtils::errored(kErrorInvalidParentLabel);
      // Fold the parent id into the hash so identical local names under
      // different parents hash (and therefore exist) independently.
      hVal ^= parentId;
      break;
    case Label::kTypeGlobal:
      if (ASMJIT_UNLIKELY(parentId != 0))
        return DebugUtils::errored(kErrorNonLocalLabelCantHaveParent);
      break;
    default:
      return DebugUtils::errored(kErrorInvalidArgument);
  }
  // Don't allow to insert duplicates. Local labels allow duplicates that have
  // different id, this is already accomplished by having a different hashes
  // between the same label names having different parent labels.
  LabelEntry* le = _namedLabels.get(LabelByName(name, nameLength, hVal));
  if (ASMJIT_UNLIKELY(le))
    return DebugUtils::errored(kErrorLabelAlreadyDefined);
  Error err = kErrorOk;
  size_t index = _labels.getLength();
  if (ASMJIT_UNLIKELY(index >= Operand::kPackedIdCount))
    return DebugUtils::errored(kErrorLabelIndexOverflow);
  ASMJIT_PROPAGATE(_labels.willGrow(&_baseHeap));
  le = _baseHeap.allocZeroedT<LabelEntry>();
  if (ASMJIT_UNLIKELY(!le))
    return DebugUtils::errored(kErrorNoHeapMemory);
  uint32_t id = Operand::packId(static_cast<uint32_t>(index));
  le->_hVal = hVal;
  le->_setId(id);
  le->_type = static_cast<uint8_t>(type);
  // NOTE(review): `parentId` is only folded into `_hVal` above; `_parentId`
  // is stored as zero even for local labels - confirm this is intended.
  le->_parentId = 0;
  le->_sectionId = SectionEntry::kInvalidId; // Not bound yet.
  le->_offset = 0;
  if (le->_name.mustEmbed(nameLength)) {
    // Short name - store inline in the entry.
    le->_name.setEmbedded(name, nameLength);
  }
  else {
    // Long name - duplicate into `_dataZone` (null-terminated).
    char* nameExternal = static_cast<char*>(_dataZone.dup(name, nameLength, true));
    if (ASMJIT_UNLIKELY(!nameExternal))
      return DebugUtils::errored(kErrorNoHeapMemory);
    le->_name.setExternal(nameExternal, nameLength);
  }
  _labels.appendUnsafe(le); // Capacity guaranteed by `willGrow()` above.
  _namedLabels.put(le);
  idOut = id;
  return err;
}
//! Find a named label; returns its id, or zero when not found or when the
//! name is invalid.
uint32_t CodeHolder::getLabelIdByName(const char* name, size_t nameLength, uint32_t parentId) noexcept {
  uint32_t hVal = CodeHolder_hashNameAndFixLen(name, nameLength);
  if (ASMJIT_UNLIKELY(nameLength == 0))
    return 0;
  // NOTE(review): `parentId` is accepted but unused - the hash computed here
  // never folds it, while `newNamedLabelId()` folds it for local labels, so
  // local labels appear unreachable through this lookup; confirm intended.
  LabelEntry* le = _namedLabels.get(LabelByName(name, nameLength, hVal));
  if (le == nullptr)
    return 0;
  return le->getId();
}
// ============================================================================
// [asmjit::CodeEmitter - Relocations]
// ============================================================================
//! Encode MOD byte.
static ASMJIT_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept {
return (m << 6) | (o << 3) | rm;
}
//! Allocate a new zeroed `RelocEntry` of the given `type` and `size`,
//! register it with the holder and return it through `dst`.
Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t type, uint32_t size) noexcept {
  ASMJIT_PROPAGATE(_relocations.willGrow(&_baseHeap));
  size_t index = _relocations.getLength();
  // Relocation ids are 32-bit, reject anything beyond.
  if (ASMJIT_UNLIKELY(index > size_t(0xFFFFFFFFU)))
    return DebugUtils::errored(kErrorRelocIndexOverflow);
  RelocEntry* re = _baseHeap.allocZeroedT<RelocEntry>();
  if (ASMJIT_UNLIKELY(re == nullptr))
    return DebugUtils::errored(kErrorNoHeapMemory);
  // The entry is zero-initialized; only fill the non-zero fields.
  re->_id = static_cast<uint32_t>(index);
  re->_type = static_cast<uint8_t>(type);
  re->_size = static_cast<uint8_t>(size);
  re->_sourceSectionId = SectionEntry::kInvalidId;
  re->_targetSectionId = SectionEntry::kInvalidId;
  _relocations.appendUnsafe(re);
  *dst = re;
  return kErrorOk;
}
// TODO: Support multiple sections, this only relocates the first.
// TODO: This should go to Runtime as it's responsible for relocating the
// code, CodeHolder should just hold it.
//! Copy the first section's code into `_dst` and apply all recorded
//! relocations against `baseAddress` (the address of `_dst` itself is used
//! when `baseAddress` is `Globals::kNoBaseAddress`).
//!
//! Returns the total number of bytes emitted, including 8-byte trampolines
//! appended after the code for targets that don't fit into rel32.
//!
//! NOTE(review): the return type is `size_t`, yet all failure paths return
//! `DebugUtils::errored(...)` - an `Error` code a caller could mistake for a
//! small valid size; confirm how callers detect failure.
size_t CodeHolder::relocate(void* _dst, uint64_t baseAddress) const noexcept {
  SectionEntry* section = _sections[0];
  ASMJIT_ASSERT(section != nullptr);
  // NOTE(review): `archType` appears unused below - possibly leftover.
  uint32_t archType = getArchType();
  uint8_t* dst = static_cast<uint8_t*>(_dst);
  if (baseAddress == Globals::kNoBaseAddress)
    baseAddress = static_cast<uint64_t>((uintptr_t)dst);
#if !defined(ASMJIT_DISABLE_LOGGING)
  Logger* logger = getLogger();
#endif // ASMJIT_DISABLE_LOGGING
  size_t minCodeSize = section->getBuffer().getLength(); // Minimum code size.
  size_t maxCodeSize = getCodeSize(); // Includes all possible trampolines.
  // We will copy the exact size of the generated code. Extra code for trampolines
  // is generated on-the-fly by the relocator (this code doesn't exist at the moment).
  ::memcpy(dst, section->_buffer._data, minCodeSize);
  // Trampoline offset from the beginning of dst/baseAddress.
  size_t trampOffset = minCodeSize;
  // Relocate all recorded locations.
  size_t numRelocs = _relocations.getLength();
  const RelocEntry* const* reArray = _relocations.getData();
  for (size_t i = 0; i < numRelocs; i++) {
    const RelocEntry* re = reArray[i];
    // Possibly deleted or optimized out relocation entry.
    if (re->getType() == RelocEntry::kTypeNone)
      continue;
    uint64_t ptr = re->getData();
    size_t codeOffset = static_cast<size_t>(re->getSourceOffset());
    // Make sure that the `RelocEntry` is correct, we don't want to write
    // out of bounds in `dst`.
    if (ASMJIT_UNLIKELY(codeOffset + re->getSize() > maxCodeSize))
      return DebugUtils::errored(kErrorInvalidRelocEntry);
    // Whether to use trampoline, can be only used if relocation type is `kRelocTrampoline`.
    bool useTrampoline = false;
    // Translate `ptr` (the raw relocation data) according to the reloc type.
    switch (re->getType()) {
      case RelocEntry::kTypeAbsToAbs: {
        break;
      }
      case RelocEntry::kTypeRelToAbs: {
        ptr += baseAddress;
        break;
      }
      case RelocEntry::kTypeAbsToRel: {
        // Displacement is relative to the end of the patched field.
        ptr -= baseAddress + re->getSourceOffset() + re->getSize();
        break;
      }
      case RelocEntry::kTypeTrampoline: {
        if (re->getSize() != 4)
          return DebugUtils::errored(kErrorInvalidRelocEntry);
        ptr -= baseAddress + re->getSourceOffset() + re->getSize();
        if (!Utils::isInt32(static_cast<int64_t>(ptr))) {
          // Target out of rel32 range - redirect through a trampoline slot
          // placed after the code.
          ptr = (uint64_t)trampOffset - re->getSourceOffset() - re->getSize();
          useTrampoline = true;
        }
        break;
      }
      default:
        return DebugUtils::errored(kErrorInvalidRelocEntry);
    }
    // Write the translated value back with the recorded width.
    switch (re->getSize()) {
      case 1:
        Utils::writeU8(dst + codeOffset, static_cast<uint32_t>(ptr & 0xFFU));
        break;
      case 4:
        Utils::writeU32u(dst + codeOffset, static_cast<uint32_t>(ptr & 0xFFFFFFFFU));
        break;
      case 8:
        Utils::writeU64u(dst + codeOffset, ptr);
        break;
      default:
        return DebugUtils::errored(kErrorInvalidRelocEntry);
    }
    // Handle the trampoline case.
    if (useTrampoline) {
      // Bytes that replace [REX, OPCODE] bytes.
      uint32_t byte0 = 0xFF;
      uint32_t byte1 = dst[codeOffset - 1];
      // Rewrite the direct rel32 CALL/JMP into its indirect RIP-relative
      // form (FF /2 or FF /4) pointing at the 64-bit trampoline slot.
      if (byte1 == 0xE8) {
        // Patch CALL/MOD byte to FF/2 (-> 0x15).
        byte1 = x86EncodeMod(0, 2, 5);
      }
      else if (byte1 == 0xE9) {
        // Patch JMP/MOD byte to FF/4 (-> 0x25).
        byte1 = x86EncodeMod(0, 4, 5);
      }
      else {
        return DebugUtils::errored(kErrorInvalidRelocEntry);
      }
      // Patch `jmp/call` instruction.
      ASMJIT_ASSERT(codeOffset >= 2);
      dst[codeOffset - 2] = static_cast<uint8_t>(byte0);
      dst[codeOffset - 1] = static_cast<uint8_t>(byte1);
      // Store absolute address and advance the trampoline pointer.
      Utils::writeU64u(dst + trampOffset, re->getData());
      trampOffset += 8;
#if !defined(ASMJIT_DISABLE_LOGGING)
      if (logger)
        logger->logf("[reloc] dq 0x%016llX ; Trampoline\n", re->getData());
#endif // !ASMJIT_DISABLE_LOGGING
    }
  }
  // If there are no trampolines this is the same as `minCodeSize`.
  return trampOffset;
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View File

@@ -0,0 +1,745 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CODEHOLDER_H
#define _ASMJIT_BASE_CODEHOLDER_H
// [Dependencies]
#include "../base/arch.h"
#include "../base/func.h"
#include "../base/logging.h"
#include "../base/operand.h"
#include "../base/simdtypes.h"
#include "../base/utils.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class Assembler;
class CodeEmitter;
class CodeHolder;
// ============================================================================
// [asmjit::AlignMode]
// ============================================================================
//! Align mode, used by alignment directives to choose the fill pattern.
ASMJIT_ENUM(AlignMode) {
  kAlignCode = 0, //!< Align executable code.
  kAlignData = 1, //!< Align non-executable data.
  kAlignZero = 2, //!< Align by a sequence of zeros.
  kAlignCount //!< Count of alignment modes.
};
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
//! Error handler can be used to override the default behavior of error handling
//! available to all classes that inherit \ref CodeEmitter. See \ref handleError().
class ASMJIT_VIRTAPI ErrorHandler {
public:
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create a new `ErrorHandler` instance.
  ASMJIT_API ErrorHandler() noexcept;
  //! Destroy the `ErrorHandler` instance.
  ASMJIT_API virtual ~ErrorHandler() noexcept;
  // --------------------------------------------------------------------------
  // [Handle Error]
  // --------------------------------------------------------------------------
  //! Error handler (abstract).
  //!
  //! Error handler is called after an error happened and before it's propagated
  //! to the caller. There are multiple ways how the error handler can be used:
  //!
  //! 1. Returning `true` or `false` from `handleError()`. If `true` is returned
  //! it means that the error was reported and AsmJit can continue execution.
  //! The reported error will still be propagated to the caller, but won't put
  //! the CodeEmitter into an error state (it won't set last-error). However,
  //! returning `false` means that the error cannot be handled - in such case
  //! it stores the error, which can be then retrieved by using `getLastError()`.
  //! Returning `false` is the default behavior when no error handler is present.
  //! To put the assembler into a non-error state again a `resetLastError()` must
  //! be called.
  //!
  //! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely
  //! exception-safe, but you can throw an exception from your error handler if
  //! this way is the preferred way of handling errors in your project. Throwing
  //! an exception acts virtually as returning `true` as AsmJit won't be able
  //! to store the error because the exception changes the execution path.
  //!
  //! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts
  //! `CodeEmitter` to a consistent state before calling the `handleError()`
  //! so `longjmp()` can be used without any issues to cancel the code
  //! generation if an error occurred. There is no difference between
  //! exceptions and longjmp() from AsmJit's perspective.
  virtual bool handleError(Error err, const char* message, CodeEmitter* origin) = 0;
};
// ============================================================================
// [asmjit::CodeInfo]
// ============================================================================
//! Basic information about a code (or target). It describes its architecture,
//! code generation mode (or optimization level), and base address.
class CodeInfo {
public:
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  //! Create an uninitialized `CodeInfo` (architecture set to `kTypeNone`).
  ASMJIT_INLINE CodeInfo() noexcept
    : _archInfo(),
      _stackAlignment(0),
      _cdeclCallConv(CallConv::kIdNone),
      _stdCallConv(CallConv::kIdNone),
      _fastCallConv(CallConv::kIdNone),
      _baseAddress(Globals::kNoBaseAddress) {}
  //! Create a copy of `other`.
  ASMJIT_INLINE CodeInfo(const CodeInfo& other) noexcept { init(other); }
  //! Create a `CodeInfo` for the given architecture; stack alignment and
  //! calling conventions are left unset (zero).
  explicit ASMJIT_INLINE CodeInfo(uint32_t archType, uint32_t archMode = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept
    : _archInfo(archType, archMode),
      _packedMiscInfo(0),
      _baseAddress(baseAddress) {}
  // --------------------------------------------------------------------------
  // [Init / Reset]
  // --------------------------------------------------------------------------
  //! Get whether the architecture has been set (i.e. not `kTypeNone`).
  ASMJIT_INLINE bool isInitialized() const noexcept {
    return _archInfo._type != ArchInfo::kTypeNone;
  }
  //! Copy all fields from `other`.
  ASMJIT_INLINE void init(const CodeInfo& other) noexcept {
    _archInfo = other._archInfo;
    _packedMiscInfo = other._packedMiscInfo;
    _baseAddress = other._baseAddress;
  }
  //! (Re)initialize for the given architecture, clearing the misc info.
  ASMJIT_INLINE void init(uint32_t archType, uint32_t archMode = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept {
    _archInfo.init(archType, archMode);
    _packedMiscInfo = 0;
    _baseAddress = baseAddress;
  }
  //! Reset to the default-constructed (uninitialized) state.
  ASMJIT_INLINE void reset() noexcept {
    _archInfo.reset();
    _stackAlignment = 0;
    _cdeclCallConv = CallConv::kIdNone;
    _stdCallConv = CallConv::kIdNone;
    _fastCallConv = CallConv::kIdNone;
    _baseAddress = Globals::kNoBaseAddress;
  }
  // --------------------------------------------------------------------------
  // [Architecture Information]
  // --------------------------------------------------------------------------
  //! Get architecture information, see \ref ArchInfo.
  ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _archInfo; }
  //! Get architecture type, see \ref ArchInfo::Type.
  ASMJIT_INLINE uint32_t getArchType() const noexcept { return _archInfo.getType(); }
  //! Get architecture sub-type, see \ref ArchInfo::SubType.
  ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _archInfo.getSubType(); }
  //! Get a size of a GP register of the architecture the code is using.
  ASMJIT_INLINE uint32_t getGpSize() const noexcept { return _archInfo.getGpSize(); }
  //! Get number of GP registers available of the architecture the code is using.
  ASMJIT_INLINE uint32_t getGpCount() const noexcept { return _archInfo.getGpCount(); }
  // --------------------------------------------------------------------------
  // [High-Level Information]
  // --------------------------------------------------------------------------
  //! Get a natural stack alignment that must be honored (or 0 if not known).
  ASMJIT_INLINE uint32_t getStackAlignment() const noexcept { return _stackAlignment; }
  //! Set a natural stack alignment that must be honored.
  ASMJIT_INLINE void setStackAlignment(uint8_t sa) noexcept { _stackAlignment = static_cast<uint8_t>(sa); }
  //! Get the default CDECL calling convention id, see \ref CallConv.
  ASMJIT_INLINE uint32_t getCdeclCallConv() const noexcept { return _cdeclCallConv; }
  //! Set the default CDECL calling convention id (stored as 8-bit).
  ASMJIT_INLINE void setCdeclCallConv(uint32_t cc) noexcept { _cdeclCallConv = static_cast<uint8_t>(cc); }
  //! Get the default STDCALL calling convention id.
  ASMJIT_INLINE uint32_t getStdCallConv() const noexcept { return _stdCallConv; }
  //! Set the default STDCALL calling convention id (stored as 8-bit).
  ASMJIT_INLINE void setStdCallConv(uint32_t cc) noexcept { _stdCallConv = static_cast<uint8_t>(cc); }
  //! Get the default FASTCALL calling convention id.
  ASMJIT_INLINE uint32_t getFastCallConv() const noexcept { return _fastCallConv; }
  //! Set the default FASTCALL calling convention id (stored as 8-bit).
  ASMJIT_INLINE void setFastCallConv(uint32_t cc) noexcept { _fastCallConv = static_cast<uint8_t>(cc); }
  // --------------------------------------------------------------------------
  // [Addressing Information]
  // --------------------------------------------------------------------------
  //! Get whether a base address is set (not `Globals::kNoBaseAddress`).
  ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
  //! Get the base address the code will be relocated to.
  ASMJIT_INLINE uint64_t getBaseAddress() const noexcept { return _baseAddress; }
  //! Set the base address the code will be relocated to.
  ASMJIT_INLINE void setBaseAddress(uint64_t p) noexcept { _baseAddress = p; }
  //! Reset the base address to `Globals::kNoBaseAddress`.
  ASMJIT_INLINE void resetBaseAddress() noexcept { _baseAddress = Globals::kNoBaseAddress; }
  // --------------------------------------------------------------------------
  // [Operator Overload]
  // --------------------------------------------------------------------------
  ASMJIT_INLINE CodeInfo& operator=(const CodeInfo& other) noexcept { init(other); return *this; }
  // NOTE(review): equality is a raw `memcmp` of the whole object - it relies
  // on the layout containing no uninitialized padding bytes; confirm.
  ASMJIT_INLINE bool operator==(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) == 0; }
  ASMJIT_INLINE bool operator!=(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) != 0; }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  ArchInfo _archInfo; //!< Architecture information.
  // The four 8-bit fields below alias `_packedMiscInfo` so they can be
  // copied and cleared as one 32-bit value.
  union {
    struct {
      uint8_t _stackAlignment; //!< Natural stack alignment (ARCH+OS).
      uint8_t _cdeclCallConv; //!< Default CDECL calling convention.
      uint8_t _stdCallConv; //!< Default STDCALL calling convention.
      uint8_t _fastCallConv; //!< Default FASTCALL calling convention.
    };
    uint32_t _packedMiscInfo; //!< \internal
  };
  uint64_t _baseAddress; //!< Base address.
};
// ============================================================================
// [asmjit::CodeBuffer]
// ============================================================================
//! Code or data buffer.
//!
//! A growable (or fixed/external) byte buffer owned by a \ref SectionEntry;
//! resized through `CodeHolder::growBuffer()` / `reserveBuffer()`.
struct CodeBuffer {
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get whether the buffer has been allocated.
  ASMJIT_INLINE bool hasData() const noexcept { return _data != nullptr; }
  //! Get a pointer to the buffer data.
  ASMJIT_INLINE uint8_t* getData() noexcept { return _data; }
  //! \overload
  ASMJIT_INLINE const uint8_t* getData() const noexcept { return _data; }
  //! Get the number of bytes used.
  ASMJIT_INLINE size_t getLength() const noexcept { return _length; }
  //! Get the number of bytes allocated.
  ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; }
  //! Get whether the memory is user-provided (not owned by AsmJit).
  ASMJIT_INLINE bool isExternal() const noexcept { return _isExternal; }
  //! Get whether the buffer is not allowed to grow.
  ASMJIT_INLINE bool isFixedSize() const noexcept { return _isFixedSize; }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  uint8_t* _data; //!< The content of the buffer (data).
  size_t _length; //!< Number of bytes of `data` used.
  size_t _capacity; //!< Buffer capacity (in bytes).
  bool _isExternal; //!< True if this is external buffer.
  bool _isFixedSize; //!< True if this buffer cannot grow.
};
// ============================================================================
// [asmjit::SectionEntry]
// ============================================================================
//! Section entry.
//!
//! Describes one code/data section held by \ref CodeHolder: its id, flags,
//! alignment, name and backing \ref CodeBuffer.
class SectionEntry {
public:
  ASMJIT_ENUM(Id) {
    kInvalidId = 0xFFFFFFFFU //!< Invalid section id.
  };
  //! Section flags.
  ASMJIT_ENUM(Flags) {
    kFlagExec = 0x00000001U, //!< Executable (.text sections).
    kFlagConst = 0x00000002U, //!< Read-only (.text and .data sections).
    kFlagZero = 0x00000004U, //!< Zero initialized by the loader (BSS).
    kFlagInfo = 0x00000008U, //!< Info / comment flag.
    kFlagImplicit = 0x80000000U //!< Section created implicitly (can be deleted by the Runtime).
  };
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get the section id.
  ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
  //! Get the section name (null-terminated).
  ASMJIT_INLINE const char* getName() const noexcept { return _name; }
  //! \internal
  //!
  //! Set the section name from up to 8 characters, writing them as two
  //! packed 32-bit words (remaining bytes become zero terminators).
  ASMJIT_INLINE void _setDefaultName(
    char c0 = 0, char c1 = 0, char c2 = 0, char c3 = 0,
    char c4 = 0, char c5 = 0, char c6 = 0, char c7 = 0) noexcept {
    reinterpret_cast<uint32_t*>(_name)[0] = Utils::pack32_4x8(c0, c1, c2, c3);
    reinterpret_cast<uint32_t*>(_name)[1] = Utils::pack32_4x8(c4, c5, c6, c7);
  }
  //! Get section flags, see \ref Flags.
  ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
  //! Get whether the section has the given `flag`.
  ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
  //! Add `flags` to the section flags.
  ASMJIT_INLINE void addFlags(uint32_t flags) noexcept { _flags |= flags; }
  //! Clear `flags` from the section flags.
  ASMJIT_INLINE void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
  //! Get the section alignment (0 if not required).
  ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; }
  //! Set the section alignment.
  ASMJIT_INLINE void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
  //! Get the number of bytes the section occupies in the file/image.
  ASMJIT_INLINE size_t getPhysicalSize() const noexcept { return _buffer.getLength(); }
  //! Get the number of bytes the section occupies when loaded.
  ASMJIT_INLINE size_t getVirtualSize() const noexcept { return _virtualSize; }
  //! Set the virtual size of the section.
  ASMJIT_INLINE void setVirtualSize(uint32_t size) noexcept { _virtualSize = size; }
  //! Get the section's code/data buffer.
  ASMJIT_INLINE CodeBuffer& getBuffer() noexcept { return _buffer; }
  //! \overload
  ASMJIT_INLINE const CodeBuffer& getBuffer() const noexcept { return _buffer; }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  uint32_t _id; //!< Section id.
  uint32_t _flags; //!< Section flags.
  uint32_t _alignment; //!< Section alignment requirements (0 if no requirements).
  uint32_t _virtualSize; //!< Virtual size of the section (zero initialized mostly).
  char _name[36]; //!< Section name (max 35 characters, PE allows max 8).
  CodeBuffer _buffer; //!< Code or data buffer.
};
// ============================================================================
// [asmjit::LabelLink]
// ============================================================================
//! Data structure used to link labels.
//!
//! Every use of an unbound label records one `LabelLink` (created by
//! `CodeHolder::newLabelLink()`); the links form a single-linked list
//! anchored at `LabelEntry::_links` and are resolved once the label is bound.
struct LabelLink {
  LabelLink* prev; //!< Previous link (single-linked list).
  uint32_t sectionId; //!< Section id.
  uint32_t relocId; //!< Relocation id or RelocEntry::kInvalidId.
  size_t offset; //!< Label offset relative to the start of the section.
  intptr_t rel; //!< Inlined rel8/rel32.
};
// ============================================================================
// [asmjit::LabelEntry]
// ============================================================================
//! Label entry.
//!
//! Contains the following properties:
//! * Label id - This is the only thing that is set to the `Label` operand.
//! * Label name - Optional, used mostly to create executables and libraries.
//! * Label type - Type of the label, default `Label::kTypeAnonymous`.
//! * Label parent id - Derived from many assemblers that allow to define a
//! local label that falls under a global label. This allows to define
//! many labels of the same name that have different parent (global) label.
//! * Offset - offset of the label bound by `Assembler`.
//! * Links - single-linked list that contains locations of code that has
//! to be patched when the label gets bound. Every use of unbound label
//! adds one link to `_links` list.
//! * HVal - Hash value of label's name and optionally parentId.
//! * HashNext - Hash-table implementation detail.
class LabelEntry : public ZoneHashNode {
public:
  // NOTE: Label id is stored in `_customData`, which is provided by ZoneHashNode
  // to fill a padding that a C++ compiler targeting 64-bit CPU will add to align
  // the structure to 64-bits.
  //! Get label id.
  ASMJIT_INLINE uint32_t getId() const noexcept { return _customData; }
  //! Set label id (internal, used only by \ref CodeHolder).
  ASMJIT_INLINE void _setId(uint32_t id) noexcept { _customData = id; }
  //! Get label type, see \ref Label::Type.
  ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
  //! Get label flags, returns 0 at the moment.
  ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
  //! Get whether the label has a parent label (i.e. `_parentId` is non-zero).
  ASMJIT_INLINE bool hasParent() const noexcept { return _parentId != 0; }
  //! Get label's parent id.
  ASMJIT_INLINE uint32_t getParentId() const noexcept { return _parentId; }
  //! Get label's section id where it's bound to (or `SectionEntry::kInvalidId` if it's not bound yet).
  ASMJIT_INLINE uint32_t getSectionId() const noexcept { return _sectionId; }
  //! Get if the label has name.
  ASMJIT_INLINE bool hasName() const noexcept { return !_name.isEmpty(); }
  //! Get the label's name.
  //!
  //! NOTE: Local labels will return their local name without their parent
  //! part, for example ".L1".
  ASMJIT_INLINE const char* getName() const noexcept { return _name.getData(); }
  //! Get length of label's name.
  //!
  //! NOTE: Label name is always null terminated, so you can use `strlen()` to
  //! get it, however, it's also cached in `LabelEntry`, so if you want to know
  //! the length the easiest way is to use `LabelEntry::getNameLength()`.
  ASMJIT_INLINE size_t getNameLength() const noexcept { return _name.getLength(); }
  //! Get if the label is bound.
  ASMJIT_INLINE bool isBound() const noexcept { return _sectionId != SectionEntry::kInvalidId; }
  //! Get the label offset (only useful if the label is bound).
  ASMJIT_INLINE intptr_t getOffset() const noexcept { return _offset; }
  //! Get the hash-value of label's name and its parent label (if any).
  //!
  //! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function
  //! is implemented in `Utils::hashString()` and `Utils::hashRound()`.
  ASMJIT_INLINE uint32_t getHVal() const noexcept { return _hVal; }
  // ------------------------------------------------------------------------
  // [Members]
  // ------------------------------------------------------------------------
  // Let's round the size of `LabelEntry` to 64 bytes (as ZoneHeap has 32
  // bytes granularity anyway). This gives `_name` the remaining space, which
  // is roughly 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
  enum { kNameBytes = 64 - (sizeof(ZoneHashNode) + 16 + sizeof(intptr_t) + sizeof(LabelLink*)) };
  uint8_t _type; //!< Label type, see Label::Type.
  uint8_t _flags; //!< Must be zero.
  uint16_t _reserved16; //!< Reserved.
  uint32_t _parentId; //!< Label parent id or zero.
  uint32_t _sectionId; //!< Section id or `SectionEntry::kInvalidId`.
  uint32_t _reserved32; //!< Reserved.
  intptr_t _offset; //!< Label offset.
  LabelLink* _links; //!< Label links (list of unresolved uses, see \ref LabelLink).
  SmallString<kNameBytes> _name; //!< Label name.
};
// ============================================================================
// [asmjit::RelocEntry]
// ============================================================================
//! Relocation entry.
//!
//! Records one location in the generated code that must be patched by
//! `CodeHolder::relocate()` once the final base address is known.
struct RelocEntry {
  ASMJIT_ENUM(Id) {
    kInvalidId = 0xFFFFFFFFU //!< Invalid relocation id.
  };
  //! Relocation type.
  ASMJIT_ENUM(Type) {
    kTypeNone = 0, //!< Deleted entry (no relocation).
    kTypeAbsToAbs = 1, //!< Relocate absolute to absolute.
    kTypeRelToAbs = 2, //!< Relocate relative to absolute.
    kTypeAbsToRel = 3, //!< Relocate absolute to relative.
    kTypeTrampoline = 4 //!< Relocate absolute to relative or use trampoline.
  };
  // ------------------------------------------------------------------------
  // [Accessors]
  // ------------------------------------------------------------------------
  //! Get the relocation id.
  ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
  //! Get the relocation type, see \ref Type.
  ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
  //! Get the size of the patched field (1, 2, 4 or 8 bytes).
  ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; }
  //! Get the id of the section containing the patched location.
  ASMJIT_INLINE uint32_t getSourceSectionId() const noexcept { return _sourceSectionId; }
  //! Get the id of the section the relocation targets.
  ASMJIT_INLINE uint32_t getTargetSectionId() const noexcept { return _targetSectionId; }
  //! Get the offset of the patched location within the source section.
  ASMJIT_INLINE uint64_t getSourceOffset() const noexcept { return _sourceOffset; }
  //! Get the relocation payload (target offset, address, etc).
  ASMJIT_INLINE uint64_t getData() const noexcept { return _data; }
  // ------------------------------------------------------------------------
  // [Members]
  // ------------------------------------------------------------------------
  uint32_t _id; //!< Relocation id.
  uint8_t _type; //!< Type of the relocation.
  uint8_t _size; //!< Size of the relocation (1, 2, 4 or 8 bytes).
  uint8_t _reserved[2]; //!< Reserved.
  uint32_t _sourceSectionId; //!< Source section id.
  uint32_t _targetSectionId; //!< Destination section id.
  uint64_t _sourceOffset; //!< Source offset (relative to start of the section).
  uint64_t _data; //!< Relocation data (target offset, target address, etc).
};
// ============================================================================
// [asmjit::CodeHolder]
// ============================================================================
//! Contains basic information about the target architecture plus its settings,
//! and holds code & data (including sections, labels, and relocation information).
//! CodeHolder can store both binary and intermediate representation of assembly,
//! which can be generated by \ref Assembler and/or \ref CodeBuilder.
//!
//! NOTE: CodeHolder has ability to attach an \ref ErrorHandler, however, this
//! error handler is not triggered by CodeHolder itself, it's only used by the
//! attached code generators.
class ASMJIT_VIRTAPI CodeHolder {
public:
  ASMJIT_NONCOPYABLE(CodeHolder)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create an uninitialized CodeHolder (you must init() it before it can be used).
  ASMJIT_API CodeHolder() noexcept;
  //! Destroy the CodeHolder.
  ASMJIT_API ~CodeHolder() noexcept;

  // --------------------------------------------------------------------------
  // [Init / Reset]
  // --------------------------------------------------------------------------

  //! Get whether the CodeHolder was initialized by `init()`.
  ASMJIT_INLINE bool isInitialized() const noexcept { return _codeInfo.isInitialized(); }

  //! Initialize to CodeHolder to hold code described by `codeInfo`.
  ASMJIT_API Error init(const CodeInfo& info) noexcept;
  //! Detach all code-generators attached and reset the \ref CodeHolder.
  ASMJIT_API void reset(bool releaseMemory = false) noexcept;

  // --------------------------------------------------------------------------
  // [Attach / Detach]
  // --------------------------------------------------------------------------

  //! Attach a \ref CodeEmitter to this \ref CodeHolder.
  ASMJIT_API Error attach(CodeEmitter* emitter) noexcept;
  //! Detach a \ref CodeEmitter from this \ref CodeHolder.
  ASMJIT_API Error detach(CodeEmitter* emitter) noexcept;

  // --------------------------------------------------------------------------
  // [Sync]
  // --------------------------------------------------------------------------

  //! Synchronize all states of all `CodeEmitter`s associated with the CodeHolder.
  //! This is required as some code generators don't sync every time they do
  //! something - for example \ref Assembler generally syncs when it needs to
  //! reallocate the \ref CodeBuffer, but not each time it encodes instruction
  //! or directive.
  ASMJIT_API void sync() noexcept;

  // --------------------------------------------------------------------------
  // [Code-Information]
  // --------------------------------------------------------------------------

  //! Get code/target information, see \ref CodeInfo.
  ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; }
  //! Get architecture information, see \ref ArchInfo.
  ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _codeInfo.getArchInfo(); }

  //! Get the target's architecture type.
  ASMJIT_INLINE uint32_t getArchType() const noexcept { return getArchInfo().getType(); }
  //! Get the target's architecture sub-type.
  ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return getArchInfo().getSubType(); }

  //! Get if a static base-address is set.
  ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _codeInfo.hasBaseAddress(); }
  //! Get a static base-address (uint64_t).
  ASMJIT_INLINE uint64_t getBaseAddress() const noexcept { return _codeInfo.getBaseAddress(); }

  // --------------------------------------------------------------------------
  // [Global Information]
  // --------------------------------------------------------------------------

  //! Get global hints, internally propagated to all `CodeEmitter`s attached.
  ASMJIT_INLINE uint32_t getGlobalHints() const noexcept { return _globalHints; }
  //! Get global options, internally propagated to all `CodeEmitter`s attached.
  ASMJIT_INLINE uint32_t getGlobalOptions() const noexcept { return _globalOptions; }

  // --------------------------------------------------------------------------
  // [Result Information]
  // --------------------------------------------------------------------------

  //! Get the size code & data of all sections.
  ASMJIT_API size_t getCodeSize() const noexcept;

  //! Get size of all possible trampolines.
  //!
  //! Trampolines are needed to successfully generate relative jumps to absolute
  //! addresses. This value is only non-zero if jmp of call instructions were
  //! used with immediate operand (this means jumping or calling an absolute
  //! address directly).
  ASMJIT_INLINE size_t getTrampolinesSize() const noexcept { return _trampolinesSize; }

  // --------------------------------------------------------------------------
  // [Logging & Error Handling]
  // --------------------------------------------------------------------------

#if !defined(ASMJIT_DISABLE_LOGGING)
  //! Get if a logger attached.
  ASMJIT_INLINE bool hasLogger() const noexcept { return _logger != nullptr; }
  //! Get the attached logger.
  ASMJIT_INLINE Logger* getLogger() const noexcept { return _logger; }
  //! Attach a `logger` to CodeHolder and propagate it to all attached `CodeEmitter`s.
  ASMJIT_API void setLogger(Logger* logger) noexcept;
  //! Reset the logger (does nothing if not attached).
  ASMJIT_INLINE void resetLogger() noexcept { setLogger(nullptr); }
#endif // !ASMJIT_DISABLE_LOGGING

  //! Get if error-handler is attached.
  ASMJIT_INLINE bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
  //! Get the error-handler.
  ASMJIT_INLINE ErrorHandler* getErrorHandler() const noexcept { return _errorHandler; }
  //! Set the error handler, will affect all attached `CodeEmitter`s.
  ASMJIT_API Error setErrorHandler(ErrorHandler* handler) noexcept;
  //! Reset the error handler (does nothing if not attached).
  ASMJIT_INLINE void resetErrorHandler() noexcept { setErrorHandler(nullptr); }

  // --------------------------------------------------------------------------
  // [Sections]
  // --------------------------------------------------------------------------

  //! Get array of `SectionEntry*` records.
  ASMJIT_INLINE const ZoneVector<SectionEntry*>& getSections() const noexcept { return _sections; }
  //! Get a section entry of the given index (the index is not range-checked here).
  ASMJIT_INLINE SectionEntry* getSectionEntry(size_t index) const noexcept { return _sections[index]; }

  //! Grow the code buffer `cb` by `n` (growth policy defined by the out-of-line implementation).
  ASMJIT_API Error growBuffer(CodeBuffer* cb, size_t n) noexcept;
  //! Reserve capacity in the code buffer `cb` (implemented out-of-line).
  ASMJIT_API Error reserveBuffer(CodeBuffer* cb, size_t n) noexcept;

  // --------------------------------------------------------------------------
  // [Labels & Symbols]
  // --------------------------------------------------------------------------

  //! Create a new anonymous label and return its id in `idOut`.
  //!
  //! Returns `Error`, does not report error to \ref ErrorHandler.
  ASMJIT_API Error newLabelId(uint32_t& idOut) noexcept;

  //! Create a new named label label-type `type`.
  //!
  //! Returns `Error`, does not report error to \ref ErrorHandler.
  ASMJIT_API Error newNamedLabelId(uint32_t& idOut, const char* name, size_t nameLength, uint32_t type, uint32_t parentId) noexcept;

  //! Get a label id by name.
  ASMJIT_API uint32_t getLabelIdByName(const char* name, size_t nameLength = Globals::kInvalidIndex, uint32_t parentId = 0) noexcept;

  //! Create a new label-link used to store information about yet unbound labels.
  //!
  //! Returns `null` if the allocation failed.
  ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept;

  //! Get array of `LabelEntry*` records.
  ASMJIT_INLINE const ZoneVector<LabelEntry*>& getLabelEntries() const noexcept { return _labels; }

  //! Get number of labels created.
  ASMJIT_INLINE size_t getLabelsCount() const noexcept { return _labels.getLength(); }
  //! Get number of label references, which are unresolved at the moment.
  ASMJIT_INLINE size_t getUnresolvedLabelsCount() const noexcept { return _unresolvedLabelsCount; }

  //! Get if the `label` is valid (i.e. created by `newLabelId()`).
  ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept {
    return isLabelValid(label.getId());
  }
  //! Get if the label having `id` is valid (i.e. created by `newLabelId()`).
  ASMJIT_INLINE bool isLabelValid(uint32_t labelId) const noexcept {
    // Ids are packed; unpack to an index into `_labels`.
    size_t index = Operand::unpackId(labelId);
    return index < _labels.getLength();
  }

  //! Get if the `label` is already bound.
  //!
  //! Returns `false` if the `label` is not valid.
  ASMJIT_INLINE bool isLabelBound(const Label& label) const noexcept {
    return isLabelBound(label.getId());
  }
  //! \overload
  ASMJIT_INLINE bool isLabelBound(uint32_t id) const noexcept {
    size_t index = Operand::unpackId(id);
    return index < _labels.getLength() && _labels[index]->isBound();
  }

  //! Get a `label` offset or -1 if the label is not yet bound.
  ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const noexcept {
    return getLabelOffset(label.getId());
  }
  //! \overload
  ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const noexcept {
    ASMJIT_ASSERT(isLabelValid(id));
    return _labels[Operand::unpackId(id)]->getOffset();
  }

  //! Get information about the given `label`.
  ASMJIT_INLINE LabelEntry* getLabelEntry(const Label& label) const noexcept {
    return getLabelEntry(label.getId());
  }
  //! Get information about a label having the given `id`.
  //!
  //! Returns `nullptr` for an out-of-range (invalid) id.
  ASMJIT_INLINE LabelEntry* getLabelEntry(uint32_t id) const noexcept {
    size_t index = static_cast<size_t>(Operand::unpackId(id));
    return index < _labels.getLength() ? _labels[index] : static_cast<LabelEntry*>(nullptr);
  }

  // --------------------------------------------------------------------------
  // [Relocations]
  // --------------------------------------------------------------------------

  //! Create a new relocation entry of type `type` and size `size`.
  //!
  //! Additional fields can be set after the relocation entry was created.
  ASMJIT_API Error newRelocEntry(RelocEntry** dst, uint32_t type, uint32_t size) noexcept;

  //! Get if the code contains relocations.
  ASMJIT_INLINE bool hasRelocations() const noexcept { return !_relocations.isEmpty(); }
  //! Get array of `RelocEntry*` records.
  ASMJIT_INLINE const ZoneVector<RelocEntry*>& getRelocEntries() const noexcept { return _relocations; }

  //! Get a `RelocEntry` having the given `id` (the id is not range-checked here).
  ASMJIT_INLINE RelocEntry* getRelocEntry(uint32_t id) const noexcept { return _relocations[id]; }

  //! Relocate the code to `baseAddress` and copy it to `dst`.
  //!
  //! \param dst Contains the location where the relocated code should be
  //! copied. The pointer can be address returned by virtual memory allocator
  //! or any other address that has sufficient space.
  //!
  //! \param baseAddress Base address used for relocation. `JitRuntime` always
  //! sets the `baseAddress` to be the same as `dst`.
  //!
  //! \return The number bytes actually used. If the code emitter reserved
  //! space for possible trampolines, but didn't use it, the number of bytes
  //! used can actually be less than the expected worst case. Virtual memory
  //! allocator can shrink the memory it allocated initially.
  //!
  //! A given buffer will be overwritten, to get the number of bytes required,
  //! use `getCodeSize()`.
  ASMJIT_API size_t relocate(void* dst, uint64_t baseAddress = Globals::kNoBaseAddress) const noexcept;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  CodeInfo _codeInfo;                    //!< Basic information about the code (architecture and other info).

  uint32_t _globalHints;                 //!< Global hints, propagated to all `CodeEmitter`s.
  uint32_t _globalOptions;               //!< Global options, propagated to all `CodeEmitter`s.

  CodeEmitter* _emitters;                //!< Linked-list of all attached `CodeEmitter`s.
  Assembler* _cgAsm;                     //!< Attached \ref Assembler (only one at a time).

  Logger* _logger;                       //!< Attached \ref Logger, used by all consumers.
  ErrorHandler* _errorHandler;           //!< Attached \ref ErrorHandler.

  uint32_t _unresolvedLabelsCount;       //!< Count of label references which were not resolved.
  uint32_t _trampolinesSize;             //!< Size of all possible trampolines.

  Zone _baseZone;                        //!< Base zone (used to allocate core structures).
  Zone _dataZone;                        //!< Data zone (used to allocate extra data like label names).
  ZoneHeap _baseHeap;                    //!< Zone allocator, used to manage internal containers.

  ZoneVector<SectionEntry*> _sections;   //!< Section entries.
  ZoneVector<LabelEntry*> _labels;       //!< Label entries (each label is stored here).
  ZoneVector<RelocEntry*> _relocations;  //!< Relocation entries.
  ZoneHash<LabelEntry> _namedLabels;     //!< Label name -> LabelEntry (only named labels).
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CODEHOLDER_H

View File

@@ -1,630 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/assembler.h"
#include "../base/compiler.h"
#include "../base/compilercontext_p.h"
#include "../base/cpuinfo.h"
#include "../base/logger.h"
#include "../base/utils.h"
#include <stdarg.h>
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [Constants]
// ============================================================================
// Sentinel used for variables that have no explicit name (never freed).
static const char noName[1] = { '\0' };
// Default instruction look-ahead distance used by the compiler.
enum { kCompilerDefaultLookAhead = 64 };
// ============================================================================
// [asmjit::Compiler - Construction / Destruction]
// ============================================================================
//! Create a new `Compiler` with default state and pre-sized zone allocators.
Compiler::Compiler() noexcept
  : _features(0),
    _maxLookAhead(kCompilerDefaultLookAhead),
    _instOptions(0),
    _tokenGenerator(0),
    _nodeFlowId(0),
    _nodeFlags(0),
    _targetVarMapping(nullptr),
    _firstNode(nullptr),
    _lastNode(nullptr),
    _cursor(nullptr),
    _func(nullptr),
    // Block sizes subtract the zone bookkeeping overhead - presumably so a
    // zone block plus its header fits a power-of-two allocation (confirm).
    _zoneAllocator(8192 - Zone::kZoneOverhead),
    _varAllocator(4096 - Zone::kZoneOverhead),
    _stringAllocator(4096 - Zone::kZoneOverhead),
    _constAllocator(4096 - Zone::kZoneOverhead),
    _localConstPool(&_constAllocator),
    _globalConstPool(&_zoneAllocator) {}

//! Destroy the `Compiler`.
Compiler::~Compiler() noexcept {}
// ============================================================================
// [asmjit::Compiler - Attach / Reset]
// ============================================================================
//! Detach from the attached \ref Assembler (if any) and return the compiler
//! to its default state.
//!
//! When `releaseMemory` is true the zone allocators and the variable list
//! release their backing memory instead of keeping it for reuse.
void Compiler::reset(bool releaseMemory) noexcept {
  // Tell the assembler this external tool is going away.
  Assembler* assembler = getAssembler();
  if (assembler != nullptr)
    assembler->_detached(this);

  _arch = kArchNone;
  _regSize = 0;

  _finalized = false;
  _lastError = kErrorNotInitialized;

  _features = 0;
  _maxLookAhead = kCompilerDefaultLookAhead;

  _instOptions = 0;
  _tokenGenerator = 0;

  _nodeFlowId = 0;
  _nodeFlags = 0;

  // Drop the whole node stream (nodes live in the zone allocators below).
  _firstNode = nullptr;
  _lastNode = nullptr;

  _cursor = nullptr;
  _func = nullptr;

  _localConstPool.reset();
  _globalConstPool.reset();

  _localConstPoolLabel.reset();
  _globalConstPoolLabel.reset();

  _zoneAllocator.reset(releaseMemory);
  _varAllocator.reset(releaseMemory);
  _stringAllocator.reset(releaseMemory);
  _constAllocator.reset(releaseMemory);

  _varList.reset(releaseMemory);
}
// ============================================================================
// [asmjit::Compiler - Node-Factory]
// ============================================================================
//! Create a new \ref HLData node holding `size` bytes of `data`.
//!
//! Payloads larger than the node's inline buffer are cloned into the string
//! allocator; returns `nullptr` when that allocation fails.
HLData* Compiler::newDataNode(const void* data, uint32_t size) noexcept {
  const void* payload = data;

  // Too big to embed inline - clone into zone-allocated storage first.
  if (size > HLData::kInlineBufferSize) {
    void* cloned = _stringAllocator.alloc(size);
    if (cloned == nullptr)
      return nullptr;

    if (payload != nullptr)
      ::memcpy(cloned, payload, size);
    payload = cloned;
  }

  return newNode<HLData>(const_cast<void*>(payload), size);
}
//! Create a new \ref HLAlign node (the node is not added to the stream).
HLAlign* Compiler::newAlignNode(uint32_t alignMode, uint32_t offset) noexcept {
  return newNode<HLAlign>(alignMode, offset);
}
//! Create a new \ref HLLabel node and register it with the attached
//! \ref Assembler.
//!
//! Returns `nullptr` if no assembler is attached or an allocation fails.
HLLabel* Compiler::newLabelNode() noexcept {
  Assembler* assembler = getAssembler();
  if (assembler == nullptr) return nullptr;

  uint32_t id = assembler->_newLabelId();
  LabelData* ld = assembler->getLabelData(id);

  HLLabel* node = newNode<HLLabel>(id);
  if (node == nullptr) return nullptr;

  // These have to be zero now (freshly created label data).
  ASMJIT_ASSERT(ld->exId == 0);
  ASMJIT_ASSERT(ld->exData == nullptr);

  // Link the assembler's label data back to this node so `getHLLabel()`
  // can resolve the id to the HLLabel later.
  ld->exId = _exId;
  ld->exData = node;

  return node;
}
//! Create a new \ref HLComment node; a non-empty `str` is duplicated into
//! the string allocator first. Returns `nullptr` on allocation failure.
HLComment* Compiler::newCommentNode(const char* str) noexcept {
  const char* text = str;

  if (text != nullptr && text[0] != '\0') {
    text = _stringAllocator.sdup(text);
    if (text == nullptr)
      return nullptr;
  }

  return newNode<HLComment>(text);
}
//! Create a new \ref HLHint node for `var` (not added to the stream).
//!
//! Returns `nullptr` if `var` has no id assigned.
HLHint* Compiler::newHintNode(Var& var, uint32_t hint, uint32_t value) noexcept {
  if (var.getId() == kInvalidValue)
    return nullptr;

  VarData* vd = getVd(var);
  return newNode<HLHint>(vd, hint, value);
}
// ============================================================================
// [asmjit::Compiler - Code-Stream]
// ============================================================================
//! Append a function to the code stream.
//!
//! Adds the function node, its entry node, exit node and end node, then
//! rewinds the cursor so code emitted next lands inside the function body
//! (between entry and exit).
HLNode* Compiler::addFunc(HLFunc* func) noexcept {
  // Only one function can be added at a time.
  ASMJIT_ASSERT(_func == nullptr);
  _func = func;

  addNode(func);                 // Add function node.
  addNode(func->getEntryNode()); // Add function entry.
  HLNode* cursor = getCursor();

  addNode(func->getExitNode());  // Add function exit / epilog marker.
  addNode(func->getEnd());       // Add function end.
  setCursor(cursor);

  return func;
}
//! Insert `node` after the current cursor (or at the beginning of the stream
//! when there is no cursor) and make `node` the new cursor.
//!
//! `node` must be detached (both links null) when passed in.
HLNode* Compiler::addNode(HLNode* node) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);

  if (_cursor == nullptr) {
    // No cursor - prepend the node to the stream.
    if (_firstNode == nullptr) {
      _firstNode = node;
      _lastNode = node;
    }
    else {
      node->_next = _firstNode;
      _firstNode->_prev = node;
      _firstNode = node;
    }
  }
  else {
    // Link the node between the cursor and the cursor's successor.
    HLNode* prev = _cursor;
    HLNode* next = _cursor->_next;

    node->_prev = prev;
    node->_next = next;

    prev->_next = node;
    if (next)
      next->_prev = node;
    else
      _lastNode = node;
  }

  _cursor = node;
  return node;
}
//! Insert `node` immediately before `ref`; does not move the cursor.
//!
//! `node` must be detached (both links null) when passed in.
HLNode* Compiler::addNodeBefore(HLNode* node, HLNode* ref) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);
  ASMJIT_ASSERT(ref != nullptr);

  HLNode* before = ref->_prev;

  node->_prev = before;
  node->_next = ref;

  ref->_prev = node;
  if (before != nullptr)
    before->_next = node;
  else
    _firstNode = node;   // `ref` was the first node.

  return node;
}
//! Insert `node` immediately after `ref`; does not move the cursor.
//!
//! `node` must be detached (both links null) when passed in.
HLNode* Compiler::addNodeAfter(HLNode* node, HLNode* ref) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);
  ASMJIT_ASSERT(ref != nullptr);

  HLNode* after = ref->_next;

  node->_prev = ref;
  node->_next = after;

  ref->_next = node;
  if (after != nullptr)
    after->_prev = node;
  else
    _lastNode = node;    // `ref` was the last node.

  return node;
}
//! \internal
//!
//! Fixup performed after `node_` was unlinked from the code stream: if it is
//! a jump, detach it from the target label's jump-list and drop one label
//! reference.
static ASMJIT_INLINE void Compiler_nodeRemoved(Compiler* self, HLNode* node_) noexcept {
  if (!node_->isJmpOrJcc())
    return;

  HLJump* node = static_cast<HLJump*>(node_);
  HLLabel* label = node->getTarget();

  if (label == nullptr)
    return;

  // Walk the label's singly-linked `_from` jump-list to find `node`.
  //
  // The original loop asserted `*pPrev != nullptr` and then null-checked the
  // very same pointer - a contradictory pair (the assert fired before the
  // guard could ever run in debug builds). Keep the release-mode guard and
  // assert the invariant once, after the scan.
  HLJump** pPrev = &label->_from;
  HLJump* current = *pPrev;

  while (current != nullptr && current != node) {
    pPrev = &current->_jumpNext;
    current = *pPrev;
  }

  // A removed jump must have been registered with its target label.
  ASMJIT_ASSERT(current != nullptr);
  if (current == node)
    *pPrev = node->_jumpNext;

  label->subNumRefs();
}
//! Unlink `node` from the code stream and return it.
//!
//! Assumes `node` is currently linked in this stream. If the cursor points
//! at `node` the cursor moves to the previous node.
HLNode* Compiler::removeNode(HLNode* node) noexcept {
  HLNode* prev = node->_prev;
  HLNode* next = node->_next;

  if (_firstNode == node)
    _firstNode = next;
  else
    prev->_next = next;

  if (_lastNode == node)
    _lastNode = prev;
  else
    next->_prev = prev;

  node->_prev = nullptr;
  node->_next = nullptr;

  if (_cursor == node)
    _cursor = prev;
  // Jump bookkeeping (label back-references) is maintained here.
  Compiler_nodeRemoved(this, node);

  return node;
}
//! Unlink the inclusive range `first`..`last` from the code stream.
//!
//! Assumes both nodes are linked in this stream with `first` preceding
//! `last`. If the cursor points into the range it moves before the range.
void Compiler::removeNodes(HLNode* first, HLNode* last) noexcept {
  if (first == last) {
    removeNode(first);
    return;
  }

  HLNode* prev = first->_prev;
  HLNode* next = last->_next;

  if (_firstNode == first)
    _firstNode = next;
  else
    prev->_next = next;

  if (_lastNode == last)
    _lastNode = prev;
  else
    next->_prev = prev;

  // Detach every node in the range, one by one.
  HLNode* node = first;
  for (;;) {
    // Capture the successor before the links are cleared.
    HLNode* nextNode = node->getNext();

    node->_prev = nullptr;
    node->_next = nullptr;

    if (_cursor == node)
      _cursor = prev;
    Compiler_nodeRemoved(this, node);

    if (node == last)
      break;

    // Only interior nodes must have a successor. The original asserted
    // before the `node == last` check, so removing a range that ended at
    // the list tail (last->_next == nullptr) fired the assert in debug.
    ASMJIT_ASSERT(nextNode != nullptr);
    node = nextNode;
  }
}
//! Set the insertion cursor to `node` and return the previous cursor.
//!
//! A `nullptr` cursor makes subsequent `addNode()` calls prepend to the stream.
HLNode* Compiler::setCursor(HLNode* node) noexcept {
  HLNode* old = _cursor;
  _cursor = node;
  return old;
}
// ============================================================================
// [asmjit::Compiler - Align]
// ============================================================================
//! Insert an alignment directive into the code stream.
//!
//! Sets last-error and returns `kErrorNoHeapMemory` on allocation failure.
Error Compiler::align(uint32_t alignMode, uint32_t offset) noexcept {
  HLAlign* alignNode = newAlignNode(alignMode, offset);
  if (alignNode == nullptr)
    return setLastError(kErrorNoHeapMemory);

  addNode(alignNode);
  return kErrorOk;
}
// ============================================================================
// [asmjit::Compiler - Label]
// ============================================================================
//! Get the \ref HLLabel node associated with label `id`.
//!
//! Returns `nullptr` when no assembler is attached or when the label data
//! was created by a different external tool (exId mismatch).
HLLabel* Compiler::getHLLabel(uint32_t id) const noexcept {
  Assembler* assembler = getAssembler();
  if (assembler == nullptr)
    return nullptr;

  LabelData* ld = assembler->getLabelData(id);
  return ld->exId == _exId ? static_cast<HLLabel*>(ld->exData) : nullptr;
}
//! Get whether `id` refers to a label known to the attached assembler
//! (false when no assembler is attached).
bool Compiler::isLabelValid(uint32_t id) const noexcept {
  Assembler* assembler = getAssembler();
  return assembler != nullptr && static_cast<size_t>(id) < assembler->getLabelsCount();
}
//! \internal
//!
//! Create a new label node and return its id; returns `kInvalidValue` and
//! sets last-error to `kErrorNoHeapMemory` on failure.
uint32_t Compiler::_newLabelId() noexcept {
  HLLabel* node = newLabelNode();
  if (node == nullptr) {
    setLastError(kErrorNoHeapMemory);
    return kInvalidValue;
  }

  return node->getLabelId();
}
//! Insert the \ref HLLabel node of `label` at the current cursor position.
//!
//! Sets last-error and returns `kErrorInvalidState` when the label cannot
//! be resolved to a node.
Error Compiler::bind(const Label& label) noexcept {
  HLLabel* labelNode = getHLLabel(label);
  if (labelNode == nullptr)
    return setLastError(kErrorInvalidState);

  addNode(labelNode);
  return kErrorOk;
}
// ============================================================================
// [asmjit::Compiler - Embed]
// ============================================================================
//! Embed raw `data` of `size` bytes into the code stream.
//!
//! Sets last-error and returns `kErrorNoHeapMemory` on allocation failure.
Error Compiler::embed(const void* data, uint32_t size) noexcept {
  HLData* dataNode = newDataNode(data, size);
  if (dataNode == nullptr)
    return setLastError(kErrorNoHeapMemory);

  addNode(dataNode);
  return kErrorOk;
}
//! Align, bind `label` and embed the contents of `pool` into the stream.
//!
//! Returns `kErrorInvalidState` for an invalid label and propagates any
//! error raised while aligning, binding or allocating the data node.
Error Compiler::embedConstPool(const Label& label, const ConstPool& pool) noexcept {
  if (label.getId() == kInvalidValue)
    return kErrorInvalidState;

  // Propagate failures; the original ignored the return values of align()
  // and bind() and kept emitting after an error.
  Error err = align(kAlignData, static_cast<uint32_t>(pool.getAlignment()));
  if (err != kErrorOk)
    return err;

  err = bind(label);
  if (err != kErrorOk)
    return err;

  HLData* embedNode = newDataNode(nullptr, static_cast<uint32_t>(pool.getSize()));
  if (embedNode == nullptr)
    return kErrorNoHeapMemory;

  pool.fill(embedNode->getData());
  addNode(embedNode);

  return kErrorOk;
}
// ============================================================================
// [asmjit::Compiler - Comment]
// ============================================================================
//! Append a comment node formatted `printf`-style.
//!
//! The formatted text is truncated to fit the local 256-byte buffer. Sets
//! last-error and returns `kErrorNoHeapMemory` on allocation failure.
Error Compiler::comment(const char* fmt, ...) noexcept {
  char buf[256];
  char* p = buf;

  if (fmt) {
    va_list ap;
    va_start(ap, fmt);
    int written = vsnprintf(p, 254, fmt, ap);
    va_end(ap);

    // vsnprintf() returns the length the output WOULD have had, which can
    // exceed the buffer size. The original advanced `p` by the raw return
    // value, so a long comment made `p[0] = '\0'` write far past `buf`.
    // Clamp to what was actually stored (at most 253 characters) and treat
    // an encoding error (negative return) as empty output.
    if (written > 0)
      p += Utils::iMin<int>(written, 253);
  }

  p[0] = '\0';

  HLComment* node = newCommentNode(buf);
  if (node == nullptr)
    return setLastError(kErrorNoHeapMemory);

  addNode(node);
  return kErrorOk;
}
// ============================================================================
// [asmjit::Compiler - Hint]
// ============================================================================
//! \internal
//!
//! Emit a register-allocator hint node for `var`; hints on variables
//! without an id are silently ignored.
Error Compiler::_hint(Var& var, uint32_t hint, uint32_t value) noexcept {
  if (var.getId() == kInvalidValue)
    return kErrorOk;

  HLHint* hintNode = newHintNode(var, hint, value);
  if (hintNode == nullptr)
    return setLastError(kErrorNoHeapMemory);

  addNode(hintNode);
  return kErrorOk;
}
// ============================================================================
// [asmjit::Compiler - Vars]
// ============================================================================
//! \internal
//!
//! Allocate and initialize a new \ref VarData record describing a virtual
//! variable of type `vi`, optionally named `name`.
//!
//! Returns `nullptr` and sets last-error to `kErrorNoHeapMemory` when the
//! allocation or the registration in `_varList` fails.
VarData* Compiler::_newVd(const VarInfo& vi, const char* name) noexcept {
  VarData* vd = reinterpret_cast<VarData*>(_varAllocator.alloc(sizeof(VarData)));
  if (ASMJIT_UNLIKELY(vd == nullptr))
    goto _NoMemory;

  vd->_name = noName;
  // The variable id encodes its index in `_varList`.
  vd->_id = OperandUtil::makeVarId(static_cast<uint32_t>(_varList.getLength()));
  vd->_localId = kInvalidValue;

#if !defined(ASMJIT_DISABLE_LOGGER)
  if (name != nullptr && name[0] != '\0') {
    // NOTE(review): sdup() can return nullptr on failure, which would leave
    // `_name` null instead of the `noName` sentinel - confirm callers cope.
    vd->_name = _stringAllocator.sdup(name);
  }
#endif // !ASMJIT_DISABLE_LOGGER

  vd->_type = static_cast<uint8_t>(vi.getTypeId());
  vd->_class = static_cast<uint8_t>(vi.getRegClass());
  vd->_flags = 0;
  vd->_priority = 10;               // Default allocation priority.

  vd->_state = kVarStateNone;
  vd->_regIndex = kInvalidReg;

  vd->_isStack = false;
  vd->_isMemArg = false;
  vd->_isCalculated = false;
  vd->_saveOnUnuse = false;
  vd->_modified = false;
  vd->_reserved0 = 0;
  // Alignment follows the variable size, capped at 64 bytes.
  vd->_alignment = static_cast<uint8_t>(Utils::iMin<uint32_t>(vi.getSize(), 64));

  vd->_size = vi.getSize();
  vd->_homeMask = 0;

  vd->_memOffset = 0;
  vd->_memCell = nullptr;

  // Register/memory access counters (filled during analysis).
  vd->rReadCount = 0;
  vd->rWriteCount = 0;
  vd->mReadCount = 0;
  vd->mWriteCount = 0;

  vd->_va = nullptr;

  if (ASMJIT_UNLIKELY(_varList.append(vd) != kErrorOk))
    goto _NoMemory;
  return vd;

_NoMemory:
  setLastError(kErrorNoHeapMemory);
  return nullptr;
}
//! Emit a `kVarHintAlloc` hint for `var` (register chosen by the allocator).
//! No-op for variables without an id.
Error Compiler::alloc(Var& var) noexcept {
  if (var.getId() == kInvalidValue)
    return kErrorOk;
  return _hint(var, kVarHintAlloc, kInvalidValue);
}

//! Emit a `kVarHintAlloc` hint requesting the register at `regIndex`.
Error Compiler::alloc(Var& var, uint32_t regIndex) noexcept {
  if (var.getId() == kInvalidValue)
    return kErrorOk;
  return _hint(var, kVarHintAlloc, regIndex);
}

//! Emit a `kVarHintAlloc` hint requesting the physical register `reg`.
Error Compiler::alloc(Var& var, const Reg& reg) noexcept {
  if (var.getId() == kInvalidValue)
    return kErrorOk;
  return _hint(var, kVarHintAlloc, reg.getRegIndex());
}

//! Emit a `kVarHintSave` hint for `var`.
Error Compiler::save(Var& var) noexcept {
  if (var.getId() == kInvalidValue)
    return kErrorOk;
  return _hint(var, kVarHintSave, kInvalidValue);
}

//! Emit a `kVarHintSpill` hint for `var`.
Error Compiler::spill(Var& var) noexcept {
  if (var.getId() == kInvalidValue)
    return kErrorOk;
  return _hint(var, kVarHintSpill, kInvalidValue);
}

//! Emit a `kVarHintUnuse` hint for `var`.
Error Compiler::unuse(Var& var) noexcept {
  if (var.getId() == kInvalidValue)
    return kErrorOk;
  return _hint(var, kVarHintUnuse, kInvalidValue);
}
//! Get the allocation priority of `var`, or `kInvalidValue` when the
//! variable has no id.
uint32_t Compiler::getPriority(Var& var) const noexcept {
  if (var.getId() == kInvalidValue)
    return kInvalidValue;

  VarData* vd = getVdById(var.getId());
  return vd->getPriority();
}

//! Set the allocation priority of `var`, clamped to 0..255 (stored in a
//! uint8_t). No-op for variables without an id.
void Compiler::setPriority(Var& var, uint32_t priority) noexcept {
  if (var.getId() == kInvalidValue)
    return;
  if (priority > 255)
    priority = 255;

  VarData* vd = getVdById(var.getId());
  vd->_priority = static_cast<uint8_t>(priority);
}

//! Get the save-on-unuse flag of `var` (false when it has no id).
bool Compiler::getSaveOnUnuse(Var& var) const noexcept {
  if (var.getId() == kInvalidValue)
    return false;

  VarData* vd = getVdById(var.getId());
  return static_cast<bool>(vd->_saveOnUnuse);
}

//! Set the save-on-unuse flag of `var`. No-op for variables without an id.
void Compiler::setSaveOnUnuse(Var& var, bool value) noexcept {
  if (var.getId() == kInvalidValue)
    return;

  VarData* vd = getVdById(var.getId());
  vd->_saveOnUnuse = value;
}
//! Rename `var` using a `printf`-style format (truncated to 63 characters).
//!
//! Passing a null or empty `fmt` resets the name to the `noName` sentinel.
//! No-op for variables without an id.
void Compiler::rename(Var& var, const char* fmt, ...) noexcept {
  if (var.getId() == kInvalidValue)
    return;

  VarData* vd = getVdById(var.getId());
  vd->_name = noName;

  if (fmt != nullptr && fmt[0] != '\0') {
    char buf[64];

    va_list ap;
    va_start(ap, fmt);

    vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
    buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0';

    va_end(ap);

    // sdup() can fail; keep the `noName` sentinel in that case instead of
    // storing nullptr (the original assigned the result unchecked, while
    // `_newVd()` establishes `noName` as the no-name default).
    const char* copied = _stringAllocator.sdup(buf);
    if (copied != nullptr)
      vd->_name = copied;
  }
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER

View File

@@ -1,576 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_COMPILER_H
#define _ASMJIT_BASE_COMPILER_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/assembler.h"
#include "../base/compilerfunc.h"
#include "../base/constpool.h"
#include "../base/containers.h"
#include "../base/hlstream.h"
#include "../base/operand.h"
#include "../base/podvector.h"
#include "../base/utils.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [Forward Declarations]
// ============================================================================
struct VarAttr;
struct VarData;
struct VarMap;
struct VarState;
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::CompilerFeatures]
// ============================================================================
ASMJIT_ENUM(CompilerFeatures) {
//! Schedule instructions so they can be executed faster (`Compiler` only).
//!
//! Default `false` - has to be explicitly enabled as the scheduler needs
//! some time to run.
//!
//! X86/X64 Specific
//! ----------------
//!
//! If scheduling is enabled AsmJit will try to reorder instructions to
//! minimize the dependency chain. Scheduler always runs after the registers
//! are allocated so it doesn't change count of register allocs/spills.
//!
//! This feature is highly experimental and untested.
kCompilerFeatureEnableScheduler = 0
};
// ============================================================================
// [asmjit::ConstScope]
// ============================================================================
//! Scope of the constant.
ASMJIT_ENUM(ConstScope) {
//! Local constant, always embedded right after the current function.
kConstScopeLocal = 0,
//! Global constant, embedded at the end of the currently compiled code.
kConstScopeGlobal = 1
};
// ============================================================================
// [asmjit::VarInfo]
// ============================================================================
//! Static description of a variable type: its size, register class/type
//! and content flags.
struct VarInfo {
  // --------------------------------------------------------------------------
  // [Flags]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Variable flags.
  ASMJIT_ENUM(Flags) {
    //! Variable contains one or more single-precision floating point.
    kFlagSP = 0x10,
    //! Variable contains one or more double-precision floating point.
    kFlagDP = 0x20,
    //! Variable is a vector, contains packed data.
    kFlagSIMD = 0x80
  };

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get type id.
  ASMJIT_INLINE uint32_t getTypeId() const noexcept { return _typeId; }
  //! Get type name.
  ASMJIT_INLINE const char* getTypeName() const noexcept { return _typeName; }
  //! Get register size in bytes.
  ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; }
  //! Get variable class, see \ref RegClass.
  ASMJIT_INLINE uint32_t getRegClass() const noexcept { return _regClass; }
  //! Get register type, see `X86RegType`.
  ASMJIT_INLINE uint32_t getRegType() const noexcept { return _regType; }
  //! Get type flags, see \ref Flags.
  ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Variable type id.
  uint8_t _typeId;
  //! Variable and register size (in bytes).
  uint8_t _size;
  //! Register class, see `RegClass`.
  uint8_t _regClass;
  //! Register type the variable is mapped to.
  uint8_t _regType;
  //! Variable info flags, see \ref Flags.
  uint32_t _flags;
  //! Variable type name (short, NUL-terminated).
  char _typeName[8];
};
// ============================================================================
// [asmjit::Compiler]
// ============================================================================

//! Compiler interface.
//!
//! Emits code into a doubly-linked list of `HLNode`s (see `_firstNode` /
//! `_lastNode` / `_cursor`) instead of encoding it directly, so the stream
//! can be post-processed (register allocation, dead-code removal) before the
//! final serialization.
//!
//! \sa Assembler.
class ASMJIT_VIRTAPI Compiler : public ExternalTool {
public:
  ASMJIT_NO_COPY(Compiler)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `Compiler` instance.
  ASMJIT_API Compiler() noexcept;
  //! Destroy the `Compiler` instance.
  ASMJIT_API virtual ~Compiler() noexcept;

  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------

  //! \override
  ASMJIT_API virtual void reset(bool releaseMemory) noexcept;

  // --------------------------------------------------------------------------
  // [Compiler Features]
  // --------------------------------------------------------------------------

  //! Get code-generator features (bit-mask of feature ids).
  ASMJIT_INLINE uint32_t getFeatures() const noexcept {
    return _features;
  }
  //! Set all code-generator features at once.
  ASMJIT_INLINE void setFeatures(uint32_t features) noexcept {
    _features = features;
  }

  //! Get whether the code-generator `feature` is enabled.
  ASMJIT_INLINE bool hasFeature(uint32_t feature) const noexcept {
    ASMJIT_ASSERT(feature < 32);
    // Unsigned shift - `1 << 31` on a signed int is not portable.
    return (_features & (static_cast<uint32_t>(1) << feature)) != 0;
  }

  //! Set code-generator `feature` to `value`.
  //!
  //! NOTE: Clears the bit when `value` is false. The previous implementation
  //! reused `value << feature` as both the clear-mask and the set-mask, which
  //! made it impossible to turn a feature off.
  ASMJIT_INLINE void setFeature(uint32_t feature, bool value) noexcept {
    ASMJIT_ASSERT(feature < 32);
    uint32_t mask = static_cast<uint32_t>(1) << feature;

    if (value)
      _features |= mask;
    else
      _features &= ~mask;
  }

  //! Get maximum look ahead (number of nodes inspected when allocating).
  ASMJIT_INLINE uint32_t getMaxLookAhead() const noexcept {
    return _maxLookAhead;
  }
  //! Set maximum look ahead to `val`.
  ASMJIT_INLINE void setMaxLookAhead(uint32_t val) noexcept {
    _maxLookAhead = val;
  }

  // --------------------------------------------------------------------------
  // [Token ID]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Reset the token-id generator.
  ASMJIT_INLINE void _resetTokenGenerator() noexcept {
    _tokenGenerator = 0;
  }

  //! \internal
  //!
  //! Generate a new unique token id (simple incrementing counter, never zero
  //! after the first call).
  ASMJIT_INLINE uint32_t _generateUniqueToken() noexcept {
    return ++_tokenGenerator;
  }

  // --------------------------------------------------------------------------
  // [Instruction Options]
  // --------------------------------------------------------------------------

  //! Get options of the next instruction.
  ASMJIT_INLINE uint32_t getInstOptions() const noexcept {
    return _instOptions;
  }
  //! Set options of the next instruction.
  ASMJIT_INLINE void setInstOptions(uint32_t instOptions) noexcept {
    _instOptions = instOptions;
  }

  //! Get options of the next instruction and reset them to zero.
  ASMJIT_INLINE uint32_t getInstOptionsAndReset() noexcept {
    uint32_t instOptions = _instOptions;
    _instOptions = 0;
    return instOptions;
  }

  // --------------------------------------------------------------------------
  // [Node-Factory]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Allocate a node of type `T` from the zone and construct it in place.
  template<typename T>
  ASMJIT_INLINE T* newNode() noexcept {
    void* p = _zoneAllocator.alloc(sizeof(T));
    return new(p) T(this);
  }

  //! \internal
  template<typename T, typename P0>
  ASMJIT_INLINE T* newNode(P0 p0) noexcept {
    void* p = _zoneAllocator.alloc(sizeof(T));
    return new(p) T(this, p0);
  }

  //! \internal
  template<typename T, typename P0, typename P1>
  ASMJIT_INLINE T* newNode(P0 p0, P1 p1) noexcept {
    void* p = _zoneAllocator.alloc(sizeof(T));
    return new(p) T(this, p0, p1);
  }

  //! \internal
  template<typename T, typename P0, typename P1, typename P2>
  ASMJIT_INLINE T* newNode(P0 p0, P1 p1, P2 p2) noexcept {
    void* p = _zoneAllocator.alloc(sizeof(T));
    return new(p) T(this, p0, p1, p2);
  }

  //! \internal
  //!
  //! Create a new `HLData` node.
  ASMJIT_API HLData* newDataNode(const void* data, uint32_t size) noexcept;

  //! \internal
  //!
  //! Create a new `HLAlign` node.
  ASMJIT_API HLAlign* newAlignNode(uint32_t alignMode, uint32_t offset) noexcept;

  //! \internal
  //!
  //! Create a new `HLLabel` node.
  ASMJIT_API HLLabel* newLabelNode() noexcept;

  //! \internal
  //!
  //! Create a new `HLComment`.
  ASMJIT_API HLComment* newCommentNode(const char* str) noexcept;

  //! \internal
  //!
  //! Create a new `HLHint`.
  ASMJIT_API HLHint* newHintNode(Var& var, uint32_t hint, uint32_t value) noexcept;

  // --------------------------------------------------------------------------
  // [Code-Stream]
  // --------------------------------------------------------------------------

  //! Add a function `node` to the stream.
  ASMJIT_API HLNode* addFunc(HLFunc* func) noexcept;
  //! Add node `node` after current and set current to `node`.
  ASMJIT_API HLNode* addNode(HLNode* node) noexcept;
  //! Insert `node` before `ref`.
  ASMJIT_API HLNode* addNodeBefore(HLNode* node, HLNode* ref) noexcept;
  //! Insert `node` after `ref`.
  ASMJIT_API HLNode* addNodeAfter(HLNode* node, HLNode* ref) noexcept;
  //! Remove `node` from the stream.
  ASMJIT_API HLNode* removeNode(HLNode* node) noexcept;
  //! Remove multiple nodes `[first, last]`.
  ASMJIT_API void removeNodes(HLNode* first, HLNode* last) noexcept;

  //! Get the first node.
  ASMJIT_INLINE HLNode* getFirstNode() const noexcept { return _firstNode; }
  //! Get the last node.
  ASMJIT_INLINE HLNode* getLastNode() const noexcept { return _lastNode; }

  //! Get current node.
  //!
  //! \note If this method returns `nullptr` it means that nothing has been
  //! emitted yet.
  ASMJIT_INLINE HLNode* getCursor() const noexcept { return _cursor; }

  //! \internal
  //!
  //! Set the current node without returning the previous node.
  ASMJIT_INLINE void _setCursor(HLNode* node) noexcept { _cursor = node; }
  //! Set the current node to `node` and return the previous one.
  ASMJIT_API HLNode* setCursor(HLNode* node) noexcept;

  // --------------------------------------------------------------------------
  // [Func]
  // --------------------------------------------------------------------------

  //! Get current function.
  ASMJIT_INLINE HLFunc* getFunc() const noexcept { return _func; }

  // --------------------------------------------------------------------------
  // [Align]
  // --------------------------------------------------------------------------

  //! Align target buffer to the `offset` specified.
  //!
  //! The sequence that is used to fill the gap between the aligned location
  //! and the current depends on `alignMode`, see \ref AlignMode.
  ASMJIT_API Error align(uint32_t alignMode, uint32_t offset) noexcept;

  // --------------------------------------------------------------------------
  // [Label]
  // --------------------------------------------------------------------------

  //! Get `HLLabel` by `id`.
  //!
  //! NOTE: The label has to be valid, see `isLabelValid()`.
  ASMJIT_API HLLabel* getHLLabel(uint32_t id) const noexcept;

  //! Get `HLLabel` by `label`.
  //!
  //! NOTE: The label has to be valid, see `isLabelValid()`.
  //! (Now `const` for consistency with the `uint32_t` overload.)
  ASMJIT_INLINE HLLabel* getHLLabel(const Label& label) const noexcept {
    return getHLLabel(label.getId());
  }

  //! Get whether the label `id` is valid.
  ASMJIT_API bool isLabelValid(uint32_t id) const noexcept;
  //! Get whether the `label` is valid.
  ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept {
    return isLabelValid(label.getId());
  }

  //! \internal
  //!
  //! Create a new label and return its ID.
  ASMJIT_API uint32_t _newLabelId() noexcept;

  //! Create and return a new `Label`.
  ASMJIT_INLINE Label newLabel() noexcept { return Label(_newLabelId()); }

  //! Bind label to the current offset.
  //!
  //! NOTE: Label can be bound only once!
  ASMJIT_API Error bind(const Label& label) noexcept;

  // --------------------------------------------------------------------------
  // [Embed]
  // --------------------------------------------------------------------------

  //! Embed data.
  ASMJIT_API Error embed(const void* data, uint32_t size) noexcept;

  //! Embed a constant pool data, adding the following in order:
  //!   1. Data alignment.
  //!   2. Label.
  //!   3. Constant pool data.
  ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) noexcept;

  // --------------------------------------------------------------------------
  // [Comment]
  // --------------------------------------------------------------------------

  //! Emit a single comment line (printf-style formatting).
  ASMJIT_API Error comment(const char* fmt, ...) noexcept;

  // --------------------------------------------------------------------------
  // [Hint]
  // --------------------------------------------------------------------------

  //! Emit a new hint (purely informational node).
  ASMJIT_API Error _hint(Var& var, uint32_t hint, uint32_t value) noexcept;

  // --------------------------------------------------------------------------
  // [Vars]
  // --------------------------------------------------------------------------

  //! Get whether variable `var` is created.
  ASMJIT_INLINE bool isVarValid(const Var& var) const noexcept {
    return static_cast<size_t>(var.getId() & Operand::kIdIndexMask) < _varList.getLength();
  }

  //! \internal
  //!
  //! Get `VarData` by `var`.
  ASMJIT_INLINE VarData* getVd(const Var& var) const noexcept {
    return getVdById(var.getId());
  }

  //! \internal
  //!
  //! Get `VarData` by `id` (asserts the id is in range).
  ASMJIT_INLINE VarData* getVdById(uint32_t id) const noexcept {
    ASMJIT_ASSERT(id != kInvalidValue);
    ASMJIT_ASSERT(static_cast<size_t>(id & Operand::kIdIndexMask) < _varList.getLength());

    return _varList[id & Operand::kIdIndexMask];
  }

  //! \internal
  //!
  //! Get an array of 'VarData*'.
  ASMJIT_INLINE VarData** _getVdArray() const noexcept {
    return const_cast<VarData**>(_varList.getData());
  }

  //! \internal
  //!
  //! Create a new `VarData`.
  ASMJIT_API VarData* _newVd(const VarInfo& vi, const char* name) noexcept;

  //! Alloc variable `var`.
  ASMJIT_API Error alloc(Var& var) noexcept;
  //! Alloc variable `var` using `regIndex` as a register index.
  ASMJIT_API Error alloc(Var& var, uint32_t regIndex) noexcept;
  //! Alloc variable `var` using `reg` as a register operand.
  ASMJIT_API Error alloc(Var& var, const Reg& reg) noexcept;
  //! Spill variable `var`.
  ASMJIT_API Error spill(Var& var) noexcept;
  //! Save variable `var` if the status is `modified` at this point.
  ASMJIT_API Error save(Var& var) noexcept;
  //! Unuse variable `var`.
  ASMJIT_API Error unuse(Var& var) noexcept;

  //! Get priority of variable `var`.
  ASMJIT_API uint32_t getPriority(Var& var) const noexcept;
  //! Set priority of variable `var` to `priority`.
  ASMJIT_API void setPriority(Var& var, uint32_t priority) noexcept;

  //! Get save-on-unuse `var` property.
  ASMJIT_API bool getSaveOnUnuse(Var& var) const noexcept;
  //! Set save-on-unuse `var` property to `value`.
  ASMJIT_API void setSaveOnUnuse(Var& var, bool value) noexcept;

  //! Rename variable `var` to `name`.
  //!
  //! NOTE: Only new name will appear in the logger.
  ASMJIT_API void rename(Var& var, const char* fmt, ...) noexcept;

  // --------------------------------------------------------------------------
  // [Stack]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Create a new memory chunk allocated on the current function's stack.
  virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) noexcept = 0;

  // --------------------------------------------------------------------------
  // [Const]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Put data to a constant-pool and get a memory reference to it.
  virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) noexcept = 0;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Code-Generation features, used by \ref hasFeature() and \ref setFeature().
  uint32_t _features;
  //! Maximum count of nodes to look ahead when allocating/spilling
  //! registers.
  uint32_t _maxLookAhead;

  //! Options affecting the next instruction.
  uint32_t _instOptions;
  //! Processing token generator.
  //!
  //! Used to get a unique token that is then used to process `HLNode`s. See
  //! `Compiler::_generateUniqueToken()` for more details.
  uint32_t _tokenGenerator;

  //! Flow id added to each node created (used only by `Context`).
  uint32_t _nodeFlowId;
  //! Flags added to each node created (used only by `Context`).
  uint32_t _nodeFlags;

  //! Variable mapping (translates incoming VarType into target).
  const uint8_t* _targetVarMapping;

  //! First node.
  HLNode* _firstNode;
  //! Last node.
  HLNode* _lastNode;
  //! Current node (insertion point for newly added nodes).
  HLNode* _cursor;
  //! Current function.
  HLFunc* _func;

  //! General purpose zone allocator.
  Zone _zoneAllocator;
  //! Variable zone.
  Zone _varAllocator;
  //! String/data zone.
  Zone _stringAllocator;
  //! Local constant pool zone.
  Zone _constAllocator;

  //! VarData list.
  PodVector<VarData*> _varList;

  //! Local constant pool, flushed at the end of each function.
  ConstPool _localConstPool;
  //! Global constant pool, flushed at the end of the compilation.
  ConstPool _globalConstPool;

  //! Label to start of the local constant pool.
  Label _localConstPoolLabel;
  //! Label to start of the global constant pool.
  Label _globalConstPoolLabel;
};
//! \}
// ============================================================================
// [Defined-Later]
// ============================================================================
//! Initialize a fresh `HLNode` of the given `type` created by `compiler`.
//!
//! The node starts detached (no prev/next links) and inherits the compiler's
//! current node flags and flow-id; the remaining pointers start null and are
//! filled in by later passes.
ASMJIT_INLINE HLNode::HLNode(Compiler* compiler, uint32_t type) noexcept {
  // Detached until inserted into the code stream.
  _prev = nullptr;
  _next = nullptr;

  // Lazily attached data (comment, var-map, liveness, state).
  _comment = nullptr;
  _map = nullptr;
  _liveness = nullptr;
  _state = nullptr;

  // Static properties plus state inherited from the compiler.
  _type = static_cast<uint8_t>(type);
  _opCount = 0;
  _tokenId = 0;
  _flags = static_cast<uint16_t>(compiler->_nodeFlags);
  _flowId = compiler->_nodeFlowId;
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_BASE_COMPILER_H

View File

@@ -1,653 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/compilercontext_p.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Context - Construction / Destruction]
// ============================================================================
//! Create a new `Context` bound to `compiler`.
//!
//! The zone chunk size is chosen just below 8kB so a chunk plus the zone's
//! own overhead fits a round allocation size.
Context::Context(Compiler* compiler) :
  _compiler(compiler),
  _zoneAllocator(8192 - Zone::kZoneOverhead),
  _traceNode(nullptr),
  _varMapToVaListOffset(0) {

  // Qualified call - virtual dispatch is not meaningful in a constructor;
  // relies on `reset()` having a default argument in its declaration.
  Context::reset();
}

Context::~Context() {}
// ============================================================================
// [asmjit::Context - Reset]
// ============================================================================
//! Reset the context into a clean state so another function can be compiled.
//!
//! When `releaseMemory` is true the zone allocator and the variable list
//! also give their memory back instead of keeping it for reuse.
void Context::reset(bool releaseMemory) {
  // Allocators and growable containers.
  _zoneAllocator.reset(releaseMemory);
  _contextVd.reset(releaseMemory);

  _unreachableList.reset();
  _returningList.reset();
  _jccList.reset();

  // Per-function node pointers and state.
  _func = nullptr;
  _start = nullptr;
  _end = nullptr;
  _extraBlock = nullptr;
  _stop = nullptr;
  _state = nullptr;

  // Stack/memory cell bookkeeping.
  _memVarCells = nullptr;
  _memStackCells = nullptr;

  _mem1ByteVarsUsed = 0;
  _mem2ByteVarsUsed = 0;
  _mem4ByteVarsUsed = 0;
  _mem8ByteVarsUsed = 0;
  _mem16ByteVarsUsed = 0;
  _mem32ByteVarsUsed = 0;
  _mem64ByteVarsUsed = 0;
  _memStackCellsUsed = 0;

  _memMaxAlign = 0;
  _memVarTotal = 0;
  _memStackTotal = 0;
  _memAllTotal = 0;

  // Minimum column where the liveness annotation starts in logger output.
  _annotationLength = 12;
}
// ============================================================================
// [asmjit::Context - Mem]
// ============================================================================
//! Get the default alignment for a memory cell of `size` bytes: the smallest
//! power of two that is >= `size`, clamped to the range [1, 64].
static inline uint32_t BaseContext_getDefaultAlignment(uint32_t size) {
  uint32_t alignment = 1;
  while (alignment < size && alignment < 64)
    alignment <<= 1;
  return alignment;
}
//! Create a memory cell for variable `vd` (which must not have one yet).
//!
//! Stack variables are delegated to `_newStackCell()`; other variables get a
//! zone-allocated cell prepended to `_memVarCells`, with offsets resolved
//! later by `resolveCellOffsets()`. Returns `nullptr` on allocation failure
//! after setting the compiler error to `kErrorNoHeapMemory`.
VarCell* Context::_newVarCell(VarData* vd) {
  ASMJIT_ASSERT(vd->_memCell == nullptr);

  VarCell* cell;
  uint32_t size = vd->getSize();

  if (vd->isStack()) {
    // `_newStackCell()` reports its own error, so a plain return is enough.
    cell = _newStackCell(size, vd->getAlignment());

    if (cell == nullptr)
      return nullptr;
  }
  else {
    cell = static_cast<VarCell*>(_zoneAllocator.alloc(sizeof(VarCell)));
    if (cell == nullptr)
      goto _NoMemory;

    // Prepend to the single-linked var-cell list.
    cell->_next = _memVarCells;
    _memVarCells = cell;

    cell->_offset = 0;
    cell->_size = size;
    // Alignment deliberately equals size - the switch below only accepts
    // power-of-two sizes.
    cell->_alignment = size;

    _memMaxAlign = Utils::iMax<uint32_t>(_memMaxAlign, size);
    _memVarTotal += size;

    // Per-size bucket counters consumed by `resolveCellOffsets()` to lay
    // cells out from the most- to the least-aligned group.
    switch (size) {
      case 1: _mem1ByteVarsUsed++ ; break;
      case 2: _mem2ByteVarsUsed++ ; break;
      case 4: _mem4ByteVarsUsed++ ; break;
      case 8: _mem8ByteVarsUsed++ ; break;
      case 16: _mem16ByteVarsUsed++; break;
      case 32: _mem32ByteVarsUsed++; break;
      case 64: _mem64ByteVarsUsed++; break;

      default:
        ASMJIT_NOT_REACHED();
    }
  }

  vd->_memCell = cell;
  return cell;

_NoMemory:
  _compiler->setLastError(kErrorNoHeapMemory);
  return nullptr;
}
//! Create a stack cell of `size` bytes with the given `alignment`.
//!
//! A zero `alignment` is replaced by a size-derived default; the effective
//! alignment is capped at 64 and `size` is rounded up to it. The cell is
//! inserted into `_memStackCells` sorted by decreasing alignment, then by
//! decreasing size, so `resolveCellOffsets()` can assign offsets in a single
//! pass. Returns `nullptr` on allocation failure after setting the compiler
//! error to `kErrorNoHeapMemory`.
VarCell* Context::_newStackCell(uint32_t size, uint32_t alignment) {
  VarCell* cell = static_cast<VarCell*>(_zoneAllocator.alloc(sizeof(VarCell)));
  if (cell == nullptr)
    goto _NoMemory;

  if (alignment == 0)
    alignment = BaseContext_getDefaultAlignment(size);

  if (alignment > 64)
    alignment = 64;

  ASMJIT_ASSERT(Utils::isPowerOf2(alignment));
  size = Utils::alignTo<uint32_t>(size, alignment);

  // Insert it sorted according to the alignment and size.
  {
    VarCell** pPrev = &_memStackCells;
    VarCell* cur = *pPrev;

    // Skip over cells with greater alignment (or equal alignment and
    // greater size) to find the insertion point.
    while (cur != nullptr) {
      if ((cur->getAlignment() > alignment) ||
          (cur->getAlignment() == alignment && cur->getSize() > size)) {
        pPrev = &cur->_next;
        cur = *pPrev;
        continue;
      }
      break;
    }

    cell->_next = cur;
    cell->_offset = 0;
    cell->_size = size;
    cell->_alignment = alignment;

    *pPrev = cell;
    _memStackCellsUsed++;

    _memMaxAlign = Utils::iMax<uint32_t>(_memMaxAlign, alignment);
    _memStackTotal += size;
  }

  return cell;

_NoMemory:
  _compiler->setLastError(kErrorNoHeapMemory);
  return nullptr;
}
//! Assign final frame-relative offsets to all var and stack cells.
//!
//! Var cells are packed into per-size regions ordered from the largest
//! (64-byte) to the smallest (1-byte) so each region is naturally aligned;
//! stack cells follow, consuming the pre-sorted `_memStackCells` list.
//! The total is stored in `_memAllTotal`.
Error Context::resolveCellOffsets() {
  VarCell* varCell = _memVarCells;
  VarCell* stackCell = _memStackCells;

  uint32_t stackAlignment = 0;
  if (stackCell != nullptr)
    stackAlignment = stackCell->getAlignment();

  // Region start offsets, largest size class first.
  uint32_t pos64 = 0;
  uint32_t pos32 = pos64 + _mem64ByteVarsUsed * 64;
  uint32_t pos16 = pos32 + _mem32ByteVarsUsed * 32;
  uint32_t pos8 = pos16 + _mem16ByteVarsUsed * 16;
  uint32_t pos4 = pos8 + _mem8ByteVarsUsed * 8 ;
  uint32_t pos2 = pos4 + _mem4ByteVarsUsed * 4 ;
  uint32_t pos1 = pos2 + _mem2ByteVarsUsed * 2 ;

  uint32_t stackPos = pos1 + _mem1ByteVarsUsed;

  uint32_t gapAlignment = stackAlignment;
  uint32_t gapSize = 0;

  // TODO: Not used!
  // NOTE(review): the `Utils::alignDiff()` result is discarded and `gapSize`
  // is never updated from it, so `stackPos` is not actually aligned here and
  // the gap-filling branch in the stack loop below can never trigger
  // (gapSize stays 0). Left as-is - fixing it would change frame layout.
  if (gapAlignment)
    Utils::alignDiff(stackPos, gapAlignment);
  stackPos += gapSize;

  uint32_t gapPos = stackPos;
  uint32_t allTotal = stackPos;

  // Vars - Allocated according to alignment/width.
  while (varCell != nullptr) {
    uint32_t size = varCell->getSize();
    uint32_t offset = 0;

    switch (size) {
      case 1: offset = pos1 ; pos1 += 1 ; break;
      case 2: offset = pos2 ; pos2 += 2 ; break;
      case 4: offset = pos4 ; pos4 += 4 ; break;
      case 8: offset = pos8 ; pos8 += 8 ; break;
      case 16: offset = pos16; pos16 += 16; break;
      case 32: offset = pos32; pos32 += 32; break;
      case 64: offset = pos64; pos64 += 64; break;

      default:
        ASMJIT_NOT_REACHED();
    }

    varCell->setOffset(static_cast<int32_t>(offset));
    varCell = varCell->_next;
  }

  // Stack - Allocated according to alignment/width.
  while (stackCell != nullptr) {
    uint32_t size = stackCell->getSize();
    uint32_t alignment = stackCell->getAlignment();
    uint32_t offset;

    // Try to fill the gap between variables/stack first.
    if (size <= gapSize && alignment <= gapAlignment) {
      offset = gapPos;

      gapSize -= size;
      gapPos -= size;

      if (alignment < gapAlignment)
        gapAlignment = alignment;
    }
    else {
      offset = stackPos;

      stackPos += size;
      allTotal += size;
    }

    stackCell->setOffset(offset);
    stackCell = stackCell->_next;
  }

  _memAllTotal = allTotal;
  return kErrorOk;
}
// ============================================================================
// [asmjit::Context - RemoveUnreachableCode]
// ============================================================================
//! Remove nodes that the fetch phase detected as unreachable.
//!
//! For every entry in `_unreachableList` the scan walks forward until it
//! reaches a node that was fetched (reachable) or the stop node, then prunes
//! the nodes in between - removing everything up to the first label and
//! afterwards only nodes flagged as removable.
Error Context::removeUnreachableCode() {
  Compiler* compiler = getCompiler();

  PodList<HLNode*>::Link* link = _unreachableList.getFirst();
  HLNode* stop = getStop();

  while (link != nullptr) {
    HLNode* node = link->getValue();
    // Skip entries already detached (no prev) and the stop node itself.
    if (node != nullptr && node->getPrev() != nullptr && node != stop) {
      // Locate all unreachable nodes.
      HLNode* first = node;
      do {
        if (node->isFetched())
          break;
        node = node->getNext();
      } while (node != stop);

      // Remove unreachable nodes that are neither informative nor directives.
      if (node != first) {
        HLNode* end = node;
        node = first;

        // NOTE: The strategy is as follows:
        // 1. The algorithm removes everything until it finds a first label.
        // 2. After the first label is found it removes only removable nodes.
        bool removeEverything = true;
        do {
          HLNode* next = node->getNext();
          bool remove = node->isRemovable();

          if (!remove) {
            if (node->isLabel())
              removeEverything = false;
            remove = removeEverything;
          }

          if (remove) {
            // Trace-only hook; compiled out unless tracing is enabled.
            ASMJIT_TSEC({
              this->_traceNode(this, node, "[REMOVED UNREACHABLE] ");
            });
            compiler->removeNode(node);
          }

          node = next;
        } while (node != end);
      }
    }

    link = link->getNext();
  }

  return kErrorOk;
}
// ============================================================================
// [asmjit::Context - Liveness Analysis]
// ============================================================================
//! \internal
//!
//! One entry of the explicit work-stack used by `Context::livenessAnalysis()`
//! to revisit a label through every jump that targets it. Entries are
//! zone-allocated and recycled through a free-list instead of being freed.
struct LivenessTarget {
  //! Previous target (stack link).
  LivenessTarget* prev;

  //! Target node (the label being processed).
  HLLabel* node;
  //! Jump currently being followed into this label.
  HLJump* from;
};
//! Backward liveness analysis over the fetched node stream.
//!
//! Starting from every returning node, walks backwards propagating a
//! working bit-set (`bCur`, one bit per local variable) into each node's
//! liveness set. Labels with references fan out into all jumps targeting
//! them; an explicit `LivenessTarget` stack plus a `goto`-based state
//! machine is used instead of recursion. Returns `kErrorNoHeapMemory` if a
//! bit-array or target allocation fails.
Error Context::livenessAnalysis() {
  // One bit per context variable, rounded up to whole BitArray entities.
  uint32_t bLen = static_cast<uint32_t>(
    ((_contextVd.getLength() + BitArray::kEntityBits - 1) / BitArray::kEntityBits));

  // No variables.
  if (bLen == 0)
    return kErrorOk;

  HLFunc* func = getFunc();
  HLJump* from = nullptr;

  // Active target stack and recycled (free) targets.
  LivenessTarget* ltCur = nullptr;
  LivenessTarget* ltUnused = nullptr;

  PodList<HLNode*>::Link* retPtr = _returningList.getFirst();
  ASMJIT_ASSERT(retPtr != nullptr);

  HLNode* node = retPtr->getValue();

  size_t varMapToVaListOffset = _varMapToVaListOffset;
  BitArray* bCur = newBits(bLen);

  if (bCur == nullptr)
    goto _NoMemory;

  // Allocate bits for code visited first time.
_OnVisit:
  for (;;) {
    if (node->hasLiveness()) {
      // Node seen before - merge and continue patching if anything changed.
      if (bCur->_addBitsDelSource(node->getLiveness(), bCur, bLen))
        goto _OnPatch;
      else
        goto _OnDone;
    }

    BitArray* bTmp = copyBits(bCur, bLen);
    if (bTmp == nullptr)
      goto _NoMemory;

    node->setLiveness(bTmp);
    VarMap* map = node->getMap();

    if (map != nullptr) {
      uint32_t vaCount = map->getVaCount();
      // The VarAttr array lives at an arch-specific offset inside the map.
      VarAttr* vaList = reinterpret_cast<VarAttr*>(((uint8_t*)map) + varMapToVaListOffset);

      for (uint32_t i = 0; i < vaCount; i++) {
        VarAttr* va = &vaList[i];
        VarData* vd = va->getVd();

        uint32_t flags = va->getFlags();
        uint32_t localId = vd->getLocalId();

        if ((flags & kVarAttrWAll) && !(flags & kVarAttrRAll)) {
          // Write-Only - live at this node, dead above it.
          bTmp->setBit(localId);
          bCur->delBit(localId);
        }
        else {
          // Read-Only or Read/Write - live at this node and above it.
          bTmp->setBit(localId);
          bCur->setBit(localId);
        }
      }
    }

    if (node->getType() == HLNode::kTypeLabel)
      goto _OnTarget;

    if (node == func)
      goto _OnDone;

    ASMJIT_ASSERT(node->getPrev());
    node = node->getPrev();
  }

  // Patch already generated liveness bits.
_OnPatch:
  for (;;) {
    ASMJIT_ASSERT(node->hasLiveness());
    BitArray* bNode = node->getLiveness();

    // Stop patching as soon as the merge no longer changes anything.
    if (!bNode->_addBitsDelSource(bCur, bLen))
      goto _OnDone;

    if (node->getType() == HLNode::kTypeLabel)
      goto _OnTarget;

    if (node == func)
      goto _OnDone;

    node = node->getPrev();
  }

  // A label was reached - fan out into every jump that targets it.
_OnTarget:
  if (static_cast<HLLabel*>(node)->getNumRefs() != 0) {
    // Push a new LivenessTarget onto the stack if needed.
    if (ltCur == nullptr || ltCur->node != node) {
      // Allocate a new LivenessTarget object (from pool or zone).
      LivenessTarget* ltTmp = ltUnused;

      if (ltTmp != nullptr) {
        ltUnused = ltUnused->prev;
      }
      else {
        // NOTE(review): the size arithmetic assumes a trailing
        // BitArray-shaped payload sized by `bLen`; verify against the
        // upstream `LivenessTarget` definition.
        ltTmp = _zoneAllocator.allocT<LivenessTarget>(
          sizeof(LivenessTarget) - sizeof(BitArray) + bLen * sizeof(uintptr_t));
        if (ltTmp == nullptr)
          goto _NoMemory;
      }

      // Initialize and make current - ltTmp->from will be set later on.
      ltTmp->prev = ltCur;
      ltTmp->node = static_cast<HLLabel*>(node);
      ltCur = ltTmp;

      from = static_cast<HLLabel*>(node)->getFrom();
      ASMJIT_ASSERT(from != nullptr);
    }
    else {
      from = ltCur->from;
      goto _OnJumpNext;
    }

    // Visit/Patch.
    do {
      ltCur->from = from;
      bCur->copyBits(node->getLiveness(), bLen);

      if (!from->hasLiveness()) {
        node = from;
        goto _OnVisit;
      }

      // Issue #25: Moved '_OnJumpNext' here since it's important to patch
      // code again if there are more live variables than before.
_OnJumpNext:
      if (bCur->delBits(from->getLiveness(), bLen)) {
        node = from;
        goto _OnPatch;
      }

      from = from->getJumpNext();
    } while (from != nullptr);

    // Pop the current LivenessTarget from the stack (recycle it).
    {
      LivenessTarget* ltTmp = ltCur;
      ltCur = ltCur->prev;
      ltTmp->prev = ltUnused;
      ltUnused = ltTmp;
    }
  }

  // Fall through the label into the preceding node.
  bCur->copyBits(node->getLiveness(), bLen);
  node = node->getPrev();

  if (node->isJmp() || !node->isFetched())
    goto _OnDone;

  if (!node->hasLiveness())
    goto _OnVisit;

  if (bCur->delBits(node->getLiveness(), bLen))
    goto _OnPatch;

  // Resume a pending label target, or continue with the next returning node.
_OnDone:
  if (ltCur != nullptr) {
    node = ltCur->node;
    from = ltCur->from;

    goto _OnJumpNext;
  }

  retPtr = retPtr->getNext();
  if (retPtr != nullptr) {
    node = retPtr->getValue();
    goto _OnVisit;
  }

  return kErrorOk;

_NoMemory:
  return setLastError(kErrorNoHeapMemory);
}
// ============================================================================
// [asmjit::Context - Annotate]
// ============================================================================
//! Append the node's comment plus a liveness annotation column to `dst`.
//!
//! The annotation is a fixed-position `[...]` block with one character per
//! context variable: '.' = live, 'r'/'w'/'x' = read/write/read-write at this
//! node, 'u' = attached but neither, uppercased when the variable is unused
//! after the node. Compiled out entirely when the logger is disabled.
Error Context::formatInlineComment(StringBuilder& dst, HLNode* node) {
#if !defined(ASMJIT_DISABLE_LOGGER)
  if (node->getComment())
    dst.appendString(node->getComment());

  if (node->hasLiveness()) {
    // Pad so all annotations start at the same column.
    if (dst.getLength() < _annotationLength)
      dst.appendChars(' ', _annotationLength - dst.getLength());

    uint32_t vdCount = static_cast<uint32_t>(_contextVd.getLength());
    size_t offset = dst.getLength() + 1;

    dst.appendChar('[');
    dst.appendChars(' ', vdCount);
    dst.appendChar(']');

    BitArray* liveness = node->getLiveness();
    VarMap* map = node->getMap();

    uint32_t i;
    // Mark live variables first; per-node attributes overwrite below.
    for (i = 0; i < vdCount; i++) {
      if (liveness->getBit(i))
        dst.getData()[offset + i] = '.';
    }

    if (map != nullptr) {
      uint32_t vaCount = map->getVaCount();
      VarAttr* vaList = reinterpret_cast<VarAttr*>(((uint8_t*)map) + _varMapToVaListOffset);

      for (i = 0; i < vaCount; i++) {
        VarAttr* va = &vaList[i];
        VarData* vd = va->getVd();

        uint32_t flags = va->getFlags();
        char c = 'u';

        if ( (flags & kVarAttrRAll) && !(flags & kVarAttrWAll)) c = 'r';
        if (!(flags & kVarAttrRAll) &&  (flags & kVarAttrWAll)) c = 'w';
        if ( (flags & kVarAttrRAll) &&  (flags & kVarAttrWAll)) c = 'x';

        // Uppercase if unused.
        if ((flags & kVarAttrUnuse))
          c -= 'a' - 'A';

        ASMJIT_ASSERT(offset + vd->getLocalId() < dst.getLength());
        dst._data[offset + vd->getLocalId()] = c;
      }
    }
  }
#endif // !ASMJIT_DISABLE_LOGGER

  return kErrorOk;
}
// ============================================================================
// [asmjit::Context - Cleanup]
// ============================================================================
void Context::cleanup() {
VarData** array = _contextVd.getData();
size_t length = _contextVd.getLength();
for (size_t i = 0; i < length; i++) {
VarData* vd = array[i];
vd->resetLocalId();
vd->resetRegIndex();
}
_contextVd.reset(false);
_extraBlock = nullptr;
}
// ============================================================================
// [asmjit::Context - CompileFunc]
// ============================================================================
//! Run the whole compilation pipeline over `func`:
//! fetch -> remove unreachable code -> liveness analysis ->
//! annotate (logger builds only) -> translate.
//!
//! Any stage failure is propagated immediately via ASMJIT_PROPAGATE_ERROR.
Error Context::compile(HLFunc* func) {
  HLNode* end = func->getEnd();
  HLNode* stop = end->getNext();

  _func = func;
  _stop = stop;
  _extraBlock = end;

  ASMJIT_PROPAGATE_ERROR(fetch());
  ASMJIT_PROPAGATE_ERROR(removeUnreachableCode());
  ASMJIT_PROPAGATE_ERROR(livenessAnalysis());

  Compiler* compiler = getCompiler();

#if !defined(ASMJIT_DISABLE_LOGGER)
  // Annotations are only produced when a logger is actually attached.
  if (compiler->getAssembler()->hasLogger())
    ASMJIT_PROPAGATE_ERROR(annotate());
#endif // !ASMJIT_DISABLE_LOGGER

  ASMJIT_PROPAGATE_ERROR(translate());

  // We alter the compiler cursor, because it doesn't make sense to reference
  // it after compilation - some nodes may disappear and it's forbidden to add
  // new code after the compilation is done.
  compiler->_setCursor(nullptr);

  return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER

View File

@@ -1,901 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_COMPILERCONTEXT_P_H
#define _ASMJIT_BASE_COMPILERCONTEXT_P_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/compiler.h"
#include "../base/podvector.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::VarAttrFlags]
// ============================================================================
//! \internal
//!
//! Variable attribute flags describing how a single node uses a variable
//! (stored in `VarAttr`). The low bits are generic; the top nibble is
//! reserved for X86-specific constraints.
ASMJIT_ENUM(VarAttrFlags) {
  //! Read from register.
  kVarAttrRReg = 0x00000001,
  //! Write to register.
  kVarAttrWReg = 0x00000002,
  //! Read/Write from/to register.
  kVarAttrXReg = 0x00000003,

  //! Read from memory.
  kVarAttrRMem = 0x00000004,
  //! Write to memory.
  kVarAttrWMem = 0x00000008,
  //! Read/Write from/to memory.
  kVarAttrXMem = 0x0000000C,

  //! Register allocator can decide if input will be in register or memory.
  kVarAttrRDecide = 0x00000010,
  //! Register allocator can decide if output will be in register or memory.
  kVarAttrWDecide = 0x00000020,
  //! Register allocator can decide if in/out will be in register or memory.
  kVarAttrXDecide = 0x00000030,

  //! Variable is converted to other type/class on the input.
  kVarAttrRConv = 0x00000040,
  //! Variable is converted from other type/class on the output.
  kVarAttrWConv = 0x00000080,
  //! Combination of `kVarAttrRConv` and `kVarAttrWConv`.
  kVarAttrXConv = 0x000000C0,

  //! Variable is a function call operand.
  kVarAttrRCall = 0x00000100,
  //! Variable is a function argument passed in register.
  kVarAttrRFunc = 0x00000200,
  //! Variable is a function return value passed in register.
  kVarAttrWFunc = 0x00000400,

  //! Variable should be spilled.
  kVarAttrSpill = 0x00000800,
  //! Variable should be unused at the end of the instruction/node.
  kVarAttrUnuse = 0x00001000,

  //! All in-flags (any kind of read access).
  kVarAttrRAll = kVarAttrRReg | kVarAttrRMem | kVarAttrRDecide | kVarAttrRCall | kVarAttrRFunc,
  //! All out-flags (any kind of write access).
  kVarAttrWAll = kVarAttrWReg | kVarAttrWMem | kVarAttrWDecide | kVarAttrWFunc,

  //! Variable is already allocated on the input.
  kVarAttrAllocRDone = 0x00400000,
  //! Variable is already allocated on the output.
  kVarAttrAllocWDone = 0x00800000,

  // X86-specific register constraints.
  kVarAttrX86GpbLo = 0x10000000,
  kVarAttrX86GpbHi = 0x20000000,
  kVarAttrX86Fld4 = 0x40000000,
  kVarAttrX86Fld8 = 0x80000000
};
// ============================================================================
// [asmjit::VarHint]
// ============================================================================
//! \internal
//!
//! Variable hint (used by `Compiler`); emitted as an informational
//! `HLHint` node via `Compiler::_hint()`.
//!
//! \sa Compiler.
ASMJIT_ENUM(VarHint) {
  //! Alloc variable.
  kVarHintAlloc = 0,
  //! Spill variable.
  kVarHintSpill = 1,
  //! Save variable if modified.
  kVarHintSave = 2,
  //! Save variable if modified and mark it as unused.
  kVarHintSaveAndUnuse = 3,
  //! Mark variable as unused.
  kVarHintUnuse = 4
};
// ============================================================================
// [asmjit::kVarState]
// ============================================================================
// TODO: Rename `kVarState` or `VarState`.

//! \internal
//!
//! State of a variable (where its current value lives).
//!
//! NOTE: Variable states are used only during register allocation.
ASMJIT_ENUM(kVarState) {
  //! Variable is currently not used.
  kVarStateNone = 0,
  //! Variable is currently allocated in register.
  kVarStateReg = 1,
  //! Variable is currently allocated in memory (or has been spilled).
  kVarStateMem = 2
};
// ============================================================================
// [asmjit::VarCell]
// ============================================================================

//! A single stack/memory cell managed by `Context` - zone-allocated, linked
//! through `_next`, with its final offset assigned by
//! `Context::resolveCellOffsets()`.
struct VarCell {
  ASMJIT_NO_COPY(VarCell)

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get cell offset (relative to the frame's base offset).
  ASMJIT_INLINE int32_t getOffset() const { return _offset; }
  //! Set cell offset.
  ASMJIT_INLINE void setOffset(int32_t offset) { _offset = offset; }

  //! Get cell size in bytes.
  ASMJIT_INLINE uint32_t getSize() const { return _size; }
  //! Set cell size.
  ASMJIT_INLINE void setSize(uint32_t size) { _size = size; }

  //! Get cell alignment in bytes.
  ASMJIT_INLINE uint32_t getAlignment() const { return _alignment; }
  //! Set cell alignment.
  ASMJIT_INLINE void setAlignment(uint32_t alignment) { _alignment = alignment; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Next active cell.
  VarCell* _next;

  //! Offset, relative to base-offset.
  int32_t _offset;
  //! Size.
  uint32_t _size;
  //! Alignment.
  uint32_t _alignment;
};
// ============================================================================
// [asmjit::VarData]
// ============================================================================
//! HL variable data (base).
//! HL variable data (base).
//!
//! Holds everything the register allocator tracks for a single variable:
//! identity (id, name, type, class), allocation state (register index or
//! home memory cell), usage statistics, and temporary per-pass data (`_va`).
struct VarData {
  // --------------------------------------------------------------------------
  // [Accessors - Base]
  // --------------------------------------------------------------------------

  //! Get variable name.
  ASMJIT_INLINE const char* getName() const { return _name; }
  //! Get variable id.
  ASMJIT_INLINE uint32_t getId() const { return _id; }
  //! Get variable type.
  ASMJIT_INLINE uint32_t getType() const { return _type; }
  //! Get variable class.
  ASMJIT_INLINE uint32_t getClass() const { return _class; }

  // --------------------------------------------------------------------------
  // [Accessors - LocalId]
  // --------------------------------------------------------------------------

  //! Get whether the variable has a local id (i.e. it's registered within the
  //! current `Context`).
  ASMJIT_INLINE bool hasLocalId() const { return _localId != kInvalidValue; }
  //! Get a variable's local id.
  ASMJIT_INLINE uint32_t getLocalId() const { return _localId; }
  //! Set a variable's local id.
  ASMJIT_INLINE void setLocalId(uint32_t localId) { _localId = localId; }
  //! Reset a variable's local id (back to `kInvalidValue`).
  ASMJIT_INLINE void resetLocalId() { _localId = kInvalidValue; }

  // --------------------------------------------------------------------------
  // [Accessors - Priority]
  // --------------------------------------------------------------------------

  //! Get variable priority, used by compiler to decide which variable to spill.
  ASMJIT_INLINE uint32_t getPriority() const { return _priority; }
  //! Set variable priority (stored as `uint8_t`, must be <= 0xFF).
  ASMJIT_INLINE void setPriority(uint32_t priority) {
    ASMJIT_ASSERT(priority <= 0xFF);
    _priority = static_cast<uint8_t>(priority);
  }

  // --------------------------------------------------------------------------
  // [Accessors - State]
  // --------------------------------------------------------------------------

  //! Get variable state (see `kVarState`), only used by `Context`.
  ASMJIT_INLINE uint32_t getState() const { return _state; }
  //! Set variable state, only used by `Context`.
  ASMJIT_INLINE void setState(uint32_t state) {
    ASMJIT_ASSERT(state <= 0xFF);
    _state = static_cast<uint8_t>(state);
  }

  // --------------------------------------------------------------------------
  // [Accessors - RegIndex]
  // --------------------------------------------------------------------------

  //! Get register index (`kInvalidReg` when not allocated in a register).
  ASMJIT_INLINE uint32_t getRegIndex() const { return _regIndex; }
  //! Set register index.
  ASMJIT_INLINE void setRegIndex(uint32_t regIndex) {
    ASMJIT_ASSERT(regIndex <= kInvalidReg);
    _regIndex = static_cast<uint8_t>(regIndex);
  }
  //! Reset register index (back to `kInvalidReg`).
  ASMJIT_INLINE void resetRegIndex() {
    _regIndex = static_cast<uint8_t>(kInvalidReg);
  }

  // --------------------------------------------------------------------------
  // [Accessors - HomeIndex/Mask]
  // --------------------------------------------------------------------------

  //! Get home registers mask (every register this variable has been allocated to).
  ASMJIT_INLINE uint32_t getHomeMask() const { return _homeMask; }
  //! Add a home register index to the home registers mask.
  ASMJIT_INLINE void addHomeIndex(uint32_t regIndex) { _homeMask |= Utils::mask(regIndex); }

  // --------------------------------------------------------------------------
  // [Accessors - Flags]
  // --------------------------------------------------------------------------

  //! Get variable flags.
  ASMJIT_INLINE uint32_t getFlags() const { return _flags; }

  //! Get whether the VarData is only memory allocated on the stack.
  ASMJIT_INLINE bool isStack() const { return static_cast<bool>(_isStack); }
  //! Get whether the variable is a function argument passed through memory.
  ASMJIT_INLINE bool isMemArg() const { return static_cast<bool>(_isMemArg); }

  //! Get whether the variable content can be calculated by a simple instruction.
  ASMJIT_INLINE bool isCalculated() const { return static_cast<bool>(_isCalculated); }
  //! Get whether to save variable when it's unused (spill).
  ASMJIT_INLINE bool saveOnUnuse() const { return static_cast<bool>(_saveOnUnuse); }

  //! Get whether the variable was changed.
  ASMJIT_INLINE bool isModified() const { return static_cast<bool>(_modified); }
  //! Set whether the variable was changed.
  ASMJIT_INLINE void setModified(bool modified) { _modified = modified; }

  //! Get variable alignment, in bytes.
  ASMJIT_INLINE uint32_t getAlignment() const { return _alignment; }
  //! Get variable size, in bytes.
  ASMJIT_INLINE uint32_t getSize() const { return _size; }

  //! Get home memory offset.
  ASMJIT_INLINE int32_t getMemOffset() const { return _memOffset; }
  //! Set home memory offset.
  ASMJIT_INLINE void setMemOffset(int32_t offset) { _memOffset = offset; }

  //! Get home memory cell.
  ASMJIT_INLINE VarCell* getMemCell() const { return _memCell; }
  //! Set home memory cell.
  ASMJIT_INLINE void setMemCell(VarCell* cell) { _memCell = cell; }

  // --------------------------------------------------------------------------
  // [Accessors - Temporary Usage]
  // --------------------------------------------------------------------------

  //! Get temporary VarAttr.
  ASMJIT_INLINE VarAttr* getVa() const { return _va; }
  //! Set temporary VarAttr.
  ASMJIT_INLINE void setVa(VarAttr* va) { _va = va; }
  //! Reset temporary VarAttr.
  ASMJIT_INLINE void resetVa() { _va = nullptr; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Variable name.
  const char* _name;

  //! Variable id.
  uint32_t _id;
  //! Variable's local id (initially `kInvalidValue`).
  uint32_t _localId;

  //! Variable type.
  uint8_t _type;
  //! Variable class.
  uint8_t _class;
  //! Variable flags.
  uint8_t _flags;
  //! Variable priority.
  uint8_t _priority;

  //! Variable state (connected with actual `VarState`).
  uint8_t _state;
  //! Actual register index (only used by `Context`), during translate.
  uint8_t _regIndex;

  //! Whether the variable is only used as memory allocated on the stack.
  uint8_t _isStack : 1;
  //! Whether the variable is a function argument passed through memory.
  uint8_t _isMemArg : 1;
  //! Whether variable content can be calculated by a simple instruction.
  //!
  //! This is used mainly by MMX and SSE2 code. This flag indicates that
  //! register allocator should never reserve memory for this variable, because
  //! the content can be generated by a single instruction (for example PXOR).
  uint8_t _isCalculated : 1;
  //! Save on unuse (at end of the variable scope).
  uint8_t _saveOnUnuse : 1;
  //! Whether variable was changed (connected with actual `VarState`).
  uint8_t _modified : 1;
  //! \internal
  uint8_t _reserved0 : 3;
  //! Variable natural alignment.
  uint8_t _alignment;

  //! Variable size, in bytes.
  uint32_t _size;
  //! Mask of all registers variable has been allocated to.
  uint32_t _homeMask;

  //! Home memory offset.
  int32_t _memOffset;
  //! Home memory cell, used by `Context` (initially nullptr).
  VarCell* _memCell;

  // NOTE(review): The four statistics members below have no leading underscore,
  // unlike every other member - presumably historical; confirm before renaming.

  //! Register read access statistics.
  uint32_t rReadCount;
  //! Register write access statistics.
  uint32_t rWriteCount;
  //! Memory read statistics.
  uint32_t mReadCount;
  //! Memory write statistics.
  uint32_t mWriteCount;

  // --------------------------------------------------------------------------
  // [Members - Temporary Usage]
  // --------------------------------------------------------------------------

  // These variables are only used during register allocation. They are
  // initialized by init() phase and reset by cleanup() phase.

  union {
    //! Temporary link to VarAttr* used by the `Context` used in
    //! various phases, but always set back to nullptr when finished.
    //!
    //! This temporary data is designed to be used by algorithms that need to
    //! store some data into variables themselves during compilation. But it's
    //! expected that after variable is compiled & translated the data is set
    //! back to zero/null. Initial value is nullptr.
    VarAttr* _va;
    //! \internal
    //!
    //! Same as `_va` just provided as `uintptr_t`.
    uintptr_t _vaUInt;
  };
};
// ============================================================================
// [asmjit::VarAttr]
// ============================================================================
//! Variable attributes, local to a single node (instruction).
//!
//! Describes how one variable is used by one node: usage flags, optional
//! fixed input/output register indexes, and masks of mandatory/allocable
//! registers. `VarAttr` is a POD initialized via `setup()`.
struct VarAttr {
  // --------------------------------------------------------------------------
  // [Setup]
  // --------------------------------------------------------------------------

  //! Initialize all members (there is no constructor, `VarAttr` is a POD).
  ASMJIT_INLINE void setup(VarData* vd, uint32_t flags = 0, uint32_t inRegs = 0, uint32_t allocableRegs = 0) {
    _vd = vd;
    _flags = flags;
    _varCount = 0;
    _inRegIndex = kInvalidReg;
    _outRegIndex = kInvalidReg;
    _reserved = 0;
    _inRegs = inRegs;
    _allocableRegs = allocableRegs;
  }

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get VarData.
  ASMJIT_INLINE VarData* getVd() const { return _vd; }
  //! Set VarData.
  ASMJIT_INLINE void setVd(VarData* vd) { _vd = vd; }

  //! Get flags.
  ASMJIT_INLINE uint32_t getFlags() const { return _flags; }
  //! Set flags.
  ASMJIT_INLINE void setFlags(uint32_t flags) { _flags = flags; }

  //! Get whether `flag` is on.
  //! (Made `const` for consistency with the other getters.)
  ASMJIT_INLINE bool hasFlag(uint32_t flag) const { return (_flags & flag) != 0; }
  //! Add `flags`.
  ASMJIT_INLINE void orFlags(uint32_t flags) { _flags |= flags; }
  //! Mask `flags`.
  ASMJIT_INLINE void andFlags(uint32_t flags) { _flags &= flags; }
  //! Clear `flags`.
  ASMJIT_INLINE void andNotFlags(uint32_t flags) { _flags &= ~flags; }

  //! Get how many times the variable is used by the instruction/node.
  ASMJIT_INLINE uint32_t getVarCount() const { return _varCount; }
  //! Set how many times the variable is used by the instruction/node.
  ASMJIT_INLINE void setVarCount(uint32_t count) { _varCount = static_cast<uint8_t>(count); }
  //! Add how many times the variable is used by the instruction/node.
  ASMJIT_INLINE void addVarCount(uint32_t count = 1) { _varCount += static_cast<uint8_t>(count); }

  //! Get whether the variable has to be allocated in a specific input register.
  //! (Returns `bool` instead of the former `uint32_t` - it's a predicate.)
  ASMJIT_INLINE bool hasInRegIndex() const { return _inRegIndex != kInvalidReg; }
  //! Get the input register index or `kInvalidReg`.
  ASMJIT_INLINE uint32_t getInRegIndex() const { return _inRegIndex; }
  //! Set the input register index.
  ASMJIT_INLINE void setInRegIndex(uint32_t index) { _inRegIndex = static_cast<uint8_t>(index); }
  //! Reset the input register index.
  ASMJIT_INLINE void resetInRegIndex() { _inRegIndex = kInvalidReg; }

  //! Get whether the variable has to be allocated in a specific output register.
  //! (Returns `bool` instead of the former `uint32_t` - it's a predicate.)
  ASMJIT_INLINE bool hasOutRegIndex() const { return _outRegIndex != kInvalidReg; }
  //! Get the output register index or `kInvalidReg`.
  ASMJIT_INLINE uint32_t getOutRegIndex() const { return _outRegIndex; }
  //! Set the output register index.
  ASMJIT_INLINE void setOutRegIndex(uint32_t index) { _outRegIndex = static_cast<uint8_t>(index); }
  //! Reset the output register index.
  ASMJIT_INLINE void resetOutRegIndex() { _outRegIndex = kInvalidReg; }

  //! Get whether the mandatory input registers are in used.
  ASMJIT_INLINE bool hasInRegs() const { return _inRegs != 0; }
  //! Get mandatory input registers (mask).
  ASMJIT_INLINE uint32_t getInRegs() const { return _inRegs; }
  //! Set mandatory input registers (mask).
  ASMJIT_INLINE void setInRegs(uint32_t mask) { _inRegs = mask; }
  //! Add mandatory input registers (mask).
  ASMJIT_INLINE void addInRegs(uint32_t mask) { _inRegs |= mask; }
  //! And mandatory input registers (mask).
  ASMJIT_INLINE void andInRegs(uint32_t mask) { _inRegs &= mask; }
  //! Clear mandatory input registers (mask).
  ASMJIT_INLINE void delInRegs(uint32_t mask) { _inRegs &= ~mask; }

  //! Get allocable input registers (mask).
  ASMJIT_INLINE uint32_t getAllocableRegs() const { return _allocableRegs; }
  //! Set allocable input registers (mask).
  ASMJIT_INLINE void setAllocableRegs(uint32_t mask) { _allocableRegs = mask; }
  //! Add allocable input registers (mask).
  ASMJIT_INLINE void addAllocableRegs(uint32_t mask) { _allocableRegs |= mask; }
  //! And allocable input registers (mask).
  ASMJIT_INLINE void andAllocableRegs(uint32_t mask) { _allocableRegs &= mask; }
  //! Clear allocable input registers (mask).
  ASMJIT_INLINE void delAllocableRegs(uint32_t mask) { _allocableRegs &= ~mask; }

  // --------------------------------------------------------------------------
  // [Operator Overload]
  // --------------------------------------------------------------------------

  //! Copy-assign by raw memory copy (safe, `VarAttr` is a POD).
  ASMJIT_INLINE VarAttr& operator=(const VarAttr& other) {
    ::memcpy(this, &other, sizeof(VarAttr));
    return *this;
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Variable this attribute record belongs to.
  VarData* _vd;
  //! Flags.
  uint32_t _flags;

  union {
    struct {
      //! How many times the variable is used by the instruction/node.
      uint8_t _varCount;
      //! Input register index or `kInvalidReg` if it's not given.
      //!
      //! Even if the input register index is not given (i.e. it may by any
      //! register), register allocator should assign an index that will be
      //! used to persist a variable into this specific index. It's helpful
      //! in situations where one variable has to be allocated in multiple
      //! registers to determine the register which will be persistent.
      uint8_t _inRegIndex;
      //! Output register index or `kInvalidReg` if it's not given.
      //!
      //! Typically `kInvalidReg` if variable is only used on input.
      uint8_t _outRegIndex;
      //! \internal
      uint8_t _reserved;
    };

    //! \internal
    //!
    //! Packed data #0.
    uint32_t _packed;
  };

  //! Mandatory input registers.
  //!
  //! Mandatory input registers are required by the instruction even if
  //! there are duplicates. This schema allows us to allocate one variable
  //! in one or more register when needed. Required mostly by instructions
  //! that have implicit register operands (imul, cpuid, ...) and function
  //! call.
  uint32_t _inRegs;

  //! Allocable input registers.
  //!
  //! Optional input registers is a mask of all allocable registers for a given
  //! variable where we have to pick one of them. This mask is usually not used
  //! when _inRegs is set. If both masks are used then the register
  //! allocator tries first to find an intersection between these and allocates
  //! an extra slot if not found.
  uint32_t _allocableRegs;
};
// ============================================================================
// [asmjit::VarMap]
// ============================================================================
//! Variables' map related to a single node (instruction / other node).
//! Variables' map related to a single node (instruction / other node).
//!
//! Base class only stores the count; backends append a `VarAttr` array after
//! it (see `Context::_varMapToVaListOffset`, which gives the byte offset from
//! a `VarMap` to that array).
struct VarMap {
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get count of variables (all).
  ASMJIT_INLINE uint32_t getVaCount() const {
    return _vaCount;
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Variables count.
  uint32_t _vaCount;
};
// ============================================================================
// [asmjit::VarState]
// ============================================================================
//! Variables' state.
//!
//! NOTE(review): Empty tag/base class used by `Context::loadState()`,
//! `saveState()`, `switchState()` and `intersectStates()`; the concrete
//! state layout is presumably defined by backend-specific subclasses -
//! confirm against the architecture-specific context implementation.
struct VarState {};
// ============================================================================
// [asmjit::Context]
// ============================================================================
//! \internal
//!
//! Code generation context is the logic behind `Compiler`. The context is
//! used to compile the code stored in `Compiler`.
struct Context {
  ASMJIT_NO_COPY(Context)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  Context(Compiler* compiler);
  virtual ~Context();

  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------

  //! Reset the whole context.
  virtual void reset(bool releaseMemory = false);

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get compiler.
  ASMJIT_INLINE Compiler* getCompiler() const { return _compiler; }

  //! Get function.
  ASMJIT_INLINE HLFunc* getFunc() const { return _func; }
  //! Get stop node.
  ASMJIT_INLINE HLNode* getStop() const { return _stop; }

  //! Get start of the current scope.
  ASMJIT_INLINE HLNode* getStart() const { return _start; }
  //! Get end of the current scope.
  ASMJIT_INLINE HLNode* getEnd() const { return _end; }

  //! Get extra block.
  ASMJIT_INLINE HLNode* getExtraBlock() const { return _extraBlock; }
  //! Set extra block.
  ASMJIT_INLINE void setExtraBlock(HLNode* node) { _extraBlock = node; }

  // --------------------------------------------------------------------------
  // [Error]
  // --------------------------------------------------------------------------

  //! Get the last error code (delegates to the attached `Compiler`).
  ASMJIT_INLINE Error getLastError() const {
    return getCompiler()->getLastError();
  }

  //! Set the last error code and propagate it through the error handler.
  ASMJIT_INLINE Error setLastError(Error error, const char* message = nullptr) {
    return getCompiler()->setLastError(error, message);
  }

  // --------------------------------------------------------------------------
  // [State]
  // --------------------------------------------------------------------------

  //! Get current state.
  ASMJIT_INLINE VarState* getState() const { return _state; }

  //! Load current state from `src` state.
  virtual void loadState(VarState* src) = 0;
  //! Save current state, returning new `VarState` instance.
  virtual VarState* saveState() = 0;

  //! Change the current state to `src` state.
  virtual void switchState(VarState* src) = 0;
  //! Change the current state to the intersection of two states `a` and `b`.
  virtual void intersectStates(VarState* a, VarState* b) = 0;

  // --------------------------------------------------------------------------
  // [Context]
  // --------------------------------------------------------------------------

  //! Register `vd` within this context, assigning it the next free local id.
  //! No-op (returns `kErrorOk`) if it already has one.
  ASMJIT_INLINE Error _registerContextVar(VarData* vd) {
    if (vd->hasLocalId())
      return kErrorOk;

    uint32_t cid = static_cast<uint32_t>(_contextVd.getLength());
    ASMJIT_PROPAGATE_ERROR(_contextVd.append(vd));

    vd->setLocalId(cid);
    return kErrorOk;
  }

  // --------------------------------------------------------------------------
  // [Mem]
  // --------------------------------------------------------------------------

  VarCell* _newVarCell(VarData* vd);
  VarCell* _newStackCell(uint32_t size, uint32_t alignment);

  //! Get the home memory cell of `vd`, creating it lazily on first use.
  ASMJIT_INLINE VarCell* getVarCell(VarData* vd) {
    VarCell* cell = vd->getMemCell();
    return cell ? cell : _newVarCell(vd);
  }

  virtual Error resolveCellOffsets();

  // --------------------------------------------------------------------------
  // [Bits]
  // --------------------------------------------------------------------------

  //! Allocate a zeroed `BitArray` of `len` entities from the zone allocator.
  ASMJIT_INLINE BitArray* newBits(uint32_t len) {
    return static_cast<BitArray*>(
      _zoneAllocator.allocZeroed(static_cast<size_t>(len) * BitArray::kEntitySize));
  }

  //! Duplicate `src` (a `BitArray` of `len` entities) in the zone allocator.
  ASMJIT_INLINE BitArray* copyBits(const BitArray* src, uint32_t len) {
    return static_cast<BitArray*>(
      _zoneAllocator.dup(src, static_cast<size_t>(len) * BitArray::kEntitySize));
  }

  // --------------------------------------------------------------------------
  // [Fetch]
  // --------------------------------------------------------------------------

  //! Fetch.
  //!
  //! Fetch iterates over all nodes and gathers information about all variables
  //! used. The process generates information required by register allocator,
  //! variable liveness analysis and translator.
  virtual Error fetch() = 0;

  // --------------------------------------------------------------------------
  // [Unreachable Code]
  // --------------------------------------------------------------------------

  //! Add unreachable-flow data to the unreachable flow list.
  ASMJIT_INLINE Error addUnreachableNode(HLNode* node) {
    PodList<HLNode*>::Link* link = _zoneAllocator.allocT<PodList<HLNode*>::Link>();
    if (link == nullptr)
      return setLastError(kErrorNoHeapMemory);

    link->setValue(node);
    _unreachableList.append(link);

    return kErrorOk;
  }

  //! Remove unreachable code.
  virtual Error removeUnreachableCode();

  // --------------------------------------------------------------------------
  // [Code-Flow]
  // --------------------------------------------------------------------------

  //! Add returning node (i.e. node that returns and where liveness analysis
  //! should start).
  ASMJIT_INLINE Error addReturningNode(HLNode* node) {
    PodList<HLNode*>::Link* link = _zoneAllocator.allocT<PodList<HLNode*>::Link>();
    if (link == nullptr)
      return setLastError(kErrorNoHeapMemory);

    link->setValue(node);
    _returningList.append(link);

    return kErrorOk;
  }

  //! Add jump-flow data to the jcc flow list.
  ASMJIT_INLINE Error addJccNode(HLNode* node) {
    PodList<HLNode*>::Link* link = _zoneAllocator.allocT<PodList<HLNode*>::Link>();
    if (link == nullptr)
      return setLastError(kErrorNoHeapMemory);

    link->setValue(node);
    _jccList.append(link);

    return kErrorOk;
  }

  // --------------------------------------------------------------------------
  // [Analyze]
  // --------------------------------------------------------------------------

  //! Perform variable liveness analysis.
  //!
  //! Analysis phase iterates over nodes in reverse order and generates a bit
  //! array describing variables that are alive at every node in the function.
  //! When the analysis start all variables are assumed dead. When a read or
  //! read/write operations of a variable is detected the variable becomes
  //! alive; when only write operation is detected the variable becomes dead.
  //!
  //! When a label is found all jumps to that label are followed and analysis
  //! repeats until all variables are resolved.
  virtual Error livenessAnalysis();

  // --------------------------------------------------------------------------
  // [Annotate]
  // --------------------------------------------------------------------------

  virtual Error annotate() = 0;
  virtual Error formatInlineComment(StringBuilder& dst, HLNode* node);

  // --------------------------------------------------------------------------
  // [Translate]
  // --------------------------------------------------------------------------

  //! Translate code by allocating registers and handling state changes.
  virtual Error translate() = 0;

  // --------------------------------------------------------------------------
  // [Cleanup]
  // --------------------------------------------------------------------------

  virtual void cleanup();

  // --------------------------------------------------------------------------
  // [Compile]
  // --------------------------------------------------------------------------

  virtual Error compile(HLFunc* func);

  // --------------------------------------------------------------------------
  // [Serialize]
  // --------------------------------------------------------------------------

  virtual Error serialize(Assembler* assembler, HLNode* start, HLNode* stop) = 0;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Compiler.
  Compiler* _compiler;
  //! Function.
  HLFunc* _func;

  //! Zone allocator.
  Zone _zoneAllocator;

  //! \internal
  typedef void (ASMJIT_CDECL* TraceNodeFunc)(Context* self, HLNode* node_, const char* prefix);
  //! \internal
  //!
  //! Only non-NULL when ASMJIT_TRACE is enabled.
  TraceNodeFunc _traceNode;

  //! \internal
  //!
  //! Offset (how many bytes to add) to `VarMap` to get `VarAttr` array. Used
  //! by liveness analysis shared across all backends. This is needed because
  //! `VarMap` is a base class for a specialized version that liveness analysis
  //! doesn't use, it just needs `VarAttr` array.
  uint32_t _varMapToVaListOffset;

  //! Start of the current active scope.
  HLNode* _start;
  //! End of the current active scope.
  HLNode* _end;

  //! Node that is used to insert extra code after the function body.
  HLNode* _extraBlock;
  //! Stop node.
  HLNode* _stop;

  //! Unreachable nodes.
  PodList<HLNode*> _unreachableList;
  //! Returning nodes.
  PodList<HLNode*> _returningList;
  //! Jump nodes.
  PodList<HLNode*> _jccList;

  //! All variables used by the current function.
  PodVector<VarData*> _contextVd;

  //! Memory used to spill variables.
  VarCell* _memVarCells;
  //! Memory used to alloc memory on the stack.
  VarCell* _memStackCells;

  //! Count of 1-byte cells.
  uint32_t _mem1ByteVarsUsed;
  //! Count of 2-byte cells.
  uint32_t _mem2ByteVarsUsed;
  //! Count of 4-byte cells.
  uint32_t _mem4ByteVarsUsed;
  //! Count of 8-byte cells.
  uint32_t _mem8ByteVarsUsed;
  //! Count of 16-byte cells.
  uint32_t _mem16ByteVarsUsed;
  //! Count of 32-byte cells.
  uint32_t _mem32ByteVarsUsed;
  //! Count of 64-byte cells.
  uint32_t _mem64ByteVarsUsed;
  //! Count of stack memory cells.
  uint32_t _memStackCellsUsed;

  //! Maximum memory alignment used by the function.
  uint32_t _memMaxAlign;
  //! Count of bytes used by variables.
  uint32_t _memVarTotal;
  //! Count of bytes used by stack.
  uint32_t _memStackTotal;
  //! Count of bytes used by variables and stack after alignment.
  uint32_t _memAllTotal;

  //! Default length of annotated instruction.
  uint32_t _annotationLength;

  //! Current state (used by register allocator).
  VarState* _state;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_BASE_COMPILERCONTEXT_P_H

View File

@@ -1,679 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_COMPILERFUNC_H
#define _ASMJIT_BASE_COMPILERFUNC_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/operand.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::FuncHint]
// ============================================================================
//! Function hints.
//!
//! For a platform specific calling conventions, see:
//! - `X86FuncHint` - X86/X64 function hints.
ASMJIT_ENUM(FuncHint) {
  //! Generate a naked function by omitting its prolog and epilog (default true).
  //!
  //! Naked functions should always result in less code required for function's
  //! prolog and epilog. In addition, on X86/64 naked functions save one register
  //! (ebp or rbp), which can be used by the function instead.
  kFuncHintNaked = 0,

  //! Generate a compact function prolog/epilog if possible (default true).
  //!
  //! X86/X64 Specific
  //! ----------------
  //!
  //! Use shorter, but possibly slower, prolog/epilog sequence to save/restore
  //! registers. At the moment this only enables emitting `leave` in function's
  //! epilog to make the code shorter, however, the counterpart `enter` is not
  //! used in function's prolog for performance reasons.
  kFuncHintCompact = 1,

  //! Emit `emms` instruction in the function's epilog.
  kFuncHintX86Emms = 17,
  //! Emit `sfence` instruction in the function's epilog.
  kFuncHintX86SFence = 18,
  //! Emit `lfence` instruction in the function's epilog.
  kFuncHintX86LFence = 19
};
// ============================================================================
// [asmjit::FuncFlags]
// ============================================================================
//! Function flags.
ASMJIT_ENUM(FuncFlags) {
  //! Whether the function is using naked (minimal) prolog / epilog.
  kFuncFlagIsNaked = 0x00000001,

  //! Whether an another function is called from this function.
  kFuncFlagIsCaller = 0x00000002,

  //! Whether the stack is not aligned to the required stack alignment,
  //! thus it has to be aligned manually.
  kFuncFlagIsStackMisaligned = 0x00000004,

  //! Whether the stack pointer is adjusted by the stack size needed
  //! to save registers and function variables.
  //!
  //! X86/X64 Specific
  //! ----------------
  //!
  //! Stack pointer (ESP/RSP) is adjusted by 'sub' instruction in prolog and by
  //! 'add' instruction in epilog (only if function is not naked). If function
  //! needs to perform manual stack alignment more instructions are used to
  //! adjust the stack (like "and zsp, -Alignment").
  kFuncFlagIsStackAdjusted = 0x00000008,

  //! Whether the function is finished using `Compiler::endFunc()`.
  kFuncFlagIsFinished = 0x80000000,

  //! Whether to emit `leave` instead of two instructions in case that the
  //! function saves and restores the frame pointer.
  kFuncFlagX86Leave = 0x00010000,

  //! Whether it's required to move arguments to a new stack location,
  //! because of manual aligning.
  kFuncFlagX86MoveArgs = 0x00040000,

  //! Whether to emit `emms` instruction in epilog (auto-detected).
  kFuncFlagX86Emms = 0x01000000,

  //! Whether to emit `sfence` instruction in epilog (auto-detected).
  //!
  //! `kFuncFlagX86SFence` with `kFuncFlagX86LFence` results in emitting `mfence`.
  kFuncFlagX86SFence = 0x02000000,

  //! Whether to emit `lfence` instruction in epilog (auto-detected).
  //!
  //! `kFuncFlagX86SFence` with `kFuncFlagX86LFence` results in emitting `mfence`.
  kFuncFlagX86LFence = 0x04000000
};
// ============================================================================
// [asmjit::FuncDir]
// ============================================================================
//! Function arguments direction.
ASMJIT_ENUM(FuncDir) {
  //! Arguments are passed left to right.
  //!
  //! This arguments direction is unusual in C, however it's used in Pascal.
  kFuncDirLTR = 0,

  //! Arguments are passed right to left.
  //!
  //! This is the default argument direction in C.
  kFuncDirRTL = 1
};
// ============================================================================
// [asmjit::FuncMisc]
// ============================================================================
//! \internal
//!
//! Miscellaneous function-related constants.
enum {
  //! Function doesn't have variable number of arguments (`...`) (default).
  kFuncNoVarArgs = 0xFF,
  //! Invalid stack offset in function or function parameter.
  kFuncStackInvalid = -1
};
// ============================================================================
// [asmjit::FuncArgIndex]
// ============================================================================
//! Function argument index (lo/hi).
ASMJIT_ENUM(FuncArgIndex) {
  //! Maximum number of function arguments supported by AsmJit.
  kFuncArgCount = 16,
  //! Extended maximum number of arguments (used internally).
  kFuncArgCountLoHi = kFuncArgCount * 2,

  //! Index to the LO part of function argument (default).
  //!
  //! This value is typically omitted and added only if there is HI argument
  //! accessed.
  kFuncArgLo = 0,

  //! Index to the HI part of function argument.
  //!
  //! HI part of function argument depends on target architecture. On x86 it's
  //! typically used to transfer 64-bit integers (they form a pair of 32-bit
  //! integers).
  kFuncArgHi = kFuncArgCount
};
// ============================================================================
// [asmjit::FuncRet]
// ============================================================================
//! Function return value (lo/hi) specification.
ASMJIT_ENUM(FuncRet) {
  //! Index to the LO part of function return value.
  kFuncRetLo = 0,
  //! Index to the HI part of function return value.
  kFuncRetHi = 1
};
// ============================================================================
// [asmjit::TypeId]
// ============================================================================
// Empty tag types used by the function builder to describe argument and
// return types at compile time (mapped to variable type ids via `TypeId`).

//! Function builder's `void` type.
struct Void {};

//! Function builder's `int8_t` type.
struct Int8Type {};
//! Function builder's `uint8_t` type.
struct UInt8Type {};

//! Function builder's `int16_t` type.
struct Int16Type {};
//! Function builder's `uint16_t` type.
struct UInt16Type {};

//! Function builder's `int32_t` type.
struct Int32Type {};
//! Function builder's `uint32_t` type.
struct UInt32Type {};

//! Function builder's `int64_t` type.
struct Int64Type {};
//! Function builder's `uint64_t` type.
struct UInt64Type {};

//! Function builder's `intptr_t` type.
struct IntPtrType {};
//! Function builder's `uintptr_t` type.
struct UIntPtrType {};

//! Function builder's `float` type.
struct FloatType {};
//! Function builder's `double` type.
struct DoubleType {};
#if !defined(ASMJIT_DOCGEN)
// Compile-time mapping from a C++ type `T` to a \ref VarType id, exposed as
// `TypeId<T>::kId`. Only the specializations below are usable; the primary
// template is intentionally left empty.
template<typename T>
struct TypeId {
  // Let it fail here if `T` was not specialized.
};

// Every pointer type maps to the target's pointer-sized integer.
template<typename T>
struct TypeId<T*> {
  enum { kId = kVarTypeIntPtr };
};

// Maps an integral type to a VarType id by its size in bytes and its
// signedness; any size other than 1/2/4/8 yields `kInvalidVar`.
template<typename T>
struct TypeIdOfInt {
  enum { kId = (sizeof(T) == 1) ? (int)(IntTraits<T>::kIsSigned ? kVarTypeInt8  : kVarTypeUInt8 ) :
               (sizeof(T) == 2) ? (int)(IntTraits<T>::kIsSigned ? kVarTypeInt16 : kVarTypeUInt16) :
               (sizeof(T) == 4) ? (int)(IntTraits<T>::kIsSigned ? kVarTypeInt32 : kVarTypeUInt32) :
               (sizeof(T) == 8) ? (int)(IntTraits<T>::kIsSigned ? kVarTypeInt64 : kVarTypeUInt64) : (int)kInvalidVar
  };
};

// Declares a `TypeId` specialization mapping C++ type `T` to VarType `ID`.
#define ASMJIT_TYPE_ID(T, ID) \
template<> struct TypeId<T> { enum { kId = ID }; }

ASMJIT_TYPE_ID(void              , kInvalidVar);
ASMJIT_TYPE_ID(signed char       , TypeIdOfInt<signed char>::kId);
ASMJIT_TYPE_ID(unsigned char     , TypeIdOfInt<unsigned char>::kId);
ASMJIT_TYPE_ID(short             , TypeIdOfInt<short>::kId);
ASMJIT_TYPE_ID(unsigned short    , TypeIdOfInt<unsigned short>::kId);
ASMJIT_TYPE_ID(int               , TypeIdOfInt<int>::kId);
ASMJIT_TYPE_ID(unsigned int      , TypeIdOfInt<unsigned int>::kId);
ASMJIT_TYPE_ID(long              , TypeIdOfInt<long>::kId);
ASMJIT_TYPE_ID(unsigned long     , TypeIdOfInt<unsigned long>::kId);
ASMJIT_TYPE_ID(float             , kVarTypeFp32);
ASMJIT_TYPE_ID(double            , kVarTypeFp64);

// Character types that are distinct types only on some compilers are mapped
// conditionally, guarded by compiler feature macros.
#if ASMJIT_CC_HAS_NATIVE_CHAR
ASMJIT_TYPE_ID(char              , TypeIdOfInt<char>::kId);
#endif
#if ASMJIT_CC_HAS_NATIVE_WCHAR_T
ASMJIT_TYPE_ID(wchar_t           , TypeIdOfInt<wchar_t>::kId);
#endif
#if ASMJIT_CC_HAS_NATIVE_CHAR16_T
ASMJIT_TYPE_ID(char16_t          , TypeIdOfInt<char16_t>::kId);
#endif
#if ASMJIT_CC_HAS_NATIVE_CHAR32_T
ASMJIT_TYPE_ID(char32_t          , TypeIdOfInt<char32_t>::kId);
#endif

// On old MSC (version < 16.0) the MSC-specific `__int64` types are mapped
// instead of `long long`.
#if ASMJIT_CC_MSC && !ASMJIT_CC_MSC_GE(16, 0, 0)
ASMJIT_TYPE_ID(__int64           , TypeIdOfInt<__int64>::kId);
ASMJIT_TYPE_ID(unsigned __int64  , TypeIdOfInt<unsigned __int64>::kId);
#else
ASMJIT_TYPE_ID(long long         , TypeIdOfInt<long long>::kId);
ASMJIT_TYPE_ID(unsigned long long, TypeIdOfInt<unsigned long long>::kId);
#endif

// Mappings for the function-builder tag types declared above.
ASMJIT_TYPE_ID(Void              , kInvalidVar);
ASMJIT_TYPE_ID(Int8Type          , kVarTypeInt8);
ASMJIT_TYPE_ID(UInt8Type         , kVarTypeUInt8);
ASMJIT_TYPE_ID(Int16Type         , kVarTypeInt16);
ASMJIT_TYPE_ID(UInt16Type        , kVarTypeUInt16);
ASMJIT_TYPE_ID(Int32Type         , kVarTypeInt32);
ASMJIT_TYPE_ID(UInt32Type        , kVarTypeUInt32);
ASMJIT_TYPE_ID(Int64Type         , kVarTypeInt64);
ASMJIT_TYPE_ID(UInt64Type        , kVarTypeUInt64);
ASMJIT_TYPE_ID(IntPtrType        , kVarTypeIntPtr);
ASMJIT_TYPE_ID(UIntPtrType       , kVarTypeUIntPtr);
ASMJIT_TYPE_ID(FloatType         , kVarTypeFp32);
ASMJIT_TYPE_ID(DoubleType        , kVarTypeFp64);
#endif // !ASMJIT_DOCGEN
// ============================================================================
// [asmjit::FuncInOut]
// ============================================================================

//! Function in/out - argument or return value translated from `FuncPrototype`.
struct FuncInOut {
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the variable type, see \ref VarType.
  ASMJIT_INLINE uint32_t getVarType() const noexcept { return _varType; }

  //! Get whether the value is assigned to a register.
  ASMJIT_INLINE bool hasRegIndex() const noexcept { return _regIndex != kInvalidReg; }
  //! Get the register index (only meaningful if `hasRegIndex()` is true).
  ASMJIT_INLINE uint32_t getRegIndex() const noexcept { return _regIndex; }

  //! Get whether the value is assigned to a stack location.
  ASMJIT_INLINE bool hasStackOffset() const noexcept { return _stackOffset != kFuncStackInvalid; }
  //! Get the stack offset (only meaningful if `hasStackOffset()` is true).
  ASMJIT_INLINE int32_t getStackOffset() const noexcept { return static_cast<int32_t>(_stackOffset); }

  //! Get whether the argument / return value is assigned (to either a
  //! register or a stack location).
  ASMJIT_INLINE bool isSet() const noexcept {
    // Bitwise `|` on purpose - evaluates both conditions without branching.
    return (_regIndex != kInvalidReg) | (_stackOffset != kFuncStackInvalid);
  }

  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------

  //! Reset the function argument to "unassigned state".
  //!
  //! Writing 0xFFFFFFFF through `_packed` sets all fields in one store:
  //! `_stackOffset` becomes -1 (== `kFuncStackInvalid`, see above) and
  //! `_regIndex` becomes 0xFF (presumably `kInvalidReg` - TODO confirm),
  //! which makes `isSet()` return false.
  ASMJIT_INLINE void reset() noexcept { _packed = 0xFFFFFFFFU; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  // NOTE: The union lets `reset()` overwrite all three fields with a single
  // 32-bit store through `_packed`.
  union {
    struct {
      //! Variable type, see \ref VarType.
      uint8_t _varType;
      //! Register index if argument / return value is a register.
      uint8_t _regIndex;
      //! Stack offset if argument / return value is on the stack.
      int16_t _stackOffset;
    };

    //! All members packed into single 32-bit integer.
    uint32_t _packed;
  };
};
// ============================================================================
// [asmjit::FuncPrototype]
// ============================================================================

//! Function prototype.
//!
//! Function prototype contains information about function return type, count
//! of arguments and their types. Function prototype is a low level structure
//! which doesn't contain platform specific or calling convention specific
//! information. Function prototype is used to create a `FuncDecl`.
struct FuncPrototype {
  // --------------------------------------------------------------------------
  // [Setup]
  // --------------------------------------------------------------------------

  //! Initialize the prototype from a calling convention, a return type, and
  //! an array of argument types.
  //!
  //! NOTE: Only the pointer to `args` is stored, the array itself is not
  //! copied, so it has to stay valid for the whole lifetime of the prototype.
  ASMJIT_INLINE void setup(
    uint32_t callConv,
    uint32_t ret,
    const uint32_t* args, uint32_t numArgs) noexcept {

    // Both values are stored in 8-bit members below.
    ASMJIT_ASSERT(callConv <= 0xFF);
    ASMJIT_ASSERT(numArgs <= 0xFF);

    _callConv = static_cast<uint8_t>(callConv);
    _numArgs = static_cast<uint8_t>(numArgs);
    _varArgs = kFuncNoVarArgs;
    _reserved = 0;

    _ret = ret;
    _args = args;
  }

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the function's calling convention.
  ASMJIT_INLINE uint32_t getCallConv() const noexcept { return _callConv; }
  //! Get the variable arguments `...` index, `kFuncNoVarArgs` if none.
  ASMJIT_INLINE uint32_t getVarArgs() const noexcept { return _varArgs; }
  //! Get the number of function arguments.
  ASMJIT_INLINE uint32_t getNumArgs() const noexcept { return _numArgs; }

  //! Get the return value type.
  ASMJIT_INLINE uint32_t getRet() const noexcept { return _ret; }

  //! Get the type of the argument at index `i` (the index has to be valid).
  ASMJIT_INLINE uint32_t getArg(uint32_t i) const noexcept {
    ASMJIT_ASSERT(i < _numArgs);
    return _args[i];
  }
  //! Get the array of function arguments' types (not owned, see `setup()`).
  ASMJIT_INLINE const uint32_t* getArgs() const noexcept { return _args; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  uint8_t _callConv;                     //!< Calling convention.
  uint8_t _varArgs;                      //!< First vararg index, `kFuncNoVarArgs` if none.
  uint8_t _numArgs;                      //!< Number of function arguments.
  uint8_t _reserved;                     //!< Reserved (alignment).
  uint32_t _ret;                         //!< Return value type.
  const uint32_t* _args;                 //!< Argument types (not owned).
};
// ============================================================================
// [asmjit::FuncBuilderX]
// ============================================================================

// TODO: Rename to `DynamicFuncBuilder`
//! Custom function builder for up to `kFuncArgCount` function arguments,
//! which can be set at runtime (unlike the `FuncBuilderN` templates).
struct FuncBuilderX : public FuncPrototype {
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create an empty builder (no return value, no arguments).
  //!
  //! The argument array handed to `setup()` is the member `_builderArgList`,
  //! so the prototype stays valid as long as the builder itself.
  ASMJIT_INLINE FuncBuilderX(uint32_t callConv = kCallConvHost) noexcept {
    setup(callConv, kInvalidVar, _builderArgList, 0);
  }

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Set the calling convention to `callConv`.
  ASMJIT_INLINE void setCallConv(uint32_t callConv) noexcept {
    ASMJIT_ASSERT(callConv <= 0xFF);
    _callConv = static_cast<uint8_t>(callConv);
  }

  //! Set the return type to `retType`.
  ASMJIT_INLINE void setRet(uint32_t retType) noexcept { _ret = retType; }
  //! Set the return type based on `T`.
  template<typename T>
  ASMJIT_INLINE void setRetT() noexcept { setRet(TypeId<T>::kId); }

  //! Set the existing argument at index `i` to `type`.
  ASMJIT_INLINE void setArg(uint32_t i, uint32_t type) noexcept {
    ASMJIT_ASSERT(i < _numArgs);
    _builderArgList[i] = type;
  }
  //! Set the existing argument at index `i` to the type based on `T`.
  template<typename T>
  ASMJIT_INLINE void setArgT(uint32_t i) noexcept { setArg(i, TypeId<T>::kId); }

  //! Append an argument of `type` to the function prototype.
  ASMJIT_INLINE void addArg(uint32_t type) noexcept {
    ASMJIT_ASSERT(_numArgs < kFuncArgCount);
    uint32_t index = _numArgs;
    _builderArgList[index] = type;
    _numArgs = static_cast<uint8_t>(index + 1);
  }
  //! Append an argument of type based on `T` to the function prototype.
  template<typename T>
  ASMJIT_INLINE void addArgT() noexcept { addArg(TypeId<T>::kId); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Storage for argument types, referenced by `FuncPrototype::_args`.
  uint32_t _builderArgList[kFuncArgCount];
};
//! \internal
//!
//! Shorthand used by the `FuncBuilderN` templates below to get the `VarType`
//! id of a C++ type.
#define T(_Type_) TypeId<_Type_>::kId

// NOTE: Each `FuncBuilderN<...>` is a type-safe, fixed-arity builder. The
// `args` arrays below have static storage duration, so the pointer stored by
// `FuncPrototype::setup()` remains valid after the constructor returns.

//! Function prototype (no args).
template<typename RET>
struct FuncBuilder0 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder0(uint32_t callConv = kCallConvHost) noexcept {
    setup(callConv, T(RET), nullptr, 0);
  }
};

//! Function prototype (1 argument).
template<typename RET, typename P0>
struct FuncBuilder1 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder1(uint32_t callConv = kCallConvHost) noexcept {
    static const uint32_t args[] = { T(P0) };
    setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
  }
};

//! Function prototype (2 arguments).
template<typename RET, typename P0, typename P1>
struct FuncBuilder2 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder2(uint32_t callConv = kCallConvHost) noexcept {
    static const uint32_t args[] = { T(P0), T(P1) };
    setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
  }
};

//! Function prototype (3 arguments).
template<typename RET, typename P0, typename P1, typename P2>
struct FuncBuilder3 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder3(uint32_t callConv = kCallConvHost) noexcept {
    static const uint32_t args[] = { T(P0), T(P1), T(P2) };
    setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
  }
};

//! Function prototype (4 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3>
struct FuncBuilder4 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder4(uint32_t callConv = kCallConvHost) noexcept {
    static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3) };
    setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
  }
};

//! Function prototype (5 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4>
struct FuncBuilder5 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder5(uint32_t callConv = kCallConvHost) noexcept {
    static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4) };
    setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
  }
};

//! Function prototype (6 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5>
struct FuncBuilder6 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder6(uint32_t callConv = kCallConvHost) noexcept {
    static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5) };
    setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
  }
};

//! Function prototype (7 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6>
struct FuncBuilder7 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder7(uint32_t callConv = kCallConvHost) noexcept {
    static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6) };
    setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
  }
};

//! Function prototype (8 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7>
struct FuncBuilder8 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder8(uint32_t callConv = kCallConvHost) noexcept {
    static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7) };
    setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
  }
};

//! Function prototype (9 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8>
struct FuncBuilder9 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder9(uint32_t callConv = kCallConvHost) noexcept {
    static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7), T(P8) };
    setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
  }
};

//! Function prototype (10 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9>
struct FuncBuilder10 : public FuncPrototype {
  ASMJIT_INLINE FuncBuilder10(uint32_t callConv = kCallConvHost) noexcept {
    static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7), T(P8), T(P9) };
    setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
  }
};

#undef T
// ============================================================================
// [asmjit::FuncDecl]
// ============================================================================

//! Function declaration.
//!
//! Contains calling convention details and the register / stack assignment of
//! every function argument and of the return value(s) (see `FuncInOut`).
struct FuncDecl {
  // --------------------------------------------------------------------------
  // [Accessors - Calling Convention]
  // --------------------------------------------------------------------------

  //! Get the function's calling convention, see `CallConv`.
  ASMJIT_INLINE uint32_t getCallConv() const noexcept { return _callConv; }

  //! Get whether the callee pops the stack (0 or 1).
  ASMJIT_INLINE uint32_t getCalleePopsStack() const noexcept { return _calleePopsStack; }

  //! Get direction of arguments passed on the stack.
  //!
  //! Direction should be always `kFuncDirRTL`.
  //!
  //! NOTE: This is related to used calling convention, it's not affected by
  //! number of function arguments or their types.
  ASMJIT_INLINE uint32_t getArgsDirection() const noexcept { return _argsDirection; }

  //! Get stack size needed for function arguments passed on the stack.
  ASMJIT_INLINE uint32_t getArgStackSize() const noexcept { return _argStackSize; }
  //! Get size of "Red Zone".
  ASMJIT_INLINE uint32_t getRedZoneSize() const noexcept { return _redZoneSize; }
  //! Get size of "Spill Zone".
  ASMJIT_INLINE uint32_t getSpillZoneSize() const noexcept { return _spillZoneSize; }

  // --------------------------------------------------------------------------
  // [Accessors - Arguments and Return]
  // --------------------------------------------------------------------------

  //! Get whether the function has a return value.
  ASMJIT_INLINE bool hasRet() const noexcept { return _retCount != 0; }
  //! Get count of function return values.
  ASMJIT_INLINE uint32_t getRetCount() const noexcept { return _retCount; }

  //! Get function return value (`kFuncRetLo` or `kFuncRetHi` part).
  ASMJIT_INLINE FuncInOut& getRet(uint32_t index = kFuncRetLo) noexcept { return _rets[index]; }
  //! Get function return value (`kFuncRetLo` or `kFuncRetHi` part) (const).
  ASMJIT_INLINE const FuncInOut& getRet(uint32_t index = kFuncRetLo) const noexcept { return _rets[index]; }

  //! Get the number of function arguments.
  ASMJIT_INLINE uint32_t getNumArgs() const noexcept { return _numArgs; }

  //! Get function arguments array (LO slots first, HI slots after them).
  ASMJIT_INLINE FuncInOut* getArgs() noexcept { return _args; }
  //! Get function arguments array (const).
  ASMJIT_INLINE const FuncInOut* getArgs() const noexcept { return _args; }

  //! Get function argument at index `index` (LO/HI indexing, see `FuncArgIndex`).
  ASMJIT_INLINE FuncInOut& getArg(size_t index) noexcept {
    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
    return _args[index];
  }

  //! Get function argument at index `index` (const).
  ASMJIT_INLINE const FuncInOut& getArg(size_t index) const noexcept {
    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
    return _args[index];
  }

  //! Reset (unassign) the function argument at index `index`.
  ASMJIT_INLINE void resetArg(size_t index) noexcept {
    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
    _args[index].reset();
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Calling convention.
  uint8_t _callConv;
  //! Whether a callee pops stack.
  uint8_t _calleePopsStack : 1;
  //! Direction for arguments passed on the stack, see `FuncDir`.
  uint8_t _argsDirection : 1;
  //! Reserved #0 (alignment).
  uint8_t _reserved0 : 6;

  //! Number of function arguments.
  uint8_t _numArgs;
  //! Number of function return values.
  uint8_t _retCount;

  //! Count of bytes consumed by arguments on the stack (aligned).
  uint32_t _argStackSize;

  //! Size of "Red Zone".
  //!
  //! NOTE: Used by AMD64-ABI (128 bytes).
  uint16_t _redZoneSize;

  //! Size of "Spill Zone".
  //!
  //! NOTE: Used by WIN64-ABI (32 bytes).
  uint16_t _spillZoneSize;

  //! Function arguments (LO & HI) mapped to physical registers and stack.
  FuncInOut _args[kFuncArgCountLoHi];

  //! Function return value(s), indexed by `kFuncRetLo` / `kFuncRetHi`.
  FuncInOut _rets[2];
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_BASE_COMPILERFUNC_H

View File

@@ -11,8 +11,10 @@
#include "../base/constpool.h"
#include "../base/utils.h"
#include <algorithm>
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
@@ -31,7 +33,7 @@ static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* no
ConstPool::Node* link = node->_link[0];
uint32_t level = node->_level;
if (level != 0 && link != nullptr && link->_level == level) {
if (level != 0 && link && link->_level == level) {
node->_link[0] = link->_link[1];
link->_link[1] = node;
@@ -48,7 +50,7 @@ static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* n
ConstPool::Node* link = node->_link[1];
uint32_t level = node->_level;
if (level != 0 && link != nullptr && link->_link[1] != nullptr && link->_link[1]->_level == level) {
if (level != 0 && link && link->_link[1] && link->_link[1]->_level == level) {
node->_link[1] = link->_link[0];
link->_link[0] = node;
@@ -63,7 +65,7 @@ ConstPool::Node* ConstPool::Tree::get(const void* data) noexcept {
ConstPool::Node* node = _root;
size_t dataSize = _dataSize;
while (node != nullptr) {
while (node) {
int c = ::memcmp(node->getData(), data, dataSize);
if (c == 0)
return node;
@@ -75,9 +77,9 @@ ConstPool::Node* ConstPool::Tree::get(const void* data) noexcept {
void ConstPool::Tree::put(ConstPool::Node* newNode) noexcept {
size_t dataSize = _dataSize;
_length++;
if (_root == nullptr) {
if (!_root) {
_root = newNode;
return;
}
@@ -94,8 +96,7 @@ void ConstPool::Tree::put(ConstPool::Node* newNode) noexcept {
dir = ::memcmp(node->getData(), newNode->getData(), dataSize) < 0;
ConstPool::Node* link = node->_link[dir];
if (link == nullptr)
break;
if (!link) break;
node = link;
}
@@ -126,31 +127,22 @@ void ConstPool::Tree::put(ConstPool::Node* newNode) noexcept {
// [asmjit::ConstPool - Construction / Destruction]
// ============================================================================
ConstPool::ConstPool(Zone* zone) noexcept {
_zone = zone;
size_t dataSize = 1;
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].setDataSize(dataSize);
_gaps[i] = nullptr;
dataSize <<= 1;
}
_gapPool = nullptr;
_size = 0;
_alignment = 0;
}
ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
ConstPool::~ConstPool() noexcept {}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
void ConstPool::reset() noexcept {
void ConstPool::reset(Zone* zone) noexcept {
_zone = zone;
size_t dataSize = 1;
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].reset();
_tree[i].setDataSize(dataSize);
_gaps[i] = nullptr;
dataSize <<= 1;
}
_gapPool = nullptr;
@@ -164,8 +156,7 @@ void ConstPool::reset() noexcept {
static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool;
if (gap == nullptr)
return self->_zone->allocT<ConstPool::Gap>();
if (!gap) return self->_zone->allocT<ConstPool::Gap>();
self->_gapPool = gap->_next;
return gap;
@@ -183,8 +174,8 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) noex
size_t gapIndex;
size_t gapLength;
if (length >= 16 && Utils::isAligned<size_t>(offset, 16)) {
gapIndex = ConstPool::kIndex16;
if (length >= 16 && Utils::isAligned<size_t>(offset, 16)) {
gapLength = 16;
}
else if (length >= 8 && Utils::isAligned<size_t>(offset, 8)) {
@@ -208,8 +199,7 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) noex
// happened (just the gap won't be visible) and it will fail again at
// place where checking will cause kErrorNoHeapMemory.
ConstPool::Gap* gap = ConstPool_allocGap(self);
if (gap == nullptr)
return;
if (!gap) return;
gap->_next = self->_gaps[gapIndex];
self->_gaps[gapIndex] = gap;
@@ -238,10 +228,10 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
else if (size == 1)
treeIndex = kIndex1;
else
return kErrorInvalidArgument;
return DebugUtils::errored(kErrorInvalidArgument);
ConstPool::Node* node = _tree[treeIndex].get(data);
if (node != nullptr) {
if (node) {
dstOffset = node->_offset;
return kErrorOk;
}
@@ -255,7 +245,7 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
ConstPool::Gap* gap = _gaps[treeIndex];
// Check if there is a gap.
if (gap != nullptr) {
if (gap) {
size_t gapOffset = gap->_offset;
size_t gapLength = gap->_length;
@@ -290,11 +280,10 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
// Add the initial node to the right index.
node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
if (node == nullptr)
return kErrorNoHeapMemory;
if (!node) return DebugUtils::errored(kErrorNoHeapMemory);
_tree[treeIndex].put(node);
_alignment = Utils::iMax<size_t>(_alignment, size);
_alignment = std::max<size_t>(_alignment, size);
dstOffset = offset;
@@ -312,9 +301,7 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
const uint8_t* pData = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < pCount; i++, pData += size) {
node = _tree[treeIndex].get(pData);
if (node != nullptr)
continue;
if (node) continue;
node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true);
_tree[treeIndex].put(node);
@@ -372,23 +359,23 @@ UNIT(base_constpool) {
uint64_t c = ASMJIT_UINT64_C(0x0101010101010101);
EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk,
"pool.add() - Returned error.");
"pool.add() - Returned error");
EXPECT(prevOffset == 0,
"pool.add() - First constant should have zero offset.");
"pool.add() - First constant should have zero offset");
for (i = 1; i < kCount; i++) {
c++;
EXPECT(pool.add(&c, 8, curOffset) == kErrorOk,
"pool.add() - Returned error.");
"pool.add() - Returned error");
EXPECT(prevOffset + 8 == curOffset,
"pool.add() - Returned incorrect curOffset.");
"pool.add() - Returned incorrect curOffset");
EXPECT(pool.getSize() == (i + 1) * 8,
"pool.getSize() - Reported incorrect size.");
"pool.getSize() - Reported incorrect size");
prevOffset = curOffset;
}
EXPECT(pool.getAlignment() == 8,
"pool.getAlignment() - Expected 8-byte alignment.");
"pool.getAlignment() - Expected 8-byte alignment");
}
INFO("Retrieving %u constants from the pool.", kCount);
@@ -398,74 +385,75 @@ UNIT(base_constpool) {
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT(pool.add(&c, 8, offset) == kErrorOk,
"pool.add() - Returned error.");
"pool.add() - Returned error");
EXPECT(offset == i * 8,
"pool.add() - Should have reused constant.");
"pool.add() - Should have reused constant");
c++;
}
}
INFO("Checking if the constants were split into 4-byte patterns.");
INFO("Checking if the constants were split into 4-byte patterns");
{
uint32_t c = 0x01010101;
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT(pool.add(&c, 4, offset) == kErrorOk,
"pool.add() - Returned error.");
"pool.add() - Returned error");
EXPECT(offset == i * 8,
"pool.add() - Should reuse existing constant.");
"pool.add() - Should reuse existing constant");
c++;
}
}
INFO("Adding 2 byte constant to misalign the current offset.");
INFO("Adding 2 byte constant to misalign the current offset");
{
uint16_t c = 0xFFFF;
size_t offset;
EXPECT(pool.add(&c, 2, offset) == kErrorOk,
"pool.add() - Returned error.");
"pool.add() - Returned error");
EXPECT(offset == kCount * 8,
"pool.add() - Didn't return expected position.");
"pool.add() - Didn't return expected position");
EXPECT(pool.getAlignment() == 8,
"pool.getAlignment() - Expected 8-byte alignment.");
"pool.getAlignment() - Expected 8-byte alignment");
}
INFO("Adding 8 byte constant to check if pool gets aligned again.");
INFO("Adding 8 byte constant to check if pool gets aligned again");
{
uint64_t c = ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF);
size_t offset;
EXPECT(pool.add(&c, 8, offset) == kErrorOk,
"pool.add() - Returned error.");
"pool.add() - Returned error");
EXPECT(offset == kCount * 8 + 8,
"pool.add() - Didn't return aligned offset.");
"pool.add() - Didn't return aligned offset");
}
INFO("Adding 2 byte constant to verify the gap is filled.");
INFO("Adding 2 byte constant to verify the gap is filled");
{
uint16_t c = 0xFFFE;
size_t offset;
EXPECT(pool.add(&c, 2, offset) == kErrorOk,
"pool.add() - Returned error.");
"pool.add() - Returned error");
EXPECT(offset == kCount * 8 + 2,
"pool.add() - Didn't fill the gap.");
"pool.add() - Didn't fill the gap");
EXPECT(pool.getAlignment() == 8,
"pool.getAlignment() - Expected 8-byte alignment.");
"pool.getAlignment() - Expected 8-byte alignment");
}
INFO("Checking reset functionality.");
INFO("Checking reset functionality");
{
pool.reset();
pool.reset(&zone);
zone.reset();
EXPECT(pool.getSize() == 0,
"pool.getSize() - Expected pool size to be zero.");
"pool.getSize() - Expected pool size to be zero");
EXPECT(pool.getAlignment() == 0,
"pool.getSize() - Expected pool alignment to be zero.");
"pool.getSize() - Expected pool alignment to be zero");
}
INFO("Checking pool alignment when combined constants are added.");
INFO("Checking pool alignment when combined constants are added");
{
uint8_t bytes[32] = { 0 };
size_t offset;
@@ -473,46 +461,46 @@ UNIT(base_constpool) {
pool.add(bytes, 1, offset);
EXPECT(pool.getSize() == 1,
"pool.getSize() - Expected pool size to be 1 byte.");
"pool.getSize() - Expected pool size to be 1 byte");
EXPECT(pool.getAlignment() == 1,
"pool.getSize() - Expected pool alignment to be 1 byte.");
"pool.getSize() - Expected pool alignment to be 1 byte");
EXPECT(offset == 0,
"pool.getSize() - Expected offset returned to be zero.");
"pool.getSize() - Expected offset returned to be zero");
pool.add(bytes, 2, offset);
EXPECT(pool.getSize() == 4,
"pool.getSize() - Expected pool size to be 4 bytes.");
"pool.getSize() - Expected pool size to be 4 bytes");
EXPECT(pool.getAlignment() == 2,
"pool.getSize() - Expected pool alignment to be 2 bytes.");
"pool.getSize() - Expected pool alignment to be 2 bytes");
EXPECT(offset == 2,
"pool.getSize() - Expected offset returned to be 2.");
"pool.getSize() - Expected offset returned to be 2");
pool.add(bytes, 4, offset);
EXPECT(pool.getSize() == 8,
"pool.getSize() - Expected pool size to be 8 bytes.");
"pool.getSize() - Expected pool size to be 8 bytes");
EXPECT(pool.getAlignment() == 4,
"pool.getSize() - Expected pool alignment to be 4 bytes.");
"pool.getSize() - Expected pool alignment to be 4 bytes");
EXPECT(offset == 4,
"pool.getSize() - Expected offset returned to be 4.");
"pool.getSize() - Expected offset returned to be 4");
pool.add(bytes, 4, offset);
EXPECT(pool.getSize() == 8,
"pool.getSize() - Expected pool size to be 8 bytes.");
"pool.getSize() - Expected pool size to be 8 bytes");
EXPECT(pool.getAlignment() == 4,
"pool.getSize() - Expected pool alignment to be 4 bytes.");
"pool.getSize() - Expected pool alignment to be 4 bytes");
EXPECT(offset == 4,
"pool.getSize() - Expected offset returned to be 8.");
"pool.getSize() - Expected offset returned to be 8");
pool.add(bytes, 32, offset);
EXPECT(pool.getSize() == 64,
"pool.getSize() - Expected pool size to be 64 bytes.");
"pool.getSize() - Expected pool size to be 64 bytes");
EXPECT(pool.getAlignment() == 32,
"pool.getSize() - Expected pool alignment to be 32 bytes.");
"pool.getSize() - Expected pool alignment to be 32 bytes");
EXPECT(offset == 32,
"pool.getSize() - Expected offset returned to be 32.");
"pool.getSize() - Expected offset returned to be 32");
}
}
#endif // ASMJIT_TEST
@@ -520,4 +508,4 @@ UNIT(base_constpool) {
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"

View File

@@ -12,7 +12,7 @@
#include "../base/zone.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
@@ -25,8 +25,8 @@ namespace asmjit {
//! Constant pool.
class ConstPool {
public:
ASMJIT_NO_COPY(ConstPool)
public:
ASMJIT_NONCOPYABLE(ConstPool)
enum {
kIndex1 = 0,
@@ -46,12 +46,9 @@ class ConstPool {
//!
//! Zone-allocated const-pool gap.
struct Gap {
//! Link to the next gap
Gap* _next;
//! Offset of the gap.
size_t _offset;
//! Remaining bytes of the gap (basically a gap size).
size_t _length;
Gap* _next; //!< Pointer to the next gap
size_t _offset; //!< Offset of the gap.
size_t _length; //!< Remaining bytes of the gap (basically a gap size).
};
// --------------------------------------------------------------------------
@@ -62,26 +59,14 @@ class ConstPool {
//!
//! Zone-allocated const-pool node.
struct Node {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE void* getData() const noexcept {
return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Left/Right nodes.
Node* _link[2];
//! Horizontal level for balance.
uint32_t _level : 31;
//! Whether this constant is shared with another.
uint32_t _shared : 1;
//! Data offset from the beginning of the pool.
uint32_t _offset;
Node* _link[2]; //!< Left/Right nodes.
uint32_t _level : 31; //!< Horizontal level for balance.
uint32_t _shared : 1; //!< If this constant is shared with another.
uint32_t _offset; //!< Data offset from the beginning of the pool.
};
// --------------------------------------------------------------------------
@@ -142,8 +127,7 @@ class ConstPool {
template<typename Visitor>
ASMJIT_INLINE void iterate(Visitor& visitor) const noexcept {
Node* node = const_cast<Node*>(_root);
if (node == nullptr)
return;
if (!node) return;
Node* stack[kHeightLimit];
size_t top = 0;
@@ -158,7 +142,7 @@ class ConstPool {
continue;
}
L_Visit:
Visit:
visitor.visit(node);
node = node->_link[1];
if (node != nullptr)
@@ -168,7 +152,7 @@ L_Visit:
return;
node = stack[--top];
goto L_Visit;
goto Visit;
}
}
@@ -178,8 +162,7 @@ L_Visit:
static ASMJIT_INLINE Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
Node* node = zone->allocT<Node>(sizeof(Node) + size);
if (node == nullptr)
return nullptr;
if (ASMJIT_UNLIKELY(!node)) return nullptr;
node->_link[0] = nullptr;
node->_link[1] = nullptr;
@@ -195,12 +178,9 @@ L_Visit:
// [Members]
// --------------------------------------------------------------------------
//! Root of the tree
Node* _root;
//! Length of the tree (count of nodes).
size_t _length;
//! Size of the data.
size_t _dataSize;
Node* _root; //!< Root of the tree
size_t _length; //!< Length of the tree (count of nodes).
size_t _dataSize; //!< Size of the data.
};
// --------------------------------------------------------------------------
@@ -214,7 +194,7 @@ L_Visit:
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_API void reset() noexcept;
ASMJIT_API void reset(Zone* zone) noexcept;
// --------------------------------------------------------------------------
// [Ops]
@@ -257,19 +237,13 @@ L_Visit:
// [Members]
// --------------------------------------------------------------------------
//! Zone allocator.
Zone* _zone;
//! Tree per size.
Tree _tree[kIndexCount];
//! Gaps per size.
Gap* _gaps[kIndexCount];
//! Gaps pool
Gap* _gapPool;
Zone* _zone; //!< Zone allocator.
Tree _tree[kIndexCount]; //!< Tree per size.
Gap* _gaps[kIndexCount]; //!< Gaps per size.
Gap* _gapPool; //!< Gaps pool
//! Size of the pool (in bytes).
size_t _size;
//! Alignemnt.
size_t _alignment;
size_t _size; //!< Size of the pool (in bytes).
size_t _alignment; //!< Required pool alignment.
};
//! \}
@@ -277,7 +251,7 @@ L_Visit:
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CONSTPOOL_H

View File

@@ -1,550 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CONTAINERS_H
#define _ASMJIT_BASE_CONTAINERS_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::BitArray]
// ============================================================================
//! Fixed size bit-array.
//!
//! Used by variable liveness analysis.
struct BitArray {
// --------------------------------------------------------------------------
// [Enums]
// --------------------------------------------------------------------------
enum {
kEntitySize = static_cast<int>(sizeof(uintptr_t)),
kEntityBits = kEntitySize * 8
};
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE uintptr_t getBit(uint32_t index) const noexcept {
return (data[index / kEntityBits] >> (index % kEntityBits)) & 1;
}
ASMJIT_INLINE void setBit(uint32_t index) noexcept {
data[index / kEntityBits] |= static_cast<uintptr_t>(1) << (index % kEntityBits);
}
ASMJIT_INLINE void delBit(uint32_t index) noexcept {
data[index / kEntityBits] &= ~(static_cast<uintptr_t>(1) << (index % kEntityBits));
}
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
//! Copy bits from `s0`, returns `true` if at least one bit is set in `s0`.
ASMJIT_INLINE bool copyBits(const BitArray* s0, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i];
data[i] = t;
r |= t;
}
return r != 0;
}
ASMJIT_INLINE bool addBits(const BitArray* s0, uint32_t len) noexcept {
return addBits(this, s0, len);
}
ASMJIT_INLINE bool addBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i] | s1->data[i];
data[i] = t;
r |= t;
}
return r != 0;
}
ASMJIT_INLINE bool andBits(const BitArray* s1, uint32_t len) noexcept {
return andBits(this, s1, len);
}
ASMJIT_INLINE bool andBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i] & s1->data[i];
data[i] = t;
r |= t;
}
return r != 0;
}
ASMJIT_INLINE bool delBits(const BitArray* s1, uint32_t len) noexcept {
return delBits(this, s1, len);
}
ASMJIT_INLINE bool delBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i] & ~s1->data[i];
data[i] = t;
r |= t;
}
return r != 0;
}
ASMJIT_INLINE bool _addBitsDelSource(BitArray* s1, uint32_t len) noexcept {
return _addBitsDelSource(this, s1, len);
}
ASMJIT_INLINE bool _addBitsDelSource(const BitArray* s0, BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t a = s0->data[i];
uintptr_t b = s1->data[i];
this->data[i] = a | b;
b &= ~a;
s1->data[i] = b;
r |= b;
}
return r != 0;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
uintptr_t data[1];
};
// ============================================================================
// [asmjit::PodList<T>]
// ============================================================================
//! \internal
template <typename T>
class PodList {
public:
ASMJIT_NO_COPY(PodList<T>)
// --------------------------------------------------------------------------
// [Link]
// --------------------------------------------------------------------------
struct Link {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get next node.
ASMJIT_INLINE Link* getNext() const noexcept { return _next; }
//! Get value.
ASMJIT_INLINE T getValue() const noexcept { return _value; }
//! Set value to `value`.
ASMJIT_INLINE void setValue(const T& value) noexcept { _value = value; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
Link* _next;
T _value;
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE PodList() noexcept : _first(nullptr), _last(nullptr) {}
ASMJIT_INLINE ~PodList() noexcept {}
// --------------------------------------------------------------------------
// [Data]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool isEmpty() const noexcept { return _first != nullptr; }
ASMJIT_INLINE Link* getFirst() const noexcept { return _first; }
ASMJIT_INLINE Link* getLast() const noexcept { return _last; }
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset() noexcept {
_first = nullptr;
_last = nullptr;
}
ASMJIT_INLINE void prepend(Link* link) noexcept {
link->_next = _first;
if (_first == nullptr)
_last = link;
_first = link;
}
ASMJIT_INLINE void append(Link* link) noexcept {
link->_next = nullptr;
if (_first == nullptr)
_first = link;
else
_last->_next = link;
_last = link;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
Link* _first;
Link* _last;
};
// ============================================================================
// [asmjit::StringBuilder]
// ============================================================================
//! String builder.
//!
//! String builder was designed to be able to build a string using append like
//! operation to append numbers, other strings, or signle characters. It can
//! allocate it's own buffer or use a buffer created on the stack.
//!
//! String builder contains method specific to AsmJit functionality, used for
//! logging or HTML output.
class StringBuilder {
public:
ASMJIT_NO_COPY(StringBuilder)
// --------------------------------------------------------------------------
// [Enums]
// --------------------------------------------------------------------------
//! \internal
//!
//! String operation.
ASMJIT_ENUM(StringOp) {
//! Replace the current string by a given content.
kStringOpSet = 0,
//! Append a given content to the current string.
kStringOpAppend = 1
};
//! \internal
//!
//! String format flags.
ASMJIT_ENUM(StringFormatFlags) {
kStringFormatShowSign = 0x00000001,
kStringFormatShowSpace = 0x00000002,
kStringFormatAlternate = 0x00000004,
kStringFormatSigned = 0x80000000
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_API StringBuilder() noexcept;
ASMJIT_API ~StringBuilder() noexcept;
ASMJIT_INLINE StringBuilder(const _NoInit&) noexcept {}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get string builder capacity.
ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; }
//! Get length.
ASMJIT_INLINE size_t getLength() const noexcept { return _length; }
//! Get null-terminated string data.
ASMJIT_INLINE char* getData() noexcept { return _data; }
//! Get null-terminated string data (const).
ASMJIT_INLINE const char* getData() const noexcept { return _data; }
// --------------------------------------------------------------------------
// [Prepare / Reserve]
// --------------------------------------------------------------------------
//! Prepare to set/append.
ASMJIT_API char* prepare(uint32_t op, size_t len) noexcept;
//! Reserve `to` bytes in string builder.
ASMJIT_API bool reserve(size_t to) noexcept;
// --------------------------------------------------------------------------
// [Clear]
// --------------------------------------------------------------------------
//! Clear the content in String builder.
ASMJIT_API void clear() noexcept;
// --------------------------------------------------------------------------
// [Op]
// --------------------------------------------------------------------------
ASMJIT_API bool _opString(uint32_t op, const char* str, size_t len = kInvalidIndex) noexcept;
ASMJIT_API bool _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept;
ASMJIT_API bool _opChar(uint32_t op, char c) noexcept;
ASMJIT_API bool _opChars(uint32_t op, char c, size_t len) noexcept;
ASMJIT_API bool _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept;
ASMJIT_API bool _opHex(uint32_t op, const void* data, size_t len) noexcept;
// --------------------------------------------------------------------------
// [Set]
// --------------------------------------------------------------------------
//! Replace the current content by `str` of `len`.
ASMJIT_INLINE bool setString(const char* str, size_t len = kInvalidIndex) noexcept {
return _opString(kStringOpSet, str, len);
}
//! Replace the current content by formatted string `fmt`.
ASMJIT_INLINE bool setVFormat(const char* fmt, va_list ap) noexcept {
return _opVFormat(kStringOpSet, fmt, ap);
}
//! Replace the current content by formatted string `fmt`.
ASMJIT_API bool setFormat(const char* fmt, ...) noexcept;
//! Replace the current content by `c` character.
ASMJIT_INLINE bool setChar(char c) noexcept {
return _opChar(kStringOpSet, c);
}
//! Replace the current content by `c` of `len`.
ASMJIT_INLINE bool setChars(char c, size_t len) noexcept {
return _opChars(kStringOpSet, c, len);
}
//! Replace the current content by formatted integer `i`.
ASMJIT_INLINE bool setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpSet, i, base, width, flags | kStringFormatSigned);
}
//! Replace the current content by formatted integer `i`.
ASMJIT_INLINE bool setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpSet, i, base, width, flags);
}
//! Replace the current content by the given `data` converted to a HEX string.
ASMJIT_INLINE bool setHex(const void* data, size_t len) noexcept {
return _opHex(kStringOpSet, data, len);
}
// --------------------------------------------------------------------------
// [Append]
// --------------------------------------------------------------------------
//! Append `str` of `len`.
ASMJIT_INLINE bool appendString(const char* str, size_t len = kInvalidIndex) noexcept {
return _opString(kStringOpAppend, str, len);
}
//! Append a formatted string `fmt` to the current content.
ASMJIT_INLINE bool appendVFormat(const char* fmt, va_list ap) noexcept {
return _opVFormat(kStringOpAppend, fmt, ap);
}
//! Append a formatted string `fmt` to the current content.
ASMJIT_API bool appendFormat(const char* fmt, ...) noexcept;
//! Append `c` character.
ASMJIT_INLINE bool appendChar(char c) noexcept {
return _opChar(kStringOpAppend, c);
}
//! Append `c` of `len`.
ASMJIT_INLINE bool appendChars(char c, size_t len) noexcept {
return _opChars(kStringOpAppend, c, len);
}
//! Append `i`.
ASMJIT_INLINE bool appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpAppend, static_cast<uint64_t>(i), base, width, flags | kStringFormatSigned);
}
//! Append `i`.
ASMJIT_INLINE bool appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpAppend, i, base, width, flags);
}
//! Append the given `data` converted to a HEX string.
ASMJIT_INLINE bool appendHex(const void* data, size_t len) noexcept {
return _opHex(kStringOpAppend, data, len);
}
// --------------------------------------------------------------------------
// [_Append]
// --------------------------------------------------------------------------
//! Append `str` of `len`, inlined, without buffer overflow check.
ASMJIT_INLINE void _appendString(const char* str, size_t len = kInvalidIndex) noexcept {
// len should be a constant if we are inlining.
if (len == kInvalidIndex) {
char* p = &_data[_length];
while (*str) {
ASMJIT_ASSERT(p < _data + _capacity);
*p++ = *str++;
}
*p = '\0';
_length = (size_t)(p - _data);
}
else {
ASMJIT_ASSERT(_capacity - _length >= len);
char* p = &_data[_length];
char* pEnd = p + len;
while (p < pEnd)
*p++ = *str++;
*p = '\0';
_length += len;
}
}
//! Append `c` character, inlined, without buffer overflow check.
ASMJIT_INLINE void _appendChar(char c) noexcept {
ASMJIT_ASSERT(_capacity - _length >= 1);
_data[_length] = c;
_length++;
_data[_length] = '\0';
}
//! Append `c` of `len`, inlined, without buffer overflow check.
ASMJIT_INLINE void _appendChars(char c, size_t len) noexcept {
ASMJIT_ASSERT(_capacity - _length >= len);
char* p = &_data[_length];
char* pEnd = p + len;
while (p < pEnd)
*p++ = c;
*p = '\0';
_length += len;
}
ASMJIT_INLINE void _appendUInt32(uint32_t i) noexcept {
char buf_[32];
char* pEnd = buf_ + ASMJIT_ARRAY_SIZE(buf_);
char* pBuf = pEnd;
do {
uint32_t d = i / 10;
uint32_t r = i % 10;
*--pBuf = static_cast<uint8_t>(r + '0');
i = d;
} while (i);
ASMJIT_ASSERT(_capacity - _length >= (size_t)(pEnd - pBuf));
char* p = &_data[_length];
do {
*p++ = *pBuf;
} while (++pBuf != pEnd);
*p = '\0';
_length = (size_t)(p - _data);
}
// --------------------------------------------------------------------------
// [Eq]
// --------------------------------------------------------------------------
//! Check for equality with other `str` of `len`.
ASMJIT_API bool eq(const char* str, size_t len = kInvalidIndex) const noexcept;
//! Check for equality with `other`.
ASMJIT_INLINE bool eq(const StringBuilder& other) const noexcept { return eq(other._data); }
// --------------------------------------------------------------------------
// [Operator Overload]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool operator==(const StringBuilder& other) const noexcept { return eq(other); }
ASMJIT_INLINE bool operator!=(const StringBuilder& other) const noexcept { return !eq(other); }
ASMJIT_INLINE bool operator==(const char* str) const noexcept { return eq(str); }
ASMJIT_INLINE bool operator!=(const char* str) const noexcept { return !eq(str); }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! String data.
char* _data;
//! Length.
size_t _length;
//! Capacity.
size_t _capacity;
//! Whether the string can be freed.
size_t _canFree;
};
// ============================================================================
// [asmjit::StringBuilderTmp]
// ============================================================================
//! Temporary string builder, has statically allocated `N` bytes.
template<size_t N>
class StringBuilderTmp : public StringBuilder {
public:
ASMJIT_NO_COPY(StringBuilderTmp<N>)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE StringBuilderTmp() noexcept : StringBuilder(NoInit) {
_data = _embeddedData;
_data[0] = 0;
_length = 0;
_capacity = N;
_canFree = false;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Embedded data.
char _embeddedData[static_cast<size_t>(
N + 1 + sizeof(intptr_t)) & ~static_cast<size_t>(sizeof(intptr_t) - 1)];
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CONTAINERS_H

View File

@@ -31,32 +31,44 @@
#endif
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CpuInfo - Detect ARM & ARM64]
// [asmjit::CpuInfo - Detect ARM]
// ============================================================================
// ARM information has to be retrieved by the OS (this is how ARM was designed).
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
#if ASMJIT_ARCH_ARM64
static void armPopulateBaseline64Features(CpuInfo* cpuInfo) noexcept {
// Thumb (including all variations) is only supported on ARM32.
#if ASMJIT_ARCH_ARM32
static ASMJIT_INLINE void armPopulateBaselineA32Features(CpuInfo* cpuInfo) noexcept {
cpuInfo->_archInfo.init(ArchInfo::kTypeA32);
}
#endif // ASMJIT_ARCH_ARM32
// ARM64 is based on ARMv8 and newer.
#if ASMJIT_ARCH_ARM64
static ASMJIT_INLINE void armPopulateBaselineA64Features(CpuInfo* cpuInfo) noexcept {
cpuInfo->_archInfo.init(ArchInfo::kTypeA64);
// Thumb (including all variations) is supported on A64 (but not accessible from A64).
cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB);
cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2);
// A64 is based on ARMv8 and newer.
cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
cpuInfo->addFeature(CpuInfo::kArmFeatureV8);
// ARM64 comes with these features by default.
cpuInfo->addFeature(CpuInfo::kArmFeatureDSP);
cpuInfo->addFeature(CpuInfo::kArmFeatureIDIV);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP3);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP4);
// A64 comes with these features by default.
cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv3);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv4);
cpuInfo->addFeature(CpuInfo::kArmFeatureEDSP);
cpuInfo->addFeature(CpuInfo::kArmFeatureASIMD);
cpuInfo->addFeature(CpuInfo::kArmFeatureIDIVA);
cpuInfo->addFeature(CpuInfo::kArmFeatureIDIVT);
}
#endif // ASMJIT_ARCH_ARM64
@@ -66,39 +78,39 @@ static void armPopulateBaseline64Features(CpuInfo* cpuInfo) noexcept {
//! Detect ARM CPU features on Windows.
//!
//! The detection is based on `IsProcessorFeaturePresent()` API call.
static void armDetectCpuInfoOnWindows(CpuInfo* cpuInfo) noexcept {
static ASMJIT_INLINE void armDetectCpuInfoOnWindows(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_ARCH_ARM32
cpuInfo->setArch(kArchArm32);
armPopulateBaselineA32Features(cpuInfo);
// Windows for ARM requires at least ARMv7 with DSP extensions.
cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
cpuInfo->addFeature(CpuInfo::kArmFeatureDSP);
cpuInfo->addFeature(CpuInfo::kArmFeatureEDSP);
// Windows for ARM requires VFP3.
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP3);
// Windows for ARM requires VFPv3.
cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv3);
// Windows for ARM requires and uses THUMB2.
cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB);
cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2);
#else
cpuInfo->setArch(kArchArm64);
armPopulateBaseline64Features(cpuInfo);
armPopulateBaselineA64Features(cpuInfo);
#endif
// Windows for ARM requires NEON.
cpuInfo->addFeature(CpuInfo::kArmFeatureNEON);
// Windows for ARM requires ASIMD.
cpuInfo->addFeature(CpuInfo::kArmFeatureASIMD);
// Detect additional CPU features by calling `IsProcessorFeaturePresent()`.
struct WinPFPMapping {
uint32_t pfpId, featureId;
uint32_t pfpId;
uint32_t featureId;
};
static const WinPFPMapping mapping[] = {
{ PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE , CpuInfo::kArmFeatureVFP4 },
{ PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE , CpuInfo::kArmFeatureVFPv4 },
{ PF_ARM_VFP_32_REGISTERS_AVAILABLE , CpuInfo::kArmFeatureVFP_D32 },
{ PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE, CpuInfo::kArmFeatureIDIV },
{ PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE, CpuInfo::kArmFeatureIDIVT },
{ PF_ARM_64BIT_LOADSTORE_ATOMIC , CpuInfo::kArmFeatureAtomics64 }
};
@@ -110,13 +122,13 @@ static void armDetectCpuInfoOnWindows(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_OS_LINUX
struct LinuxHWCapMapping {
uint32_t hwcapMask, featureId;
uint32_t hwcapMask;
uint32_t featureId;
};
static void armDetectHWCaps(CpuInfo* cpuInfo,
unsigned long type, const LinuxHWCapMapping* mapping, size_t length) noexcept {
static void armDetectHWCaps(CpuInfo* cpuInfo, unsigned long type, const LinuxHWCapMapping* mapping, size_t length) noexcept {
unsigned long mask = getauxval(type);
for (size_t i = 0; i < length; i++)
if ((mask & mapping[i].hwcapMask) == mapping[i].hwcapMask)
cpuInfo->addFeature(mapping[i].featureId);
@@ -127,41 +139,46 @@ static void armDetectHWCaps(CpuInfo* cpuInfo,
//! Detect ARM CPU features on Linux.
//!
//! The detection is based on `getauxval()`.
static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept {
ASMJIT_FAVOR_SIZE static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_ARCH_ARM32
cpuInfo->setArch(kArchArm32);
armPopulateBaselineA32Features(cpuInfo);
// `AT_HWCAP` provides ARMv7 (and less) related flags.
static const LinuxHWCapMapping hwCapMapping[] = {
{ /* HWCAP_VFPv3 */ (1 << 13), CpuInfo::kArmFeatureVFP3 },
{ /* HWCAP_VFPv4 */ (1 << 16), CpuInfo::kArmFeatureVFP4 },
{ /* HWCAP_IDIVA */ (3 << 17), CpuInfo::kArmFeatureIDIV },
{ /* HWCAP_VFPD32 */ (1 << 19), CpuInfo::kArmFeatureVFP_D32 },
{ /* HWCAP_NEON */ (1 << 12), CpuInfo::kArmFeatureNEON },
{ /* HWCAP_EDSP */ (1 << 7), CpuInfo::kArmFeatureDSP }
{ /* HWCAP_VFP */ (1 << 6), CpuInfo::kArmFeatureVFPv2 },
{ /* HWCAP_EDSP */ (1 << 7), CpuInfo::kArmFeatureEDSP },
{ /* HWCAP_NEON */ (1 << 12), CpuInfo::kArmFeatureASIMD },
{ /* HWCAP_VFPv3 */ (1 << 13), CpuInfo::kArmFeatureVFPv3 },
{ /* HWCAP_VFPv4 */ (1 << 16), CpuInfo::kArmFeatureVFPv4 },
{ /* HWCAP_IDIVA */ (1 << 17), CpuInfo::kArmFeatureIDIVA },
{ /* HWCAP_IDIVT */ (1 << 18), CpuInfo::kArmFeatureIDIVT },
{ /* HWCAP_VFPD32 */ (1 << 19), CpuInfo::kArmFeatureVFP_D32 }
};
armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping));
// VFP3 implies VFP2.
if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP3))
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2);
// VFPv3 implies VFPv2.
if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv3)) {
cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2);
}
// VFP2 implies ARMv6.
if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP2))
// VFPv2 implies ARMv6.
if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv2)) {
cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
}
// VFP3 or NEON implies ARMv7.
if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP3) ||
cpuInfo->hasFeature(CpuInfo::kArmFeatureNEON))
// VFPv3 or ASIMD implies ARMv7.
if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv3) ||
cpuInfo->hasFeature(CpuInfo::kArmFeatureASIMD)) {
cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
}
// `AT_HWCAP2` provides ARMv8 related flags.
// `AT_HWCAP2` provides ARMv8+ related flags.
static const LinuxHWCapMapping hwCap2Mapping[] = {
{ /* HWCAP2_AES */ (1 << 0), CpuInfo::kArmFeatureAES },
{ /* HWCAP2_CRC32 */ (1 << 4), CpuInfo::kArmFeatureCRC32 },
{ /* HWCAP2_PMULL */ (1 << 1), CpuInfo::kArmFeaturePMULL },
{ /* HWCAP2_SHA1 */ (1 << 2), CpuInfo::kArmFeatureSHA1 },
{ /* HWCAP2_SHA2 */ (1 << 3), CpuInfo::kArmFeatureSHA256 }
{ /* HWCAP2_SHA2 */ (1 << 3), CpuInfo::kArmFeatureSHA256 },
{ /* HWCAP2_CRC32 */ (1 << 4), CpuInfo::kArmFeatureCRC32 }
};
armDetectHWCaps(cpuInfo, AT_HWCAP2, hwCap2Mapping, ASMJIT_ARRAY_SIZE(hwCap2Mapping));
@@ -173,17 +190,16 @@ static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept {
cpuInfo->addFeature(CpuInfo::kArmFeatureV8);
}
#else
cpuInfo->setArch(kArchArm64);
armPopulateBaseline64Features(cpuInfo);
armPopulateBaselineA64Features(cpuInfo);
// `AT_HWCAP` provides ARMv8 related flags.
// `AT_HWCAP` provides ARMv8+ related flags.
static const LinuxHWCapMapping hwCapMapping[] = {
{ /* HWCAP_ASIMD */ (1 << 1), CpuInfo::kArmFeatureNEON },
{ /* HWCAP_ASIMD */ (1 << 1), CpuInfo::kArmFeatureASIMD },
{ /* HWCAP_AES */ (1 << 3), CpuInfo::kArmFeatureAES },
{ /* HWCAP_CRC32 */ (1 << 7), CpuInfo::kArmFeatureCRC32 },
{ /* HWCAP_PMULL */ (1 << 4), CpuInfo::kArmFeaturePMULL },
{ /* HWCAP_SHA1 */ (1 << 5), CpuInfo::kArmFeatureSHA1 },
{ /* HWCAP_SHA2 */ (1 << 6), CpuInfo::kArmFeatureSHA256 }
{ /* HWCAP_SHA2 */ (1 << 6), CpuInfo::kArmFeatureSHA256 },
{ /* HWCAP_ATOMICS */ (1 << 8), CpuInfo::kArmFeatureAtomics64 }
};
armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping));
@@ -193,7 +209,7 @@ static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept {
}
#endif // ASMJIT_OS_LINUX
static void armDetectCpuInfo(CpuInfo* cpuInfo) noexcept {
ASMJIT_FAVOR_SIZE static void armDetectCpuInfo(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_OS_WINDOWS
armDetectCpuInfoOnWindows(cpuInfo);
#elif ASMJIT_OS_LINUX
@@ -205,7 +221,7 @@ static void armDetectCpuInfo(CpuInfo* cpuInfo) noexcept {
#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
// ============================================================================
// [asmjit::CpuInfo - Detect X86 & X64]
// [asmjit::CpuInfo - Detect X86]
// ============================================================================
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
@@ -228,7 +244,7 @@ struct XGetBVResult {
//! \internal
//!
//! HACK: VS2008 or less, 64-bit mode - `__cpuidex` doesn't exist! However,
//! 64-bit calling convention specifies the first parameter to be passed in
//! 64-bit calling convention specifies the first parameter to be passed by
//! ECX, so we may be lucky if compiler doesn't move the register, otherwise
//! the result would be wrong.
static void ASMJIT_NOINLINE void x86CallCpuIdWorkaround(uint32_t inEcx, uint32_t inEax, CpuIdResult* result) noexcept {
@@ -291,7 +307,7 @@ static void ASMJIT_INLINE x86CallCpuId(CpuIdResult* result, uint32_t inEax, uint
//! \internal
//!
//! Wrapper to call `xgetbv` instruction.
static void x86CallXGetBV(XGetBVResult* result, uint32_t inEcx) noexcept {
static ASMJIT_INLINE void x86CallXGetBV(XGetBVResult* result, uint32_t inEcx) noexcept {
#if ASMJIT_CC_MSC_GE(16, 0, 40219) // 2010SP1+
uint64_t value = _xgetbv(inEcx);
result->eax = static_cast<uint32_t>(value & 0xFFFFFFFFU);
@@ -315,7 +331,7 @@ static void x86CallXGetBV(XGetBVResult* result, uint32_t inEcx) noexcept {
//! \internal
//!
//! Map a 12-byte vendor string returned by `cpuid` into a `CpuInfo::Vendor` ID.
static uint32_t x86GetCpuVendorID(const char* vendorString) noexcept {
static ASMJIT_INLINE uint32_t x86GetCpuVendorID(const char* vendorString) noexcept {
struct VendorData {
uint32_t id;
char text[12];
@@ -372,14 +388,13 @@ L_Skip:
d[0] = '\0';
}
static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
ASMJIT_FAVOR_SIZE static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
uint32_t i, maxId;
CpuIdResult regs;
XGetBVResult xcr0 = { 0, 0 };
// Architecture is known at compile-time.
cpuInfo->setArch(ASMJIT_ARCH_X86 ? kArchX86 : kArchX64);
cpuInfo->_archInfo.init(ArchInfo::kTypeHost);
// --------------------------------------------------------------------------
// [CPUID EAX=0x0]
@@ -443,13 +458,10 @@ static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
.addFeature(CpuInfo::kX86FeatureSSE2);
if (regs.edx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMT);
// AMD sets multi-threading ON if it has two or more cores.
if (cpuInfo->_hwThreadsCount == 1 && cpuInfo->_vendorId == CpuInfo::kVendorAMD && (regs.edx & 0x10000000U))
cpuInfo->_hwThreadsCount = 2;
// Get the content of XCR0 if supported by CPU and enabled by OS.
if ((regs.ecx & 0x0C000000U) == 0x0C000000U)
if ((regs.ecx & 0x0C000000U) == 0x0C000000U) {
x86CallXGetBV(&xcr0, 0);
}
// Detect AVX+.
if (regs.ecx & 0x10000000U) {
@@ -492,8 +504,9 @@ static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCHWT1);
// Detect AVX2.
if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX))
if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX)) {
if (regs.ebx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX2);
}
// Detect AVX-512+.
if (regs.ebx & 0x00010000U) {
@@ -502,16 +515,19 @@ static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
// - XCR0[7:5] == 111b
// Upper 256-bit of ZMM0-XMM15 and ZMM16-ZMM31 need to be enabled by the OS.
if ((xcr0.eax & 0x000000E6U) == 0x000000E6U) {
cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512F);
cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_F);
if (regs.ebx & 0x00020000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512DQ);
if (regs.ebx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512IFMA);
if (regs.ebx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512PF);
if (regs.ebx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512ER);
if (regs.ebx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512CD);
if (regs.ebx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512BW);
if (regs.ebx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512VL);
if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512VBMI);
if (regs.ebx & 0x00020000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_DQ);
if (regs.ebx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_IFMA);
if (regs.ebx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_PFI);
if (regs.ebx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_ERI);
if (regs.ebx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_CDI);
if (regs.ebx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_BW);
if (regs.ebx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VL);
if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VBMI);
if (regs.ecx & 0x00004000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VPOPCNTDQ);
if (regs.edx & 0x00000004U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_4VNNIW);
if (regs.edx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_4FMAPS);
}
}
}
@@ -533,6 +549,9 @@ static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
// [CPUID EAX=0x80000000...maxId]
// --------------------------------------------------------------------------
// The highest EAX that we understand.
uint32_t kHighestProcessedEAX = 0x80000008U;
// Several CPUID calls are required to get the whole branc string. It's easy
// to copy one DWORD at a time instead of performing a byte copy.
uint32_t* brand = reinterpret_cast<uint32_t*>(cpuInfo->_brandString);
@@ -542,7 +561,7 @@ static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
x86CallCpuId(&regs, i);
switch (i) {
case 0x80000000U:
maxId = Utils::iMin<uint32_t>(regs.eax, 0x80000004);
maxId = std::min<uint32_t>(regs.eax, kHighestProcessedEAX);
break;
case 0x80000001U:
@@ -573,14 +592,16 @@ static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
*brand++ = regs.ebx;
*brand++ = regs.ecx;
*brand++ = regs.edx;
// Go directly to the last one.
if (i == 0x80000004U) i = 0x80000008U - 1;
break;
default:
// Stop the loop, additional features can be detected in the future.
i = maxId;
case 0x80000008U:
if (regs.ebx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLZERO);
break;
}
} while (i++ < maxId);
} while (++i <= maxId);
// Simplify CPU brand string by removing unnecessary spaces.
x86SimplifyBrandString(cpuInfo->_brandString);
@@ -591,7 +612,7 @@ static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
// [asmjit::CpuInfo - Detect - HWThreadsCount]
// ============================================================================
static uint32_t cpuDetectHWThreadsCount() noexcept {
static ASMJIT_INLINE uint32_t cpuDetectHWThreadsCount() noexcept {
#if ASMJIT_OS_WINDOWS
SYSTEM_INFO info;
::GetSystemInfo(&info);
@@ -609,12 +630,9 @@ static uint32_t cpuDetectHWThreadsCount() noexcept {
// [asmjit::CpuInfo - Detect]
// ============================================================================
void CpuInfo::detect() noexcept {
ASMJIT_FAVOR_SIZE void CpuInfo::detect() noexcept {
reset();
// Detect the number of hardware threads available.
_hwThreadsCount = cpuDetectHWThreadsCount();
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
armDetectCpuInfo(this);
#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
@@ -622,6 +640,8 @@ void CpuInfo::detect() noexcept {
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
x86DetectCpuInfo(this);
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
_hwThreadsCount = cpuDetectHWThreadsCount();
}
// ============================================================================
@@ -640,4 +660,4 @@ const CpuInfo& CpuInfo::getHost() noexcept {
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"

View File

@@ -1,4 +1,4 @@
// [AsmJit]
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
@@ -9,10 +9,10 @@
#define _ASMJIT_BASE_CPUINFO_H
// [Dependencies]
#include "../base/globals.h"
#include "../base/arch.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
@@ -25,11 +25,7 @@ namespace asmjit {
//! CPU information.
class CpuInfo {
public:
// --------------------------------------------------------------------------
// [Vendor]
// --------------------------------------------------------------------------
public:
//! CPU vendor ID.
ASMJIT_ENUM(Vendor) {
kVendorNone = 0, //!< Generic or unknown.
@@ -38,38 +34,31 @@ class CpuInfo {
kVendorVIA = 3 //!< VIA vendor.
};
// --------------------------------------------------------------------------
// [ArmFeatures]
// --------------------------------------------------------------------------
//! ARM/ARM64 CPU features.
ASMJIT_ENUM(ArmFeatures) {
kArmFeatureV6, //!< ARMv6 instruction set.
kArmFeatureV7, //!< ARMv7 instruction set.
kArmFeatureV8, //!< ARMv8 instruction set.
kArmFeatureTHUMB, //!< CPU provides THUMB v1 instruction set (ARM only).
kArmFeatureTHUMB2, //!< CPU provides THUMB v2 instruction set (ARM only).
kArmFeatureVFP2, //!< CPU provides VFPv2 instruction set.
kArmFeatureVFP3, //!< CPU provides VFPv3 instruction set.
kArmFeatureVFP4, //!< CPU provides VFPv4 instruction set.
kArmFeatureTHUMB, //!< CPU provides THUMB v1 instruction set (THUMB mode).
kArmFeatureTHUMB2, //!< CPU provides THUMB v2 instruction set (THUMB mode).
kArmFeatureVFPv2, //!< CPU provides VFPv2 instruction set.
kArmFeatureVFPv3, //!< CPU provides VFPv3 instruction set.
kArmFeatureVFPv4, //!< CPU provides VFPv4 instruction set.
kArmFeatureVFP_D32, //!< CPU provides 32 VFP-D (64-bit) registers.
kArmFeatureNEON, //!< CPU provides NEON instruction set.
kArmFeatureDSP, //!< CPU provides DSP extensions.
kArmFeatureIDIV, //!< CPU provides hardware support for SDIV and UDIV.
kArmFeatureEDSP, //!< CPU provides EDSP extensions.
kArmFeatureASIMD, //!< CPU provides 'Advanced SIMD'.
kArmFeatureIDIVA, //!< CPU provides hardware SDIV and UDIV (ARM mode).
kArmFeatureIDIVT, //!< CPU provides hardware SDIV and UDIV (THUMB mode).
kArmFeatureAES, //!< CPU provides AES instructions (ARM64 only).
kArmFeatureCRC32, //!< CPU provides CRC32 instructions (ARM64 only).
kArmFeatureCRC32, //!< CPU provides CRC32 instructions.
kArmFeaturePMULL, //!< CPU provides PMULL instructions (ARM64 only).
kArmFeatureSHA1, //!< CPU provides SHA1 instructions (ARM64 only).
kArmFeatureSHA256, //!< CPU provides SHA256 instructions (ARM64 only).
kArmFeatureSHA1, //!< CPU provides SHA1 instructions.
kArmFeatureSHA256, //!< CPU provides SHA256 instructions.
kArmFeatureAtomics64, //!< CPU provides 64-bit load/store atomics (ARM64 only).
kArmFeaturesCount //!< Count of ARM/ARM64 CPU features.
};
// --------------------------------------------------------------------------
// [X86Features]
// --------------------------------------------------------------------------
//! X86/X64 CPU features.
ASMJIT_ENUM(X86Features) {
kX86FeatureNX = 0, //!< CPU has Not-Execute-Bit.
@@ -82,6 +71,7 @@ class CpuInfo {
kX86FeatureCLFLUSH, //!< CPU has CLFUSH.
kX86FeatureCLFLUSH_OPT, //!< CPU has CLFUSH (optimized).
kX86FeatureCLWB, //!< CPU has CLWB.
kX86FeatureCLZERO, //!< CPU has CLZERO.
kX86FeaturePCOMMIT, //!< CPU has PCOMMIT.
kX86FeaturePREFETCH, //!< CPU has PREFETCH.
kX86FeaturePREFETCHWT1, //!< CPU has PREFETCHWT1.
@@ -90,8 +80,8 @@ class CpuInfo {
kX86FeatureFXSR_OPT, //!< CPU has FXSAVE/FXRSTOR (optimized).
kX86FeatureMMX, //!< CPU has MMX.
kX86FeatureMMX2, //!< CPU has extended MMX.
kX86Feature3DNOW, //!< CPU has 3dNow!
kX86Feature3DNOW2, //!< CPU has enhanced 3dNow!
kX86Feature3DNOW, //!< CPU has 3DNOW!
kX86Feature3DNOW2, //!< CPU has enhanced 3DNOW!
kX86FeatureSSE, //!< CPU has SSE.
kX86FeatureSSE2, //!< CPU has SSE2.
kX86FeatureSSE3, //!< CPU has SSE3.
@@ -128,23 +118,22 @@ class CpuInfo {
kX86FeatureRTM, //!< CPU has RTM.
kX86FeatureERMS, //!< CPU has ERMS (enhanced REP MOVSB/STOSB).
kX86FeatureFSGSBASE, //!< CPU has FSGSBASE.
kX86FeatureAVX512F, //!< CPU has AVX-512F (foundation).
kX86FeatureAVX512CD, //!< CPU has AVX-512CD (conflict detection).
kX86FeatureAVX512PF, //!< CPU has AVX-512PF (prefetch instructions).
kX86FeatureAVX512ER, //!< CPU has AVX-512ER (exponential and reciprocal instructions).
kX86FeatureAVX512DQ, //!< CPU has AVX-512DQ (DWORD/QWORD).
kX86FeatureAVX512BW, //!< CPU has AVX-512BW (BYTE/WORD).
kX86FeatureAVX512VL, //!< CPU has AVX VL (vector length extensions).
kX86FeatureAVX512IFMA, //!< CPU has AVX IFMA (integer fused multiply add using 52-bit precision).
kX86FeatureAVX512VBMI, //!< CPU has AVX VBMI (vector byte manipulation instructions).
kX86FeatureAVX512_F, //!< CPU has AVX512-F (foundation).
kX86FeatureAVX512_CDI, //!< CPU has AVX512-CDI (conflict detection).
kX86FeatureAVX512_PFI, //!< CPU has AVX512-PFI (prefetch instructions).
kX86FeatureAVX512_ERI, //!< CPU has AVX512-ERI (exponential and reciprocal).
kX86FeatureAVX512_DQ, //!< CPU has AVX512-DQ (DWORD/QWORD).
kX86FeatureAVX512_BW, //!< CPU has AVX512-BW (BYTE/WORD).
kX86FeatureAVX512_VL, //!< CPU has AVX512-VL (vector length extensions).
kX86FeatureAVX512_IFMA, //!< CPU has AVX512-IFMA (integer fused-multiply-add using 52-bit precision).
kX86FeatureAVX512_VBMI, //!< CPU has AVX512-VBMI (vector byte manipulation).
kX86FeatureAVX512_VPOPCNTDQ, //!< CPU has AVX512-VPOPCNTDQ (VPOPCNT[D|Q] instructions).
kX86FeatureAVX512_4VNNIW, //!< CPU has AVX512-VNNIW (vector NN instructions word variable precision).
kX86FeatureAVX512_4FMAPS, //!< CPU has AVX512-FMAPS (FMA packed single).
kX86FeaturesCount //!< Count of X86/X64 CPU features.
};
// --------------------------------------------------------------------------
// [Other]
// --------------------------------------------------------------------------
//! \internal
enum {
kFeaturesPerUInt32 = static_cast<int>(sizeof(uint32_t)) * 8
@@ -175,9 +164,14 @@ class CpuInfo {
ASMJIT_INLINE CpuInfo() noexcept { reset(); }
// --------------------------------------------------------------------------
// [Reset]
// [Init / Reset]
// --------------------------------------------------------------------------
//! Initialize CpuInfo to the given architecture, see \ArchInfo.
ASMJIT_INLINE void initArch(uint32_t archType, uint32_t archMode = 0) noexcept {
_archInfo.init(archType, archMode);
}
ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(CpuInfo)); }
// --------------------------------------------------------------------------
@@ -190,10 +184,12 @@ class CpuInfo {
// [Accessors]
// --------------------------------------------------------------------------
//! Get CPU architecture, see \Arch.
ASMJIT_INLINE uint32_t getArch() const noexcept { return _arch; }
//! Set CPU architecture, see \Arch.
ASMJIT_INLINE void setArch(uint32_t arch) noexcept { _arch = static_cast<uint8_t>(arch); }
//! Get generic architecture information.
ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _archInfo; }
//! Get CPU architecture type, see \ArchInfo::Type.
ASMJIT_INLINE uint32_t getArchType() const noexcept { return _archInfo.getType(); }
//! Get CPU architecture sub-type, see \ArchInfo::SubType.
ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _archInfo.getSubType(); }
//! Get CPU vendor string.
ASMJIT_INLINE const char* getVendorString() const noexcept { return _vendorString; }
@@ -268,35 +264,21 @@ class CpuInfo {
// --------------------------------------------------------------------------
//! Get the host CPU information.
static ASMJIT_API const CpuInfo& getHost() noexcept;
ASMJIT_API static const CpuInfo& getHost() noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! CPU vendor string.
char _vendorString[16];
//! CPU brand string.
char _brandString[64];
//! CPU architecture, see \ref Arch.
uint8_t _arch;
//! \internal
uint8_t _reserved[3];
//! CPU vendor id, see \ref CpuVendor.
uint32_t _vendorId;
//! CPU family ID.
uint32_t _family;
//! CPU model ID.
uint32_t _model;
//! CPU stepping.
uint32_t _stepping;
//! Number of hardware threads.
uint32_t _hwThreadsCount;
//! CPU features (bit-array).
uint32_t _features[8];
ArchInfo _archInfo; //!< CPU architecture information.
char _vendorString[16]; //!< CPU vendor string.
char _brandString[64]; //!< CPU brand string.
uint32_t _vendorId; //!< CPU vendor id, see \ref Vendor.
uint32_t _family; //!< CPU family ID.
uint32_t _model; //!< CPU model ID.
uint32_t _stepping; //!< CPU stepping.
uint32_t _hwThreadsCount; //!< Number of hardware threads.
uint32_t _features[8]; //!< CPU features (bit-array).
// Architecture specific data.
union {
@@ -310,7 +292,7 @@ class CpuInfo {
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CPUINFO_H

186
src/asmjit/base/func.cpp Normal file
View File

@@ -0,0 +1,186 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/arch.h"
#include "../base/func.h"
#if defined(ASMJIT_BUILD_X86)
#include "../x86/x86internal_p.h"
#include "../x86/x86operand.h"
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
#include "../arm/arminternal_p.h"
#include "../arm/armoperand.h"
#endif // ASMJIT_BUILD_ARM
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CallConv - Init / Reset]
// ============================================================================
// Initializes this calling convention from the convention id `ccId`.
//
// The object is reset first, then the call is dispatched to the backend that
// owns the convention's family (X86 or ARM). Returns `kErrorInvalidArgument`
// when `ccId` doesn't belong to any family compiled into this build.
ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId) noexcept {
  reset();
#if defined(ASMJIT_BUILD_X86)
  if (CallConv::isX86Family(ccId))
    return X86Internal::initCallConv(*this, ccId);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (CallConv::isArmFamily(ccId))
    return ArmInternal::initCallConv(*this, ccId);
#endif // ASMJIT_BUILD_ARM
  // Either an unknown convention id or its backend wasn't compiled in.
  return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncDetail - Init / Reset]
// ============================================================================
// Initializes the function detail from the signature `sign`.
//
// Validates the argument count, initializes the calling convention, then
// de-abstracts each argument's type-id (mapping abstract int/uintptr-like
// ids to concrete ones based on the target's GP register size) and records
// the return value, if any. Finally dispatches to the architecture-specific
// initializer to assign registers / stack locations.
//
// Returns `kErrorInvalidArgument` if the signature has too many arguments
// or the calling convention doesn't belong to a compiled-in backend.
ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& sign) {
  uint32_t ccId = sign.getCallConv();
  CallConv& cc = _callConv;
  uint32_t argCount = sign.getArgCount();
  if (ASMJIT_UNLIKELY(argCount > kFuncArgCount))
    return DebugUtils::errored(kErrorInvalidArgument);
  ASMJIT_PROPAGATE(cc.init(ccId));
  // 32-bit X86 uses 4-byte GP registers; every other target here uses 8-byte.
  uint32_t gpSize = (cc.getArchType() == ArchInfo::kTypeX86) ? 4 : 8;
  uint32_t deabstractDelta = TypeId::deabstractDeltaOfSize(gpSize);
  const uint8_t* args = sign.getArgs();
  // NOTE: Compare unsigned to unsigned - the original cast to `int32_t`
  // caused a signed/unsigned comparison (harmless here as `argCount` is
  // bounded by `kFuncArgCount`, but it triggers -Wsign-compare).
  for (uint32_t i = 0; i < argCount; i++) {
    Value& arg = _args[i];
    arg.initTypeId(TypeId::deabstract(args[i], deabstractDelta));
  }
  _argCount = static_cast<uint8_t>(argCount);
  uint32_t ret = sign.getRet();
  if (ret != TypeId::kVoid) {
    _rets[0].initTypeId(TypeId::deabstract(ret, deabstractDelta));
    _retCount = 1;
  }
#if defined(ASMJIT_BUILD_X86)
  if (CallConv::isX86Family(ccId))
    return X86Internal::initFuncDetail(*this, sign, gpSize);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (CallConv::isArmFamily(ccId))
    return ArmInternal::initFuncDetail(*this, sign, gpSize);
#endif // ASMJIT_BUILD_ARM
  // We should never reach here - if `cc.init()` succeeded then there has to
  // be an implementation for the current architecture. However, stay safe.
  return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncFrameLayout - Init / Reset]
// ============================================================================
// Initializes the frame layout from the function detail `func` and the
// frame information `ffi`, dispatching by the calling convention's family.
//
// Returns `kErrorInvalidArgument` when the convention's backend was not
// compiled into this build.
ASMJIT_FAVOR_SIZE Error FuncFrameLayout::init(const FuncDetail& func, const FuncFrameInfo& ffi) noexcept {
  uint32_t ccId = func.getCallConv().getId();
#if defined(ASMJIT_BUILD_X86)
  if (CallConv::isX86Family(ccId))
    return X86Internal::initFrameLayout(*this, func, ffi);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (CallConv::isArmFamily(ccId))
    return ArmInternal::initFrameLayout(*this, func, ffi);
#endif // ASMJIT_BUILD_ARM
  return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncArgsMapper]
// ============================================================================
// Updates the frame information `ffi` from the mapped function arguments.
//
// Requires an attached `FuncDetail` (returns `kErrorInvalidState` without
// one); otherwise dispatches by the calling convention's family and returns
// `kErrorInvalidArch` when no matching backend was compiled in.
ASMJIT_FAVOR_SIZE Error FuncArgsMapper::updateFrameInfo(FuncFrameInfo& ffi) const noexcept {
  const FuncDetail* func = getFuncDetail();
  if (!func) return DebugUtils::errored(kErrorInvalidState);
  uint32_t ccId = func->getCallConv().getId();
#if defined(ASMJIT_BUILD_X86)
  if (CallConv::isX86Family(ccId))
    return X86Internal::argsToFrameInfo(*this, ffi);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (CallConv::isArmFamily(ccId))
    return ArmInternal::argsToFrameInfo(*this, ffi);
#endif // ASMJIT_BUILD_ARM
  return DebugUtils::errored(kErrorInvalidArch);
}
// ============================================================================
// [asmjit::FuncUtils]
// ============================================================================
// Emits a function prolog described by `layout` through `emitter`.
//
// The call is routed to the backend that matches the emitter's target
// architecture; `kErrorInvalidArch` is returned when no compiled-in backend
// matches.
ASMJIT_FAVOR_SIZE Error FuncUtils::emitProlog(CodeEmitter* emitter, const FuncFrameLayout& layout) {
  const ArchInfo& archInfo = emitter->getArchInfo();

#if defined(ASMJIT_BUILD_X86)
  if (archInfo.isX86Family())
    return X86Internal::emitProlog(static_cast<X86Emitter*>(emitter), layout);
#endif // ASMJIT_BUILD_X86

#if defined(ASMJIT_BUILD_ARM)
  if (archInfo.isArmFamily())
    return ArmInternal::emitProlog(static_cast<ArmEmitter*>(emitter), layout);
#endif // ASMJIT_BUILD_ARM

  return DebugUtils::errored(kErrorInvalidArch);
}
// Emits a function epilog described by `layout` through `emitter`.
//
// Mirrors `emitProlog` - dispatches on the emitter's target architecture and
// returns `kErrorInvalidArch` if no compiled-in backend matches.
ASMJIT_FAVOR_SIZE Error FuncUtils::emitEpilog(CodeEmitter* emitter, const FuncFrameLayout& layout) {
  const ArchInfo& archInfo = emitter->getArchInfo();

#if defined(ASMJIT_BUILD_X86)
  if (archInfo.isX86Family())
    return X86Internal::emitEpilog(static_cast<X86Emitter*>(emitter), layout);
#endif // ASMJIT_BUILD_X86

#if defined(ASMJIT_BUILD_ARM)
  if (archInfo.isArmFamily())
    return ArmInternal::emitEpilog(static_cast<ArmEmitter*>(emitter), layout);
#endif // ASMJIT_BUILD_ARM

  return DebugUtils::errored(kErrorInvalidArch);
}
// Emits the code that shuffles function arguments into the registers / stack
// slots described by `args`, honoring the frame `layout`.
//
// Dispatches on the emitter's target architecture; returns
// `kErrorInvalidArch` if no compiled-in backend matches.
ASMJIT_FAVOR_SIZE Error FuncUtils::allocArgs(CodeEmitter* emitter, const FuncFrameLayout& layout, const FuncArgsMapper& args) {
  const ArchInfo& archInfo = emitter->getArchInfo();

#if defined(ASMJIT_BUILD_X86)
  if (archInfo.isX86Family())
    return X86Internal::allocArgs(static_cast<X86Emitter*>(emitter), layout, args);
#endif // ASMJIT_BUILD_X86

#if defined(ASMJIT_BUILD_ARM)
  if (archInfo.isArmFamily())
    return ArmInternal::allocArgs(static_cast<ArmEmitter*>(emitter), layout, args);
#endif // ASMJIT_BUILD_ARM

  return DebugUtils::errored(kErrorInvalidArch);
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

1256
src/asmjit/base/func.h Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -9,9 +9,10 @@
// [Dependencies]
#include "../base/globals.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
@@ -20,7 +21,7 @@ namespace asmjit {
// ============================================================================
#if !defined(ASMJIT_DISABLE_TEXT)
static const char errorMessages[] = {
static const char errorMessages[] =
"Ok\0"
"No heap memory\0"
"No virtual memory\0"
@@ -28,45 +29,60 @@ static const char errorMessages[] = {
"Invalid state\0"
"Invalid architecture\0"
"Not initialized\0"
"Already initialized\0"
"Feature not enabled\0"
"Slot occupied\0"
"No code generated\0"
"Code too large\0"
"Invalid label\0"
"Label index overflow\0"
"Label already bound\0"
"Unknown instruction\0"
"Illegal instruction\0"
"Illegal addressing\0"
"Illegal displacement\0"
"Overlapped arguments\0"
"Unknown error\0"
};
static const char* findPackedString(const char* p, uint32_t id, uint32_t maxId) noexcept {
uint32_t i = 0;
if (id > maxId)
id = maxId;
while (i < id) {
while (p[0])
p++;
p++;
i++;
}
return p;
}
"Label already defined\0"
"Label name too long\0"
"Invalid label name\0"
"Invalid parent label\0"
"Non-local label can't have parent\0"
"Relocation index overflow\0"
"Invalid relocation entry\0"
"Invalid instruction\0"
"Invalid register type\0"
"Invalid register kind\0"
"Invalid register's physical id\0"
"Invalid register's virtual id\0"
"Invalid rex prefix\0"
"Invalid mask, expected {k}\0"
"Invalid use of {k}\0"
"Invalid use of {k}{z}\0"
"Invalid broadcast {1tox}\0"
"Invalid {sae} or {rc} option\0"
"Invalid address\0"
"Invalid address index\0"
"Invalid address scale\0"
"Invalid use of 64-bit address\0"
"Invalid displacement\0"
"Invalid segment\0"
"Operand size mismatch\0"
"Ambiguous operand size\0"
"Invalid type-info\0"
"Invalid use of a low 8-bit GPB register\0"
"Invalid use of a 64-bit GPQ register in 32-bit mode\0"
"Invalid use of an 80-bit float\0"
"No more physical registers\0"
"Overlapped registers\0"
"Overlapping register and arguments base-address register\0"
"Unknown error\0";
#endif // ASMJIT_DISABLE_TEXT
const char* DebugUtils::errorAsString(Error err) noexcept {
ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
#if !defined(ASMJIT_DISABLE_TEXT)
return findPackedString(errorMessages, err, kErrorCount);
return Utils::findPackedString(errorMessages, std::min<Error>(err, kErrorCount));
#else
static const char noMessage[] = "";
return noMessage;
#endif
}
void DebugUtils::debugOutput(const char* str) noexcept {
ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
#if ASMJIT_OS_WINDOWS
::OutputDebugStringA(str);
#else
@@ -74,7 +90,7 @@ void DebugUtils::debugOutput(const char* str) noexcept {
#endif
}
void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
char str[1024];
snprintf(str, 1024,
@@ -91,4 +107,4 @@ void DebugUtils::assertionFailed(const char* file, int line, const char* msg) no
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"

View File

@@ -9,10 +9,10 @@
#define _ASMJIT_BASE_GLOBALS_H
// [Dependencies]
#include "../build.h"
#include "../asmjit_build.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
@@ -20,25 +20,13 @@ namespace asmjit {
//! \{
// ============================================================================
// [asmjit::TypeDefs]
// [asmjit::Globals]
// ============================================================================
//! AsmJit error code (unsigned integer).
typedef uint32_t Error;
enum { kInvalidValue = 0xFFFFFFFFU };
//! 64-bit unsigned pointer, compatible with JIT and non-JIT generators.
//!
//! This is the preferred pointer type to use with AsmJit library. It has a
//! capability to hold any pointer for any architecture making it an ideal
//! candidate for a cross-platform code generator.
typedef uint64_t Ptr;
//! like \ref Ptr, but signed.
typedef int64_t SignedPtr;
// ============================================================================
// [asmjit::GlobalDefs]
// ============================================================================
//! AsmJit globals.
namespace Globals {
//! Invalid index
//!
@@ -48,432 +36,69 @@ typedef int64_t SignedPtr;
static const size_t kInvalidIndex = ~static_cast<size_t>(0);
//! Invalid base address.
static const Ptr kNoBaseAddress = static_cast<Ptr>(static_cast<SignedPtr>(-1));
static const uint64_t kNoBaseAddress = ~static_cast<uint64_t>(0);
//! Global constants.
ASMJIT_ENUM(GlobalDefs) {
//! Invalid value or operand id.
kInvalidValue = 0xFFFFFFFF,
//! Invalid register index.
kInvalidReg = 0xFF,
//! Invalid variable type.
kInvalidVar = 0xFF,
//! Global definitions.
ASMJIT_ENUM(Defs) {
//! Invalid instruction id.
kInvalidInstId = 0,
//! Invalid register id.
kInvalidRegId = 0xFF,
//! Host memory allocator overhead.
//!
//! The overhead is decremented from all zone allocators so the operating
  //! system doesn't have to allocate one extra virtual page to keep track of
//! the requested memory block.
//!
//! The number is actually a guess.
kMemAllocOverhead = sizeof(intptr_t) * 4,
//! Memory grow threshold.
//!
//! After the grow threshold is reached the capacity won't be doubled
//! anymore.
kMemAllocGrowMax = 8192 * 1024
kAllocOverhead = static_cast<int>(sizeof(intptr_t) * 4),
//! Aggressive growing strategy threshold.
kAllocThreshold = 8192 * 1024
};
// ============================================================================
// [asmjit::ArchId]
// ============================================================================
ASMJIT_ENUM(Limits) {
//! Count of register kinds that are important to Function API and CodeCompiler.
//! The target architecture can define more register kinds for special registers,
//! but these will never map to virtual registers and will never be used to pass
//! and return function arguments and function return values, respectively.
kMaxVRegKinds = 4,
//! CPU architecture identifier.
ASMJIT_ENUM(ArchId) {
//! No/Unknown architecture.
kArchNone = 0,
//! Maximum number of physical registers of all kinds of all supported
//! architectures. This is only important for \ref CodeCompiler and its
//! \ref RAPass (register allocator pass).
//!
//! NOTE: The distribution of these registers is architecture specific.
kMaxPhysRegs = 64,
//! X86 architecture (32-bit).
kArchX86 = 1,
//! X64 architecture (64-bit), also called AMD64.
kArchX64 = 2,
//! X32 architecture (64-bit with 32-bit pointers) (NOT USED ATM).
kArchX32 = 3,
//! Maximum alignment.
kMaxAlignment = 64,
//! Arm architecture (32-bit).
kArchArm32 = 4,
//! Arm64 architecture (64-bit).
kArchArm64 = 5,
#if ASMJIT_ARCH_X86
kArchHost = kArchX86
#elif ASMJIT_ARCH_X64
kArchHost = kArchX64
#elif ASMJIT_ARCH_ARM32
kArchHost = kArchArm32
#elif ASMJIT_ARCH_ARM64
kArchHost = kArchArm64
#else
# error "[asmjit] Unsupported host architecture."
#endif
//! Maximum label or symbol length in bytes (take into consideration that a
//! single UTF-8 character can take more than single byte to encode it).
kMaxLabelLength = 2048
};
} // Globals namespace
// ============================================================================
// [asmjit::CallConv]
// [asmjit::AnyInst]
// ============================================================================
//! Function calling convention.
//!
//! Calling convention is a scheme that defines how function arguments are
//! passed and how the return value handled. In assembler programming it's
//! always needed to comply with function calling conventions, because even
//! small inconsistency can cause undefined behavior or application's crash.
//!
//! Platform Independent Conventions
//! --------------------------------
//!
//! - `kCallConvHost` - Should match the current C++ compiler native calling
//! convention.
//!
//! X86/X64 Specific Conventions
//! ----------------------------
//!
//! List of calling conventions for 32-bit x86 mode:
//! - `kCallConvX86CDecl` - Calling convention for C runtime.
//! - `kCallConvX86StdCall` - Calling convention for WinAPI functions.
//! - `kCallConvX86MsThisCall` - Calling convention for C++ members under
//! Windows (produced by MSVC and all MSVC compatible compilers).
//! - `kCallConvX86MsFastCall` - Fastest calling convention that can be used
//! by MSVC compiler.
//! - `kCallConvX86BorlandFastCall` - Borland fastcall convention.
//! - `kCallConvX86GccFastCall` - GCC fastcall convention (2 register arguments).
//! - `kCallConvX86GccRegParm1` - GCC regparm(1) convention.
//! - `kCallConvX86GccRegParm2` - GCC regparm(2) convention.
//! - `kCallConvX86GccRegParm3` - GCC regparm(3) convention.
//!
//! List of calling conventions for 64-bit x86 mode (x64):
//! - `kCallConvX64Win` - Windows 64-bit calling convention (WIN64 ABI).
//! - `kCallConvX64Unix` - Unix 64-bit calling convention (AMD64 ABI).
//!
//! ARM Specific Conventions
//! ------------------------
//!
//! List of ARM calling conventions:
//! - `kCallConvArm32SoftFP` - Legacy calling convention, floating point
//! arguments are passed via GP registers.
//! - `kCallConvArm32HardFP` - Modern calling convention, uses VFP registers
//! to pass floating point arguments.
ASMJIT_ENUM(CallConv) {
//! Calling convention is invalid (can't be used).
kCallConvNone = 0,
//! Definitions and utilities related to instructions used by all architectures.
namespace AnyInst {
// --------------------------------------------------------------------------
// [X86]
// --------------------------------------------------------------------------
//! X86 `__cdecl` calling convention (used by C runtime and libraries).
//!
//! Compatible across MSVC and GCC.
//!
//! Arguments direction:
//! - Right to left.
//!
//! Stack is cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86CDecl = 1,
//! X86 `__stdcall` calling convention (used mostly by WinAPI).
//!
//! Compatible across MSVC and GCC.
//!
//! Arguments direction:
//! - Right to left.
//!
//! Stack is cleaned by:
//! - Callee.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86StdCall = 2,
//! X86 `__thiscall` calling convention (MSVC/Intel specific).
//!
//! This is MSVC (and Intel) specific calling convention used when targeting
//! Windows platform for C++ class methods. Implicit `this` pointer (defined
//! as the first argument) is stored in `ecx` register instead of storing it
//! on the stack.
//!
//! This calling convention is implicitly used by MSVC for class functions.
//!
//! C++ class functions that have variable number of arguments use `__cdecl`
//! calling convention instead.
//!
//! Arguments direction:
//! - Right to left (except for the first argument passed in `ecx`).
//!
//! Stack is cleaned by:
//! - Callee.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86MsThisCall = 3,
//! X86 `__fastcall` convention (MSVC/Intel specific).
//!
//! The first two arguments (evaluated from the left to the right) are passed
//! in `ecx` and `edx` registers, all others on the stack from the right to
//! the left.
//!
//! Arguments direction:
//! - Right to left (except for the first two integers passed in `ecx` and `edx`).
//!
//! Stack is cleaned by:
//! - Callee.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
//!
//! NOTE: This calling convention differs from GCC's one.
kCallConvX86MsFastCall = 4,
//! X86 `__fastcall` convention (Borland specific).
//!
//! The first two arguments (evaluated from the left to the right) are passed
//! in `ecx` and `edx` registers, all others on the stack from the left to
//! the right.
//!
//! Arguments direction:
//! - Left to right (except for the first two integers passed in `ecx` and `edx`).
//!
//! Stack is cleaned by:
//! - Callee.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
//!
//! NOTE: Arguments on the stack are in passed in left to right order, which
//! is really Borland specific, all other `__fastcall` calling conventions
//! use right to left order.
kCallConvX86BorlandFastCall = 5,
//! X86 `__fastcall` convention (GCC specific).
//!
//! The first two arguments (evaluated from the left to the right) are passed
//! in `ecx` and `edx` registers, all others on the stack from the right to
//! the left.
//!
//! Arguments direction:
//! - Right to left (except for the first two integers passed in `ecx` and `edx`).
//!
//! Stack is cleaned by:
//! - Callee.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
//!
//! NOTE: This calling convention should be compatible with `kCallConvX86MsFastCall`.
kCallConvX86GccFastCall = 6,
//! X86 `regparm(1)` convention (GCC specific).
//!
//! The first argument (evaluated from the left to the right) is passed in
//! `eax` register, all others on the stack from the right to the left.
//!
//! Arguments direction:
//! - Right to left (except for the first integer passed in `eax`).
//!
//! Stack is cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86GccRegParm1 = 7,
//! X86 `regparm(2)` convention (GCC specific).
//!
//! The first two arguments (evaluated from the left to the right) are passed
//! in `ecx` and `edx` registers, all others on the stack from the right to
//! the left.
//!
//! Arguments direction:
//! - Right to left (except for the first two integers passed in `ecx` and `edx`).
//!
//! Stack is cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86GccRegParm2 = 8,
//! X86 `regparm(3)` convention (GCC specific).
//!
//! Three first parameters (evaluated from left-to-right) are in
//! EAX:EDX:ECX registers, all others on the stack in right-to-left direction.
//!
//! Arguments direction:
  //! - Right to left (except for the first three integers passed in `eax`,
  //! `edx`, and `ecx`).
//!
//! Stack is cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86GccRegParm3 = 9,
// --------------------------------------------------------------------------
// [X64]
// --------------------------------------------------------------------------
//! X64 calling convention used by Windows platform (WIN64-ABI).
//!
//! The first 4 arguments are passed in the following registers:
//! - 1. 32/64-bit integer in `rcx` and floating point argument in `xmm0`
//! - 2. 32/64-bit integer in `rdx` and floating point argument in `xmm1`
//! - 3. 32/64-bit integer in `r8` and floating point argument in `xmm2`
//! - 4. 32/64-bit integer in `r9` and floating point argument in `xmm3`
//!
//! If one or more of the first four arguments doesn't match the list above
//! it is simply skipped. WIN64-ABI is very specific about this.
//!
//! All other arguments are pushed on the stack from the right to the left.
//! Stack has to be aligned by 16 bytes, always. There is also a 32-byte
//! shadow space on the stack that can be used to save up to four 64-bit
//! registers.
//!
//! Arguments direction:
//! - Right to left (except for all parameters passed in registers).
//!
//! Stack cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `rax`.
//! - Floating point - `xmm0`.
//!
//! Stack is always aligned to 16 bytes.
//!
//! More information about this calling convention can be found on MSDN
//! <http://msdn.microsoft.com/en-us/library/9b372w95.aspx>.
kCallConvX64Win = 10,
//! X64 calling convention used by Unix platforms (AMD64-ABI).
//!
//! First six 32 or 64-bit integer arguments are passed in `rdi`, `rsi`,
//! `rdx`, `rcx`, `r8`, and `r9` registers. First eight floating point or xmm
//! arguments are passed in `xmm0`, `xmm1`, `xmm2`, `xmm3`, `xmm4`, `xmm5`,
//! `xmm6`, and `xmm7` registers.
//!
//! There is also a red zone below the stack pointer that can be used by the
//! function. The red zone is typically from [rsp-128] to [rsp-8], however,
//! red zone can also be disabled.
//!
//! Arguments direction:
//! - Right to left (except for all arguments passed in registers).
//!
//! Stack cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `rax`.
//! - Floating point - `xmm0`.
//!
//! Stack is always aligned to 16 bytes.
kCallConvX64Unix = 11,
// --------------------------------------------------------------------------
// [ARM]
// --------------------------------------------------------------------------
kCallConvArm32SoftFP = 16,
kCallConvArm32HardFP = 17,
// --------------------------------------------------------------------------
// [Internal]
// --------------------------------------------------------------------------
//! \internal
_kCallConvX86Start = 1,
//! \internal
_kCallConvX86End = 9,
//! \internal
_kCallConvX64Start = 10,
//! \internal
_kCallConvX64End = 11,
//! \internal
_kCallConvArmStart = 16,
//! \internal
_kCallConvArmEnd = 17,
// --------------------------------------------------------------------------
// [Host]
// --------------------------------------------------------------------------
#if defined(ASMJIT_DOCGEN)
//! Default calling convention based on the current compiler's settings.
//!
//! NOTE: This should be always the same as `kCallConvHostCDecl`, but some
//! compilers allow to override the default calling convention. Overriding
//! is not detected at the moment.
kCallConvHost = DETECTED_AT_COMPILE_TIME,
//! Default C calling convention based on the current compiler's settings.
kCallConvHostCDecl = DETECTED_AT_COMPILE_TIME,
//! Compatibility for `__stdcall` calling convention.
//!
//! NOTE: This enumeration is always set to a value which is compatible with
//! the current compiler's `__stdcall` calling convention. In 64-bit mode
//! there is no such convention and the value is mapped to `kCallConvX64Win`
//! or `kCallConvX64Unix`, depending on the host architecture.
kCallConvHostStdCall = DETECTED_AT_COMPILE_TIME,
//! Compatibility for `__fastcall` calling convention.
//!
//! NOTE: This enumeration is always set to a value which is compatible with
//! the current compiler's `__fastcall` calling convention. In 64-bit mode
//! there is no such convention and the value is mapped to `kCallConvX64Win`
//! or `kCallConvX64Unix`, depending on the host architecture.
kCallConvHostFastCall = DETECTED_AT_COMPILE_TIME
#elif ASMJIT_ARCH_X86
// X86 Host Support.
kCallConvHost = kCallConvX86CDecl,
kCallConvHostCDecl = kCallConvX86CDecl,
kCallConvHostStdCall = kCallConvX86StdCall,
kCallConvHostFastCall =
ASMJIT_CC_MSC ? kCallConvX86MsFastCall :
ASMJIT_CC_GCC ? kCallConvX86GccFastCall :
ASMJIT_CC_CLANG ? kCallConvX86GccFastCall :
ASMJIT_CC_CODEGEAR ? kCallConvX86BorlandFastCall : kCallConvNone
#elif ASMJIT_ARCH_X64
// X64 Host Support.
kCallConvHost = ASMJIT_OS_WINDOWS ? kCallConvX64Win : kCallConvX64Unix,
// These don't exist in 64-bit mode.
kCallConvHostCDecl = kCallConvHost,
kCallConvHostStdCall = kCallConvHost,
kCallConvHostFastCall = kCallConvHost
#elif ASMJIT_ARCH_ARM32
# if defined(__SOFTFP__)
kCallConvHost = kCallConvArm32SoftFP,
# else
kCallConvHost = kCallConvArm32HardFP,
# endif
// These don't exist on ARM.
kCallConvHostCDecl = kCallConvHost,
kCallConvHostStdCall = kCallConvHost,
kCallConvHostFastCall = kCallConvHost
#else
# error "[asmjit] Couldn't determine the target's calling convention."
#endif
//! Classification of an instruction's effect on control flow.
ASMJIT_ENUM(JumpType) {
kJumpTypeNone = 0, //!< Instruction doesn't jump (regular instruction).
kJumpTypeDirect = 1, //!< Instruction is an unconditional (direct) jump.
kJumpTypeConditional = 2, //!< Instruction is a conditional jump.
kJumpTypeCall = 3, //!< Instruction is a function call.
kJumpTypeReturn = 4 //!< Instruction is a function return.
};
} // AnyInst namespace
// ============================================================================
// [asmjit::ErrorCode]
// [asmjit::Error]
// ============================================================================
//! AsmJit error type (uint32_t).
typedef uint32_t Error;
//! AsmJit error codes.
ASMJIT_ENUM(ErrorCode) {
//! No error (success).
@@ -491,72 +116,209 @@ ASMJIT_ENUM(ErrorCode) {
kErrorInvalidArgument,
//! Invalid state.
//!
//! If this error is returned it means that either you are doing something
//! wrong or AsmJit caught itself by doing something wrong. This error should
//! not be underestimated.
kErrorInvalidState,
//! Invalid architecture.
//! Invalid or incompatible architecture.
kErrorInvalidArch,
//! The object is not initialized.
kErrorNotInitialized,
//! The object is already initialized.
kErrorAlreadyInitialized,
//! Built-in feature was disabled at compile time and it's not available.
kErrorFeatureNotEnabled,
//! CodeHolder can't have attached more than one \ref Assembler at a time.
kErrorSlotOccupied,
//! No code generated.
//!
//! Returned by runtime if the code-generator contains no code.
//! Returned by runtime if the \ref CodeHolder contains no code.
kErrorNoCodeGenerated,
//! Code generated is too large to fit in memory reserved.
//!
//! Returned by `StaticRuntime` in case that the code generated is too large
//! to fit in the memory already reserved for it.
//! Code generated is larger than allowed.
kErrorCodeTooLarge,
//! Attempt to use uninitialized label.
kErrorInvalidLabel,
//! Label index overflow - a single `Assembler` instance can hold more than
//! 2 billion labels (2147483391 to be exact). If there is an attempt to
//! create more labels this error is returned.
kErrorLabelIndexOverflow,
//! Label is already bound.
kErrorLabelAlreadyBound,
//! Label is already defined (named labels).
kErrorLabelAlreadyDefined,
//! Label name is too long.
kErrorLabelNameTooLong,
//! Label must always be local if it's anonymous (without a name).
kErrorInvalidLabelName,
//! Parent id passed to `CodeHolder::newNamedLabelId()` was invalid.
kErrorInvalidParentLabel,
//! Parent id specified for a non-local (global) label.
kErrorNonLocalLabelCantHaveParent,
//! Unknown instruction (an instruction ID is out of bounds or instruction
//! name is invalid).
kErrorUnknownInst,
//! Relocation index overflow.
kErrorRelocIndexOverflow,
//! Invalid relocation entry.
kErrorInvalidRelocEntry,
//! Illegal instruction.
//!
//! This status code can also be returned in X64 mode if AH, BH, CH or DH
//! registers have been used together with a REX prefix. The instruction
//! is not encodable in such case.
//!
//! Example of raising `kErrorIllegalInst` error.
//!
//! ~~~
//! // Invalid address size.
//! a.mov(dword_ptr(eax), al);
//!
//! // Undecodable instruction - AH used with R10, however R10 can only be
//! // encoded by using REX prefix, which conflicts with AH.
//! a.mov(byte_ptr(r10), ah);
//! ~~~
//!
//! NOTE: In debug mode assertion is raised instead of returning an error.
kErrorIllegalInst,
//! Invalid instruction.
kErrorInvalidInstruction,
//! Invalid register type.
kErrorInvalidRegType,
//! Invalid register kind.
kErrorInvalidRegKind,
//! Invalid register's physical id.
kErrorInvalidPhysId,
//! Invalid register's virtual id.
kErrorInvalidVirtId,
//! Invalid REX prefix.
kErrorInvalidRexPrefix,
//! Invalid mask register (not 'k').
kErrorInvalidKMaskReg,
//! Invalid {k} use (not supported by the instruction).
kErrorInvalidKMaskUse,
//! Invalid {k}{z} use (not supported by the instruction).
kErrorInvalidKZeroUse,
//! Invalid broadcast - Currently only related to invalid use of AVX-512 {1tox}.
kErrorInvalidBroadcast,
//! Invalid 'embedded-rounding' {er} or 'suppress-all-exceptions' {sae} (AVX-512).
kErrorInvalidEROrSAE,
//! Invalid address used (not encodable).
kErrorInvalidAddress,
//! Invalid index register used in memory address (not encodable).
kErrorInvalidAddressIndex,
//! Invalid address scale (not encodable).
kErrorInvalidAddressScale,
//! Invalid use of 64-bit address.
kErrorInvalidAddress64Bit,
//! Invalid displacement (not encodable).
kErrorInvalidDisplacement,
//! Invalid segment.
kErrorInvalidSegment,
//! Illegal (unencodable) addressing used.
kErrorIllegalAddresing,
//! Mismatching operand size (size of multiple operands doesn't match the operation size).
kErrorOperandSizeMismatch,
//! Ambiguous operand size (memory has zero size while it's required to determine the operation type).
kErrorAmbiguousOperandSize,
//! Illegal (unencodable) displacement used.
//!
//! X86/X64 Specific
//! ----------------
//!
//! Short form of jump instruction has been used, but the displacement is out
//! of bounds.
kErrorIllegalDisplacement,
//! Invalid TypeId.
kErrorInvalidTypeId,
//! Invalid use of a 8-bit GPB-HIGH register.
kErrorInvalidUseOfGpbHi,
//! Invalid use of a 64-bit GPQ register in 32-bit mode.
kErrorInvalidUseOfGpq,
//! Invalid use of an 80-bit float (TypeId::kF80).
kErrorInvalidUseOfF80,
//! A variable has been assigned more than once to a function argument (Compiler).
kErrorOverlappedArgs,
//! AsmJit requires a physical register, but no one is available.
kErrorNoMorePhysRegs,
//! A variable has been assigned more than once to a function argument (CodeCompiler).
kErrorOverlappedRegs,
//! Invalid register to hold stack arguments offset.
kErrorOverlappingStackRegWithRegArg,
//! Count of AsmJit error codes.
kErrorCount
};
//! \}
// ============================================================================
// [asmjit::Internal]
// ============================================================================
//! \internal
//!
//! Memory-allocation hooks used throughout AsmJit. Either all three
//! ASMJIT_CUSTOM_[ALLOC|REALLOC|FREE] macros are user-provided, or none are
//! (in which case the C runtime's malloc/realloc/free are used); mixing them
//! is a configuration error caught at compile time below.
namespace Internal {
#if defined(ASMJIT_CUSTOM_ALLOC) && \
defined(ASMJIT_CUSTOM_REALLOC) && \
defined(ASMJIT_CUSTOM_FREE)
// User-supplied allocator hooks.
static ASMJIT_INLINE void* allocMemory(size_t size) noexcept { return ASMJIT_CUSTOM_ALLOC(size); }
static ASMJIT_INLINE void* reallocMemory(void* p, size_t size) noexcept { return ASMJIT_CUSTOM_REALLOC(p, size); }
static ASMJIT_INLINE void releaseMemory(void* p) noexcept { ASMJIT_CUSTOM_FREE(p); }
#elif !defined(ASMJIT_CUSTOM_ALLOC) && \
!defined(ASMJIT_CUSTOM_REALLOC) && \
!defined(ASMJIT_CUSTOM_FREE)
// Default allocator - the C runtime heap.
static ASMJIT_INLINE void* allocMemory(size_t size) noexcept { return ::malloc(size); }
static ASMJIT_INLINE void* reallocMemory(void* p, size_t size) noexcept { return ::realloc(p, size); }
static ASMJIT_INLINE void releaseMemory(void* p) noexcept { ::free(p); }
#else
# error "[asmjit] You must provide either none or all of ASMJIT_CUSTOM_[ALLOC|REALLOC|FREE]"
#endif
//! Cast designed to cast between function and void* pointers.
//!
//! Uses a C-style cast because `reinterpret_cast` between function and object
//! pointers is conditionally-supported; the C cast is accepted everywhere.
template<typename Dst, typename Src>
static ASMJIT_INLINE Dst ptr_cast(Src p) noexcept { return (Dst)p; }
} // Internal namespace
//! Casts a `void*` code address (e.g. JIT-allocated memory) to a typed
//! function pointer `Func` so it can be called directly.
template<typename Func>
static ASMJIT_INLINE Func ptr_as_func(void* func) noexcept { return Internal::ptr_cast<Func, void*>(func); }
//! Casts a typed function pointer back to a plain `void*` address.
template<typename Func>
static ASMJIT_INLINE void* func_as_ptr(Func func) noexcept { return Internal::ptr_cast<void*, Func>(func); }
// ============================================================================
// [asmjit::DebugUtils]
// ============================================================================
//! Debugging helpers - error tracing, assertion handling, and the
//! error-propagation macro used across the code base.
namespace DebugUtils {
//! Returns the error `err` passed.
//!
//! Provided for debugging purposes. Putting a breakpoint inside `errored` can
//! help with tracing the origin of any error reported / returned by AsmJit.
static ASMJIT_INLINE Error errored(Error err) noexcept { return err; }
//! Get a printable version of `asmjit::Error` code.
ASMJIT_API const char* errorAsString(Error err) noexcept;
//! Called to output debugging message(s).
ASMJIT_API void debugOutput(const char* str) noexcept;
//! Called on assertion failure.
//!
//! \param file Source file name where it happened.
//! \param line Line in the source file.
//! \param msg Message to display.
//!
//! If you have problems with assertions put a breakpoint at assertionFailed()
//! function (asmjit/base/globals.cpp) and check the call stack to locate the
//! failing code.
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
// In debug builds ASMJIT_ASSERT aborts via assertionFailed(); in release
// builds both macros compile to (almost) nothing.
#if defined(ASMJIT_DEBUG)
# define ASMJIT_ASSERT(exp) \
do { \
if (ASMJIT_LIKELY(exp)) \
break; \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #exp); \
} while (0)
# define ASMJIT_NOT_REACHED() \
do { \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, \
"ASMJIT_NOT_REACHED has been reached"); \
ASMJIT_ASSUME(0); \
} while (0)
#else
# define ASMJIT_ASSERT(exp) ASMJIT_NOP
# define ASMJIT_NOT_REACHED() ASMJIT_ASSUME(0)
#endif // DEBUG
//! \internal
//!
//! Used by AsmJit to propagate a possible `Error` produced by `...` to the caller.
//! Evaluates the expression once and early-returns any non-zero (error) result.
#define ASMJIT_PROPAGATE(...) \
do { \
::asmjit::Error _err = __VA_ARGS__; \
if (ASMJIT_UNLIKELY(_err)) \
return _err; \
} while (0)
} // DebugUtils namespace
// ============================================================================
// [asmjit::Init / NoInit]
@@ -570,97 +332,12 @@ struct _NoInit {};
static const _NoInit NoInit = {};
#endif // !ASMJIT_DOCGEN
// ============================================================================
// [asmjit::DebugUtils]
// ============================================================================
//! Debugging helpers - error stringification, debug output, and the
//! assertion-failure hook used by the ASMJIT_ASSERT macro below.
namespace DebugUtils {
//! Get a printable version of `asmjit::Error` value.
ASMJIT_API const char* errorAsString(Error err) noexcept;
//! \addtogroup asmjit_base
//! \{
//! Called in debug build to output a debugging message caused by assertion
//! failure or tracing.
ASMJIT_API void debugOutput(const char* str) noexcept;
//! Called in debug build on assertion failure.
//!
//! \param file Source file name where it happened.
//! \param line Line in the source file.
//! \param msg Message to display.
//!
//! If you have problems with assertions put a breakpoint at assertionFailed()
//! function (asmjit/base/globals.cpp) and check the call stack to locate the
//! failing code.
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
//! \}
} // DebugUtils namespace
} // asmjit namespace
// ============================================================================
// [ASMJIT_ASSERT]
// ============================================================================
// Debug builds: route assertion failures through DebugUtils::assertionFailed()
// with the source path trimmed to a project-relative offset. Release builds
// compile both macros down to (almost) nothing.
#if defined(ASMJIT_DEBUG)
# define ASMJIT_ASSERT(exp) \
do { \
if (!(exp)) { \
::asmjit::DebugUtils::assertionFailed( \
__FILE__ + ::asmjit::DebugUtils::kSourceRelativePathOffset, \
__LINE__, \
#exp); \
} \
} while (0)
// Unconditionally reports an assertion failure - marks unreachable code.
# define ASMJIT_NOT_REACHED() \
::asmjit::DebugUtils::assertionFailed( \
__FILE__ + ::asmjit::DebugUtils::kSourceRelativePathOffset, \
__LINE__, \
"MUST NOT BE REACHED")
#else
# define ASMJIT_ASSERT(exp) ASMJIT_NOP
# define ASMJIT_NOT_REACHED() ASMJIT_ASSUME(0)
#endif // DEBUG
// ============================================================================
// [ASMJIT_PROPAGATE_ERROR]
// ============================================================================
//! \internal
//!
//! Used by AsmJit to return the `_Exp_` result if it's an error.
//! Evaluates `_Exp_` exactly once and early-returns any non-`kErrorOk` value.
#define ASMJIT_PROPAGATE_ERROR(_Exp_) \
do { \
::asmjit::Error _errval = (_Exp_); \
if (_errval != ::asmjit::kErrorOk) \
return _errval; \
} while (0)
// ============================================================================
// [asmjit_cast<>]
// ============================================================================
//! \addtogroup asmjit_base
//! \{
//! Cast used to cast pointer to function. It's like reinterpret_cast<>,
//! but uses internally C style cast to work with MinGW.
//!
//! If you are using single compiler and `reinterpret_cast<>` works for you,
//! there is no reason to use `asmjit_cast<>`. If you are writing
//! cross-platform software with various compiler support, consider using
//! `asmjit_cast<>` instead of `reinterpret_cast<>`.
template<typename T, typename Z>
static ASMJIT_INLINE T asmjit_cast(Z* p) noexcept { return (T)p; }
//! \}
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_GLOBALS_H

View File

@@ -1,20 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/hlstream.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

File diff suppressed because it is too large Load Diff

View File

@@ -1,194 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if !defined(ASMJIT_DISABLE_LOGGER)
// [Dependencies]
#include "../base/containers.h"
#include "../base/logger.h"
#include "../base/utils.h"
#include <stdarg.h>
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::LogUtil]
// ============================================================================
//! Appends the machine-code bytes and/or the inline comment of one logged
//! instruction to `sb`, column-aligned, and terminates the line with '\n'.
//!
//! `binLen` is the total encoded length, of which the last `imLen` bytes are
//! the immediate and `dispLen` bytes (before the immediate) are the
//! displacement - displacement bytes are rendered as dots. A `binLen` of
//! `kInvalidIndex` means "no binary data" and skips the binary column.
//! Returns false on allocation failure inside the StringBuilder.
bool LogUtil::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept {
size_t currentLen = sb.getLength();
size_t commentLen = comment ? Utils::strLen(comment, kMaxCommentLength) : 0;
ASMJIT_ASSERT(binLen >= dispLen);
if ((binLen != 0 && binLen != kInvalidIndex) || commentLen) {
size_t align = kMaxInstLength;
char sep = ';';
// Two columns: i == 0 is binary data, i == 1 is the comment. Starting the
// loop at 1 when binLen == kInvalidIndex skips the binary column entirely.
for (size_t i = (binLen == kInvalidIndex); i < 2; i++) {
size_t begin = sb.getLength();
// Append align.
if (currentLen < align) {
if (!sb.appendChars(' ', align - currentLen))
return false;
}
// Append separator.
// NOTE: non-short-circuiting `&` is deliberate - both appends must run.
if (sep) {
if (!(sb.appendChar(sep) & sb.appendChar(' ')))
return false;
}
// Append binary data or comment.
if (i == 0) {
// Opcode bytes, then displacement rendered as dots, then immediate.
if (!sb.appendHex(binData, binLen - dispLen - imLen))
return false;
if (!sb.appendChars('.', dispLen * 2))
return false;
if (!sb.appendHex(binData + binLen - imLen, imLen))
return false;
if (commentLen == 0)
break;
}
else {
if (!sb.appendString(comment, commentLen))
return false;
}
currentLen += sb.getLength() - begin;
align += kMaxBinaryLength;
sep = '|';
}
}
return sb.appendChar('\n');
}
// ============================================================================
// [asmjit::Logger - Construction / Destruction]
// ============================================================================
//! Creates a `Logger` with no options set and an empty indentation string.
Logger::Logger() noexcept
  : _options(0) {
  ::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
}

Logger::~Logger() noexcept {}
// ============================================================================
// [asmjit::Logger - Logging]
// ============================================================================
//! Formats `fmt` printf-style into a fixed local buffer and forwards the
//! result to the virtual `logString()`.
//!
//! Messages longer than the buffer are truncated - logging never allocates.
void Logger::logFormat(uint32_t style, const char* fmt, ...) noexcept {
  char buf[1024];
  va_list ap;

  va_start(ap, fmt);
  int ret = vsnprintf(buf, sizeof(buf), fmt, ap);
  va_end(ap);

  // BUGFIX: vsnprintf() returns a negative value on an encoding error. The
  // previous code stored the result directly into `size_t`, so a negative
  // return wrapped around and up to 1023 bytes of uninitialized stack memory
  // were logged. Log nothing in that case.
  if (ret < 0)
    return;

  size_t len = static_cast<size_t>(ret);
  if (len >= sizeof(buf))
    len = sizeof(buf) - 1;

  logString(style, buf, len);
}
//! Logs `size` bytes of raw data as `.data` lines, 16 hex-encoded bytes per
//! line, by repeatedly calling the virtual `logString()`.
void Logger::logBinary(uint32_t style, const void* data, size_t size) noexcept {
static const char prefix[] = ".data ";
static const char hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
const uint8_t* s = static_cast<const uint8_t*>(data);
size_t i = size;
char buffer[128];
// The ".data " prefix stays at the start of the line buffer for every line;
// only the hex payload after it is rewritten per iteration.
::memcpy(buffer, prefix, ASMJIT_ARRAY_SIZE(prefix) - 1);
while (i) {
// Up to 16 input bytes per output line.
uint32_t n = static_cast<uint32_t>(Utils::iMin<size_t>(i, 16));
char* p = buffer + ASMJIT_ARRAY_SIZE(prefix) - 1;
i -= n;
do {
uint32_t c = s[0];
p[0] = hex[c >> 4];
p[1] = hex[c & 15];
p += 2;
s += 1;
} while (--n);
*p++ = '\n';
logString(style, buffer, (size_t)(p - buffer));
}
}
// ============================================================================
// [asmjit::Logger - Indentation]
// ============================================================================
//! Replaces the current indentation string; `nullptr` clears it. The stored
//! copy is truncated to fit the internal fixed-size buffer.
void Logger::setIndentation(const char* indentation) noexcept {
  ::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
  if (indentation) {
    size_t n = Utils::strLen(indentation, ASMJIT_ARRAY_SIZE(_indentation) - 1);
    ::memcpy(_indentation, indentation, n);
  }
}
// ============================================================================
// [asmjit::FileLogger - Construction / Destruction]
// ============================================================================
//! Creates a `FileLogger` writing to `stream` (may be null - logging becomes a no-op).
FileLogger::FileLogger(FILE* stream) noexcept : _stream(nullptr) { setStream(stream); }
FileLogger::~FileLogger() noexcept {}
// ============================================================================
// [asmjit::FileLogger - Logging]
// ============================================================================
void FileLogger::logString(uint32_t style, const char* buf, size_t len) noexcept {
if (!_stream)
return;
if (len == kInvalidIndex)
len = strlen(buf);
fwrite(buf, 1, len, _stream);
}
// ============================================================================
// [asmjit::StringLogger - Construction / Destruction]
// ============================================================================
//! Creates an empty `StringLogger`; all logged text accumulates in `_stringBuilder`.
StringLogger::StringLogger() noexcept {}
StringLogger::~StringLogger() noexcept {}
// ============================================================================
// [asmjit::StringLogger - Logging]
// ============================================================================
//! Appends `buf` to the in-memory string buffer (ignores `style`).
void StringLogger::logString(uint32_t style, const char* buf, size_t len) noexcept {
_stringBuilder.appendString(buf, len);
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_LOGGER

499
src/asmjit/base/logging.cpp Normal file
View File

@@ -0,0 +1,499 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_LOGGING)
// [Dependencies]
#include "../base/logging.h"
#include "../base/utils.h"
#if !defined(ASMJIT_DISABLE_BUILDER)
#include "../base/codebuilder.h"
#endif // !ASMJIT_DISABLE_BUILDER
#if !defined(ASMJIT_DISABLE_COMPILER)
#include "../base/codecompiler.h"
#endif // !ASMJIT_DISABLE_COMPILER
#if defined(ASMJIT_BUILD_X86)
#include "../x86/x86logging_p.h"
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
#include "../arm/armlogging_p.h"
#endif // ASMJIT_BUILD_ARM
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Logger - Construction / Destruction]
// ============================================================================
//! Creates a `Logger` with all options cleared and no indentation.
Logger::Logger() noexcept {
_options = 0;
// Zero the whole fixed-size buffer so `_indentation` is an empty C string.
::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
}
Logger::~Logger() noexcept {}
// ============================================================================
// [asmjit::Logger - Logging]
// ============================================================================
//! Printf-like frontend of `logv()` - packs the variadic arguments and
//! delegates all formatting and error handling to it.
Error Logger::logf(const char* fmt, ...) noexcept {
  va_list ap;

  va_start(ap, fmt);
  Error result = logv(fmt, ap);
  va_end(ap);

  return result;
}
//! Formats `fmt` into a fixed local buffer and forwards the result to `log()`.
//!
//! Messages longer than the buffer are truncated - logging never allocates.
//! Returns whatever `log()` returns.
Error Logger::logv(const char* fmt, va_list ap) noexcept {
  char buf[1024];
  int ret = vsnprintf(buf, sizeof(buf), fmt, ap);

  // BUGFIX: vsnprintf() returns a negative value on an encoding error. The
  // previous code stored the result directly into `size_t`, so a negative
  // return wrapped around and up to 1023 bytes of uninitialized stack memory
  // were logged. Log nothing in that case.
  if (ret < 0)
    return kErrorOk;

  size_t len = static_cast<size_t>(ret);
  if (len >= sizeof(buf))
    len = sizeof(buf) - 1;

  return log(buf, len);
}
//! Logs `size` bytes of raw data as `.data` lines, 16 hex-encoded bytes per
//! line. Stops and propagates the first error returned by `log()`.
Error Logger::logBinary(const void* data, size_t size) noexcept {
static const char prefix[] = ".data ";
static const char hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
const uint8_t* s = static_cast<const uint8_t*>(data);
size_t i = size;
char buffer[128];
// The ".data " prefix stays at the start of the line buffer for every line;
// only the hex payload after it is rewritten per iteration.
::memcpy(buffer, prefix, ASMJIT_ARRAY_SIZE(prefix) - 1);
while (i) {
// Up to 16 input bytes per output line.
uint32_t n = static_cast<uint32_t>(std::min<size_t>(i, 16));
char* p = buffer + ASMJIT_ARRAY_SIZE(prefix) - 1;
i -= n;
do {
uint32_t c = s[0];
p[0] = hex[c >> 4];
p[1] = hex[c & 15];
p += 2;
s += 1;
} while (--n);
*p++ = '\n';
ASMJIT_PROPAGATE(log(buffer, (size_t)(p - buffer)));
}
return kErrorOk;
}
// ============================================================================
// [asmjit::Logger - Indentation]
// ============================================================================
//! Replaces the current indentation string; `nullptr` clears it. The stored
//! copy is truncated to fit the internal fixed-size buffer.
void Logger::setIndentation(const char* indentation) noexcept {
::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
if (!indentation)
return;
size_t length = Utils::strLen(indentation, ASMJIT_ARRAY_SIZE(_indentation) - 1);
::memcpy(_indentation, indentation, length);
}
// ============================================================================
// [asmjit::FileLogger - Construction / Destruction]
// ============================================================================
//! Creates a `FileLogger` writing to `stream` (may be null - logging becomes a no-op).
FileLogger::FileLogger(FILE* stream) noexcept : _stream(nullptr) { setStream(stream); }
FileLogger::~FileLogger() noexcept {}
// ============================================================================
// [asmjit::FileLogger - Logging]
// ============================================================================
//! Writes `buf` to the attached `FILE` stream; a null stream is a no-op.
//! Passing `len == Globals::kInvalidIndex` means `buf` is NUL-terminated.
Error FileLogger::_log(const char* buf, size_t len) noexcept {
  FILE* stream = _stream;
  if (stream) {
    size_t n = (len != Globals::kInvalidIndex) ? len : strlen(buf);
    fwrite(buf, 1, n, stream);
  }
  return kErrorOk;
}
// ============================================================================
// [asmjit::StringLogger - Construction / Destruction]
// ============================================================================
//! Creates an empty `StringLogger`; all logged text accumulates in `_stringBuilder`.
StringLogger::StringLogger() noexcept {}
StringLogger::~StringLogger() noexcept {}
// ============================================================================
// [asmjit::StringLogger - Logging]
// ============================================================================
//! Appends `buf` to the in-memory string buffer, propagating its error.
Error StringLogger::_log(const char* buf, size_t len) noexcept {
return _stringBuilder.appendString(buf, len);
}
// ============================================================================
// [asmjit::Logging]
// ============================================================================
//! Appends a printable form of label `labelId` to `sb`.
//!
//! Named labels print as "parent.name" (the parent part only if present);
//! anonymous labels print as "L<unpackedId>". An unknown id prints as
//! "InvalidLabel[Id=N]" - an invalid id is reported in the output rather
//! than returned as an error so logging itself never fails on bad input.
Error Logging::formatLabel(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
uint32_t labelId) noexcept {
const LabelEntry* le = emitter->getCode()->getLabelEntry(labelId);
if (ASMJIT_UNLIKELY(!le))
return sb.appendFormat("InvalidLabel[Id=%u]", static_cast<unsigned int>(labelId));
if (le->hasName()) {
if (le->hasParent()) {
uint32_t parentId = le->getParentId();
const LabelEntry* pe = emitter->getCode()->getLabelEntry(parentId);
// Parent prefix: its name, or "L<id>" if the parent is anonymous, or an
// invalid-label marker if the parent id can't be resolved.
if (ASMJIT_UNLIKELY(!pe))
ASMJIT_PROPAGATE(sb.appendFormat("InvalidLabel[Id=%u]", static_cast<unsigned int>(labelId)));
else if (ASMJIT_UNLIKELY(!pe->hasName()))
ASMJIT_PROPAGATE(sb.appendFormat("L%u", Operand::unpackId(parentId)));
else
ASMJIT_PROPAGATE(sb.appendString(pe->getName()));
ASMJIT_PROPAGATE(sb.appendChar('.'));
}
return sb.appendString(le->getName());
}
else {
return sb.appendFormat("L%u", Operand::unpackId(labelId));
}
}
//! Appends a printable form of the register (`regType`, `regId`) to `sb`,
//! dispatching to the architecture-specific formatter.
//!
//! NOTE(review): when both ASMJIT_BUILD_X86 and ASMJIT_BUILD_ARM are defined
//! the X86 branch returns unconditionally, making the ARM branch and the
//! kErrorInvalidArch fallback dead code - presumably the dispatch should
//! test `archType` at runtime; confirm against upstream.
Error Logging::formatRegister(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
uint32_t archType,
uint32_t regType,
uint32_t regId) noexcept {
#if defined(ASMJIT_BUILD_X86)
return X86Logging::formatRegister(sb, logOptions, emitter, archType, regType, regId);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
return ArmLogging::formatRegister(sb, logOptions, emitter, archType, regType, regId);
#endif // ASMJIT_BUILD_ARM
return kErrorInvalidArch;
}
//! Appends a printable form of operand `op` to `sb`, dispatching to the
//! architecture-specific formatter.
//!
//! NOTE(review): same dead-code caveat as formatRegister() - with both
//! backends compiled in, only the X86 path is ever taken; confirm upstream.
Error Logging::formatOperand(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
uint32_t archType,
const Operand_& op) noexcept {
#if defined(ASMJIT_BUILD_X86)
return X86Logging::formatOperand(sb, logOptions, emitter, archType, op);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
return ArmLogging::formatOperand(sb, logOptions, emitter, archType, op);
#endif // ASMJIT_BUILD_ARM
return kErrorInvalidArch;
}
//! Appends a printable (disassembled) form of the instruction `instId` with
//! its options and operands to `sb`, dispatching to the architecture-specific
//! formatter.
//!
//! NOTE(review): same dead-code caveat as formatRegister() - with both
//! backends compiled in, only the X86 path is ever taken; confirm upstream.
Error Logging::formatInstruction(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
uint32_t archType,
uint32_t instId,
uint32_t options,
const Operand_& opExtra,
const Operand_* opArray, uint32_t opCount) noexcept {
#if defined(ASMJIT_BUILD_X86)
return X86Logging::formatInstruction(sb, logOptions, emitter, archType, instId, options, opExtra, opArray, opCount);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
return ArmLogging::formatInstruction(sb, logOptions, emitter, archType, instId, options, opExtra, opArray, opCount);
#endif // ASMJIT_BUILD_ARM
return kErrorInvalidArch;
}
#if !defined(ASMJIT_DISABLE_BUILDER)
//! Appends a human-readable name of `typeId` to `sb`.
//!
//! Scalar types print as their base name (e.g. "i32", "f64"); vector types
//! print as "<base>x<count>" (e.g. "i32x4"); invalid ids print as "unknown".
static Error formatTypeId(StringBuilder& sb, uint32_t typeId) noexcept {
  if (typeId == TypeId::kVoid)
    return sb.appendString("void");

  if (!TypeId::isValid(typeId))
    return sb.appendString("unknown");

  uint32_t size = TypeId::sizeOf(typeId);
  uint32_t baseId = TypeId::elementOf(typeId);

  const char* name;
  switch (baseId) {
    case TypeId::kIntPtr : name = "intptr" ; break;
    case TypeId::kUIntPtr: name = "uintptr"; break;
    case TypeId::kI8     : name = "i8"     ; break;
    case TypeId::kU8     : name = "u8"     ; break;
    case TypeId::kI16    : name = "i16"    ; break;
    case TypeId::kU16    : name = "u16"    ; break;
    case TypeId::kI32    : name = "i32"    ; break;
    case TypeId::kU32    : name = "u32"    ; break;
    case TypeId::kI64    : name = "i64"    ; break;
    case TypeId::kU64    : name = "u64"    ; break;
    case TypeId::kF32    : name = "f32"    ; break;
    case TypeId::kF64    : name = "f64"    ; break;
    case TypeId::kF80    : name = "f80"    ; break;
    case TypeId::kMask8  : name = "mask8"  ; break;
    case TypeId::kMask16 : name = "mask16" ; break;
    case TypeId::kMask32 : name = "mask32" ; break;
    case TypeId::kMask64 : name = "mask64" ; break;
    case TypeId::kMmx32  : name = "mmx32"  ; break;
    case TypeId::kMmx64  : name = "mmx64"  ; break;
    default              : name = "unknown"; break;
  }

  uint32_t baseSize = TypeId::sizeOf(baseId);
  if (size <= baseSize)
    return sb.appendString(name);

  // Vector type - append the element count.
  return sb.appendFormat("%sx%u", name, static_cast<unsigned int>(size / baseSize));
}
//! Appends one function argument / return value descriptor to `sb` as
//! "<type>[:<reg>][:[stackOffset]]" depending on where the value lives.
static Error formatFuncDetailValue(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
FuncDetail::Value value) noexcept {
uint32_t typeId = value.getTypeId();
ASMJIT_PROPAGATE(formatTypeId(sb, typeId));
// Register assignment, if any.
if (value.byReg()) {
ASMJIT_PROPAGATE(sb.appendChar(':'));
ASMJIT_PROPAGATE(Logging::formatRegister(sb, logOptions, emitter, emitter->getArchType(), value.getRegType(), value.getRegId()));
}
// Stack assignment, if any (a value can be split between both).
if (value.byStack()) {
ASMJIT_PROPAGATE(sb.appendFormat(":[%d]", static_cast<int>(value.getStackOffset())));
}
return kErrorOk;
}
//! Appends a comma-separated list of the function's return values to `sb`,
//! or "void" when it returns nothing.
//!
//! NOTE(review): when `vRegs` is non-null it is indexed up to
//! `fd.getRetCount() - 1` - callers must provide at least that many entries;
//! confirm at call sites.
static Error formatFuncRets(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
const FuncDetail& fd,
VirtReg* const* vRegs) noexcept {
if (!fd.hasRet())
return sb.appendString("void");
for (uint32_t i = 0; i < fd.getRetCount(); i++) {
if (i) ASMJIT_PROPAGATE(sb.appendString(", "));
ASMJIT_PROPAGATE(formatFuncDetailValue(sb, logOptions, emitter, fd.getRet(i)));
// Optional virtual-register name, e.g. "i32 {vreg}".
if (vRegs)
ASMJIT_PROPAGATE(sb.appendFormat(" {%s}", vRegs[i]->getName()));
}
return kErrorOk;
}
//! Appends a comma-separated list of the function's arguments to `sb`,
//! optionally suffixing each with the name of its virtual register.
static Error formatFuncArgs(
  StringBuilder& sb,
  uint32_t logOptions,
  const CodeEmitter* emitter,
  const FuncDetail& fd,
  VirtReg* const* vRegs) noexcept {

  uint32_t argCount = fd.getArgCount();
  for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
    if (argIndex != 0)
      ASMJIT_PROPAGATE(sb.appendString(", "));

    ASMJIT_PROPAGATE(formatFuncDetailValue(sb, logOptions, emitter, fd.getArg(argIndex)));

    // Optional virtual-register name, e.g. "i32 {vreg}".
    if (vRegs != nullptr)
      ASMJIT_PROPAGATE(sb.appendFormat(" {%s}", vRegs[argIndex]->getName()));
  }
  return kErrorOk;
}
// Formats a single `CodeBuilder` node into `sb` (one line, no trailing '\n').
//
// Dispatches on the node type: instructions, labels, embedded data, align
// directives, comments, sentinels, and (when the Compiler is enabled)
// function entry / return / call nodes. Unknown node types are rendered as
// "[unknown (type=N)]" rather than failing.
//
// Fix: the kNodeSentinel and kNodeFuncExit cases declared `node` locals that
// were never used, producing -Wunused-variable warnings; they are removed.
Error Logging::formatNode(
  StringBuilder& sb,
  uint32_t logOptions,
  const CodeBuilder* cb,
  const CBNode* node_) noexcept {

  // Prefix the node position if one was assigned.
  if (node_->hasPosition())
    ASMJIT_PROPAGATE(sb.appendFormat("<%04u> ", node_->getPosition()));

  switch (node_->getType()) {
    case CBNode::kNodeInst: {
      const CBInst* node = node_->as<CBInst>();
      ASMJIT_PROPAGATE(
        Logging::formatInstruction(sb, logOptions, cb,
          cb->getArchType(),
          node->getInstId(),
          node->getOptions(),
          node->getOpExtra(),
          node->getOpArray(), node->getOpCount()));
      break;
    }

    case CBNode::kNodeLabel: {
      const CBLabel* node = node_->as<CBLabel>();
      ASMJIT_PROPAGATE(sb.appendFormat("L%u:", Operand::unpackId(node->getId())));
      break;
    }

    case CBNode::kNodeData: {
      const CBData* node = node_->as<CBData>();
      ASMJIT_PROPAGATE(sb.appendFormat(".embed (%u bytes)", node->getSize()));
      break;
    }

    case CBNode::kNodeAlign: {
      const CBAlign* node = node_->as<CBAlign>();
      ASMJIT_PROPAGATE(
        sb.appendFormat(".align %u (%s)",
          node->getAlignment(),
          node->getMode() == kAlignCode ? "code" : "data"));
      break;
    }

    case CBNode::kNodeComment: {
      const CBComment* node = node_->as<CBComment>();
      ASMJIT_PROPAGATE(sb.appendFormat("; %s", node->getInlineComment()));
      break;
    }

    case CBNode::kNodeSentinel: {
      // Sentinel nodes carry no data worth printing.
      ASMJIT_PROPAGATE(sb.appendString("[sentinel]"));
      break;
    }

#if !defined(ASMJIT_DISABLE_COMPILER)
    case CBNode::kNodeFunc: {
      const CCFunc* node = node_->as<CCFunc>();

      // "LABEL: [rets](args)"
      ASMJIT_PROPAGATE(formatLabel(sb, logOptions, cb, node->getId()));
      ASMJIT_PROPAGATE(sb.appendString(": ["));
      ASMJIT_PROPAGATE(formatFuncRets(sb, logOptions, cb, node->getDetail(), nullptr));
      ASMJIT_PROPAGATE(sb.appendString("]"));
      ASMJIT_PROPAGATE(sb.appendString("("));
      ASMJIT_PROPAGATE(formatFuncArgs(sb, logOptions, cb, node->getDetail(), node->getArgs()));
      ASMJIT_PROPAGATE(sb.appendString(")"));
      break;
    }

    case CBNode::kNodeFuncExit: {
      // Function-exit nodes carry no data worth printing.
      ASMJIT_PROPAGATE(sb.appendString("[ret]"));
      break;
    }

    case CBNode::kNodeFuncCall: {
      const CCFuncCall* node = node_->as<CCFuncCall>();
      ASMJIT_PROPAGATE(
        Logging::formatInstruction(sb, logOptions, cb,
          cb->getArchType(),
          node->getInstId(),
          node->getOptions(),
          node->getOpExtra(),
          node->getOpArray(), node->getOpCount()));
      break;
    }
#endif // !ASMJIT_DISABLE_COMPILER

    default: {
      ASMJIT_PROPAGATE(sb.appendFormat("[unknown (type=%u)]", node_->getType()));
      break;
    }
  }

  return kErrorOk;
}
#endif // !ASMJIT_DISABLE_BUILDER
// Appends the "binary | comment" columns to a line already built in `sb`
// and terminates it with '\n'.
//
// `binData/binLen` is the encoded instruction; `binLen` may be
// `Globals::kInvalidIndex` to indicate there is no binary form. `dispLen`
// displacement bytes are rendered as ".." placeholders and `imLen` trailing
// bytes are the immediate. The two-pass loop appends the binary column on
// pass 0 and the comment column on pass 1; `align` and `sep` control column
// alignment and the separator character (';' then '|').
Error Logging::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept {
  size_t currentLen = sb.getLength();
  // Comment length is clamped to `kMaxCommentLength`.
  size_t commentLen = comment ? Utils::strLen(comment, kMaxCommentLength) : 0;

  ASMJIT_ASSERT(binLen >= dispLen);

  if ((binLen != 0 && binLen != Globals::kInvalidIndex) || commentLen) {
    size_t align = kMaxInstLength;
    char sep = ';';

    // When there is no binary form the loop starts at pass 1 (comment only).
    for (size_t i = (binLen == Globals::kInvalidIndex); i < 2; i++) {
      size_t begin = sb.getLength();

      // Append align.
      if (currentLen < align)
        ASMJIT_PROPAGATE(sb.appendChars(' ', align - currentLen));

      // Append separator.
      if (sep) {
        ASMJIT_PROPAGATE(sb.appendChar(sep));
        ASMJIT_PROPAGATE(sb.appendChar(' '));
      }

      // Append binary data or comment.
      if (i == 0) {
        // Opcode bytes, then ".." per displacement byte, then the immediate.
        ASMJIT_PROPAGATE(sb.appendHex(binData, binLen - dispLen - imLen));
        ASMJIT_PROPAGATE(sb.appendChars('.', dispLen * 2));
        ASMJIT_PROPAGATE(sb.appendHex(binData + binLen - imLen, imLen));
        if (commentLen == 0) break;
      }
      else {
        ASMJIT_PROPAGATE(sb.appendString(comment, commentLen));
      }

      currentLen += sb.getLength() - begin;
      align += kMaxBinaryLength;
      sep = '|';
    }
  }

  return sb.appendChar('\n');
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_LOGGING

View File

@@ -5,61 +5,53 @@
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_LOGGER_H
#define _ASMJIT_BASE_LOGGER_H
#ifndef _ASMJIT_BASE_LOGGING_H
#define _ASMJIT_BASE_LOGGING_H
#include "../build.h"
#include "../asmjit_build.h"
// [Dependencies]
#include "../base/containers.h"
#include <stdarg.h>
#include "../base/string.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
#if !defined(ASMJIT_DISABLE_LOGGER)
#if !defined(ASMJIT_DISABLE_LOGGING)
// ============================================================================
// [asmjit::LogUtil]
// [Forward Declarations]
// ============================================================================
// Only used by asmjit internals, not available to consumers.
#if defined(ASMJIT_EXPORTS)
struct LogUtil {
enum {
// Has to be big to be able to hold all metadata compiler can assign to a
// single instruction.
kMaxCommentLength = 512,
kMaxInstLength = 40,
kMaxBinaryLength = 26
};
class CodeEmitter;
class Reg;
struct Operand_;
static bool formatLine(
StringBuilder& sb,
const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept;
};
#endif // ASMJIT_EXPORTS
#if !defined(ASMJIT_DISABLE_BUILDER)
class CodeBuilder;
class CBNode;
#endif // !ASMJIT_DISABLE_BUILDER
// ============================================================================
// [asmjit::Logger]
// ============================================================================
//! Abstract logging class.
//! Abstract logging interface and helpers.
//!
//! This class can be inherited and reimplemented to fit into your logging
//! subsystem. When reimplementing use `Logger::log()` method to log into
//! subsystem. When reimplementing use `Logger::_log()` method to log into
//! a custom stream.
//!
//! This class also contain `_enabled` member that can be used to enable
//! or disable logging.
//! There are two \ref Logger implementations offered by AsmJit:
//! - \ref FileLogger - allows to log into a `FILE*` stream.
//! - \ref StringLogger - logs into a \ref StringBuilder.
class ASMJIT_VIRTAPI Logger {
public:
ASMJIT_NO_COPY(Logger)
public:
ASMJIT_NONCOPYABLE(Logger)
// --------------------------------------------------------------------------
// [Options]
@@ -68,23 +60,9 @@ class ASMJIT_VIRTAPI Logger {
//! Logger options.
ASMJIT_ENUM(Options) {
kOptionBinaryForm = 0x00000001, //! Output instructions also in binary form.
kOptionHexImmediate = 0x00000002, //! Output immediates as hexadecimal numbers.
kOptionHexDisplacement = 0x00000004 //! Output displacements as hexadecimal numbers.
};
// --------------------------------------------------------------------------
// [Style]
// --------------------------------------------------------------------------
//! Logger style.
ASMJIT_ENUM(Style) {
kStyleDefault = 0,
kStyleDirective = 1,
kStyleLabel = 2,
kStyleData = 3,
kStyleComment = 4,
kStyleCount = 5
kOptionImmExtended = 0x00000002, //! Output a meaning of some immediates.
kOptionHexImmediate = 0x00000004, //! Output constants in hexadecimal form.
kOptionHexDisplacement = 0x00000008 //! Output displacements in hexadecimal form.
};
// --------------------------------------------------------------------------
@@ -100,13 +78,20 @@ class ASMJIT_VIRTAPI Logger {
// [Logging]
// --------------------------------------------------------------------------
//! Log output.
virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept = 0;
//! Log `str` - must be reimplemented.
virtual Error _log(const char* str, size_t len) noexcept = 0;
//! Log formatter message (like sprintf) sending output to `logString()` method.
ASMJIT_API void logFormat(uint32_t style, const char* fmt, ...) noexcept;
//! Log a string `str`, which is either null terminated or having `len` length.
ASMJIT_INLINE Error log(const char* str, size_t len = Globals::kInvalidIndex) noexcept { return _log(str, len); }
//! Log a content of a `StringBuilder` `str`.
ASMJIT_INLINE Error log(const StringBuilder& str) noexcept { return _log(str.getData(), str.getLength()); }
//! Format the message by using `sprintf()` and then send to `log()`.
ASMJIT_API Error logf(const char* fmt, ...) noexcept;
//! Format the message by using `vsprintf()` and then send to `log()`.
ASMJIT_API Error logv(const char* fmt, va_list ap) noexcept;
//! Log binary data.
ASMJIT_API void logBinary(uint32_t style, const void* data, size_t size) noexcept;
ASMJIT_API Error logBinary(const void* data, size_t size) noexcept;
// --------------------------------------------------------------------------
// [Options]
@@ -114,11 +99,8 @@ class ASMJIT_VIRTAPI Logger {
//! Get all logger options as a single integer.
ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; }
//! Get the given logger option.
ASMJIT_INLINE bool hasOption(uint32_t option) const noexcept {
return (_options & option) != 0;
}
ASMJIT_INLINE bool hasOption(uint32_t option) const noexcept { return (_options & option) != 0; }
ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; }
ASMJIT_INLINE void clearOptions(uint32_t options) noexcept { _options &= ~options; }
@@ -127,17 +109,11 @@ class ASMJIT_VIRTAPI Logger {
// --------------------------------------------------------------------------
//! Get indentation.
ASMJIT_INLINE const char* getIndentation() const noexcept {
return _indentation;
}
ASMJIT_INLINE const char* getIndentation() const noexcept { return _indentation; }
//! Set indentation.
ASMJIT_API void setIndentation(const char* indentation) noexcept;
//! Reset indentation.
ASMJIT_INLINE void resetIndentation() noexcept {
setIndentation(nullptr);
}
ASMJIT_INLINE void resetIndentation() noexcept { setIndentation(nullptr); }
// --------------------------------------------------------------------------
// [Members]
@@ -154,10 +130,10 @@ class ASMJIT_VIRTAPI Logger {
// [asmjit::FileLogger]
// ============================================================================
//! Logger that can log to standard C `FILE*` stream.
//! Logger that can log to a `FILE*` stream.
class ASMJIT_VIRTAPI FileLogger : public Logger {
public:
ASMJIT_NO_COPY(FileLogger)
public:
ASMJIT_NONCOPYABLE(FileLogger)
// --------------------------------------------------------------------------
// [Construction / Destruction]
@@ -165,7 +141,6 @@ class ASMJIT_VIRTAPI FileLogger : public Logger {
//! Create a new `FileLogger` that logs to a `FILE` stream.
ASMJIT_API FileLogger(FILE* stream = nullptr) noexcept;
//! Destroy the `FileLogger`.
ASMJIT_API virtual ~FileLogger() noexcept;
@@ -173,24 +148,21 @@ class ASMJIT_VIRTAPI FileLogger : public Logger {
// [Accessors]
// --------------------------------------------------------------------------
//! Get `FILE*` stream.
//!
//! NOTE: Return value can be `nullptr`.
ASMJIT_INLINE FILE* getStream() const noexcept {
return _stream;
}
//! Get the logging out put stream or null.
ASMJIT_INLINE FILE* getStream() const noexcept { return _stream; }
//! Set `FILE*` stream, can be set to `nullptr` to disable logging, although
//! the `ExternalTool` will still call `logString` even if there is no stream.
ASMJIT_INLINE void setStream(FILE* stream) noexcept {
_stream = stream;
}
//! Set the logging output stream to `stream` or null.
//!
//! NOTE: If the `stream` is null it will disable logging, but it won't
//! stop calling `log()` unless the logger is detached from the
//! \ref Assembler.
ASMJIT_INLINE void setStream(FILE* stream) noexcept { _stream = stream; }
// --------------------------------------------------------------------------
// [Logging]
// --------------------------------------------------------------------------
ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept;
ASMJIT_API Error _log(const char* buf, size_t len = Globals::kInvalidIndex) noexcept override;
// --------------------------------------------------------------------------
// [Members]
@@ -204,10 +176,10 @@ class ASMJIT_VIRTAPI FileLogger : public Logger {
// [asmjit::StringLogger]
// ============================================================================
//! String logger.
//! Logger that stores everything in an internal string buffer.
class ASMJIT_VIRTAPI StringLogger : public Logger {
public:
ASMJIT_NO_COPY(StringLogger)
public:
ASMJIT_NONCOPYABLE(StringLogger)
// --------------------------------------------------------------------------
// [Construction / Destruction]
@@ -215,7 +187,6 @@ class ASMJIT_VIRTAPI StringLogger : public Logger {
//! Create new `StringLogger`.
ASMJIT_API StringLogger() noexcept;
//! Destroy the `StringLogger`.
ASMJIT_API virtual ~StringLogger() noexcept;
@@ -226,43 +197,96 @@ class ASMJIT_VIRTAPI StringLogger : public Logger {
//! Get `char*` pointer which represents the resulting string.
//!
//! The pointer is owned by `StringLogger`, it can't be modified or freed.
ASMJIT_INLINE const char* getString() const noexcept {
return _stringBuilder.getData();
}
ASMJIT_INLINE const char* getString() const noexcept { return _stringBuilder.getData(); }
//! Clear the resulting string.
ASMJIT_INLINE void clearString() noexcept { _stringBuilder.clear(); }
//! Get the length of the string returned by `getString()`.
ASMJIT_INLINE size_t getLength() const noexcept {
return _stringBuilder.getLength();
}
//! Clear the resulting string.
ASMJIT_INLINE void clearString() noexcept {
_stringBuilder.clear();
}
ASMJIT_INLINE size_t getLength() const noexcept { return _stringBuilder.getLength(); }
// --------------------------------------------------------------------------
// [Logging]
// --------------------------------------------------------------------------
ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept;
ASMJIT_API Error _log(const char* buf, size_t len = Globals::kInvalidIndex) noexcept override;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Output.
//! Output string.
StringBuilder _stringBuilder;
};
// ============================================================================
// [asmjit::Logging]
// ============================================================================
//! Static helpers used to format registers, labels, operands, instructions,
//! and builder nodes into a `StringBuilder` for logging purposes.
struct Logging {
  //! Formats a register of `regType`/`regId` (for the given `archType`).
  ASMJIT_API static Error formatRegister(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    uint32_t regType,
    uint32_t regId) noexcept;

  //! Formats a label identified by `labelId`.
  ASMJIT_API static Error formatLabel(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t labelId) noexcept;

  //! Formats any operand `op` (reg / mem / imm / label).
  ASMJIT_API static Error formatOperand(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    const Operand_& op) noexcept;

  //! Formats a complete instruction - id, options, extra operand, and the
  //! operand array.
  ASMJIT_API static Error formatInstruction(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    uint32_t instId,
    uint32_t options,
    const Operand_& opExtra,
    const Operand_* opArray, uint32_t opCount) noexcept;

#if !defined(ASMJIT_DISABLE_BUILDER)
  //! Formats a single `CodeBuilder` node.
  ASMJIT_API static Error formatNode(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeBuilder* cb,
    const CBNode* node_) noexcept;
#endif // !ASMJIT_DISABLE_BUILDER

  // Only used by AsmJit internals, not available for users.
#if defined(ASMJIT_EXPORTS)
  enum {
    // Has to be big to be able to hold all metadata compiler can assign to a
    // single instruction.
    kMaxCommentLength = 512,
    kMaxInstLength = 40,
    kMaxBinaryLength = 26
  };

  //! Appends the binary / comment columns to `sb` and terminates the line.
  static Error formatLine(
    StringBuilder& sb,
    const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept;
#endif // ASMJIT_EXPORTS
};
#else
struct Logger;
#endif // !ASMJIT_DISABLE_LOGGER
class Logger;
#endif // !ASMJIT_DISABLE_LOGGING
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_LOGGER_H

74
src/asmjit/base/misc_p.h Normal file
View File

@@ -0,0 +1,74 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_MISC_P_H
#define _ASMJIT_BASE_MISC_P_H
// [Dependencies]
#include "../asmjit_build.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
//! \internal
//!
//! Macro used to populate a table with 16 elements starting at `I`.
#define ASMJIT_TABLE_16(DEF, I) DEF(I + 0), DEF(I + 1), DEF(I + 2), DEF(I + 3), \
                                DEF(I + 4), DEF(I + 5), DEF(I + 6), DEF(I + 7), \
                                DEF(I + 8), DEF(I + 9), DEF(I + 10), DEF(I + 11), \
                                DEF(I + 12), DEF(I + 13), DEF(I + 14), DEF(I + 15)

//! \internal
//!
//! Expands `TABLE<I>::VALUE` for 8 consecutive indices starting at `I`.
//! Each wider variant below doubles the previous one (up to 1024 entries);
//! together they are used to build compile-time lookup tables.
#define ASMJIT_TABLE_T_8(TABLE, VALUE, I) \
  TABLE< I + 0 >::VALUE, TABLE< I + 1 >::VALUE, \
  TABLE< I + 2 >::VALUE, TABLE< I + 3 >::VALUE, \
  TABLE< I + 4 >::VALUE, TABLE< I + 5 >::VALUE, \
  TABLE< I + 6 >::VALUE, TABLE< I + 7 >::VALUE

#define ASMJIT_TABLE_T_16(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_8(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_8(TABLE, VALUE, I + 8)

#define ASMJIT_TABLE_T_32(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_16(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_16(TABLE, VALUE, I + 16)

#define ASMJIT_TABLE_T_64(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_32(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_32(TABLE, VALUE, I + 32)

#define ASMJIT_TABLE_T_128(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_64(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_64(TABLE, VALUE, I + 64)

#define ASMJIT_TABLE_T_256(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_128(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_128(TABLE, VALUE, I + 128)

#define ASMJIT_TABLE_T_512(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_256(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_256(TABLE, VALUE, I + 256)

#define ASMJIT_TABLE_T_1024(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_512(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_512(TABLE, VALUE, I + 512)
//! \}
} // asmjit namespace
//! \}
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_MISC_P_H

View File

@@ -8,45 +8,202 @@
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/globals.h"
#include "../base/operand.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Operand]
// [asmjit::TypeId]
// ============================================================================
// Prevent static initialization.
class Operand {
public:
struct BaseOp {
uint8_t op;
uint8_t size;
uint8_t reserved_2_1;
uint8_t reserved_3_1;
uint32_t id;
uint32_t reserved_8_4;
uint32_t reserved_12_4;
};
// Kept in union to prevent LTO warnings.
union {
BaseOp _base;
// Required to properly align this _fake_ `Operand`, not used.
uint64_t _data[2];
//! \internal
//!
//! Compile-time table entry mapping a `TypeId` value `ID` to its size in
//! bytes. Vector ids report the size of the whole vector (4..64 bytes);
//! any id not covered below evaluates to 0.
template<int ID>
struct TypeIdSizeOf_T {
  enum {
    kValue = (ID == TypeId::kI8    ) ?  1 :
             (ID == TypeId::kU8    ) ?  1 :
             (ID == TypeId::kI16   ) ?  2 :
             (ID == TypeId::kU16   ) ?  2 :
             (ID == TypeId::kI32   ) ?  4 :
             (ID == TypeId::kU32   ) ?  4 :
             (ID == TypeId::kI64   ) ?  8 :
             (ID == TypeId::kU64   ) ?  8 :
             (ID == TypeId::kF32   ) ?  4 :
             (ID == TypeId::kF64   ) ?  8 :
             (ID == TypeId::kF80   ) ? 10 :
             (ID == TypeId::kMask8 ) ?  1 :
             (ID == TypeId::kMask16) ?  2 :
             (ID == TypeId::kMask32) ?  4 :
             (ID == TypeId::kMask64) ?  8 :
             (ID == TypeId::kMmx32 ) ?  4 :
             (ID == TypeId::kMmx64 ) ?  8 :
             (ID >= TypeId::_kVec32Start  && ID <= TypeId::_kVec32End ) ?  4 :
             (ID >= TypeId::_kVec64Start  && ID <= TypeId::_kVec64End ) ?  8 :
             (ID >= TypeId::_kVec128Start && ID <= TypeId::_kVec128End) ? 16 :
             (ID >= TypeId::_kVec256Start && ID <= TypeId::_kVec256End) ? 32 :
             (ID >= TypeId::_kVec512Start && ID <= TypeId::_kVec512End) ? 64 : 0
  };
};
ASMJIT_VARAPI const Operand noOperand;
const Operand noOperand = {{ 0, 0, 0, 0, kInvalidValue, 0, 0 }};
//! \internal
//!
//! Compile-time table entry mapping a `TypeId` value `ID` to the `TypeId` of
//! its element type: masks map to same-sized unsigned ints, MMX to signed
//! ints, scalars map to themselves, and vector ids map back to the scalar id
//! of their element. Any id not covered evaluates to 0.
template<int ID>
struct TypeIdElementOf_T {
  enum {
    kValue = (ID == TypeId::kMask8 ) ? TypeId::kU8  :
             (ID == TypeId::kMask16) ? TypeId::kU16 :
             (ID == TypeId::kMask32) ? TypeId::kU32 :
             (ID == TypeId::kMask64) ? TypeId::kU64 :
             (ID == TypeId::kMmx32 ) ? TypeId::kI32 :
             (ID == TypeId::kMmx64 ) ? TypeId::kI64 :
             (ID >= TypeId::kI8           && ID <= TypeId::kF80       ) ? ID :
             (ID >= TypeId::_kVec32Start  && ID <= TypeId::_kVec32End ) ? ID - TypeId::_kVec32Start  + TypeId::kI8 :
             (ID >= TypeId::_kVec64Start  && ID <= TypeId::_kVec64End ) ? ID - TypeId::_kVec64Start  + TypeId::kI8 :
             (ID >= TypeId::_kVec128Start && ID <= TypeId::_kVec128End) ? ID - TypeId::_kVec128Start + TypeId::kI8 :
             (ID >= TypeId::_kVec256Start && ID <= TypeId::_kVec256End) ? ID - TypeId::_kVec256Start + TypeId::kI8 :
             (ID >= TypeId::_kVec512Start && ID <= TypeId::_kVec512End) ? ID - TypeId::_kVec512Start + TypeId::kI8 : 0
  };
};
// Expands `TMPL<I>::kValue` for 16 consecutive indices - eight expansions
// per table produce the 128 entries of each `TypeId::Info` array below.
#define R(TMPL, I) TMPL<I +  0>::kValue, TMPL<I +  1>::kValue, \
                   TMPL<I +  2>::kValue, TMPL<I +  3>::kValue, \
                   TMPL<I +  4>::kValue, TMPL<I +  5>::kValue, \
                   TMPL<I +  6>::kValue, TMPL<I +  7>::kValue, \
                   TMPL<I +  8>::kValue, TMPL<I +  9>::kValue, \
                   TMPL<I + 10>::kValue, TMPL<I + 11>::kValue, \
                   TMPL<I + 12>::kValue, TMPL<I + 13>::kValue, \
                   TMPL<I + 14>::kValue, TMPL<I + 15>::kValue

// Compile-time generated lookup tables indexed by `TypeId` (0..127).
ASMJIT_API const TypeId::Info TypeId::_info = {
  // SizeOf[128]
  {
    R(TypeIdSizeOf_T,  0), R(TypeIdSizeOf_T,  16),
    R(TypeIdSizeOf_T, 32), R(TypeIdSizeOf_T,  48),
    R(TypeIdSizeOf_T, 64), R(TypeIdSizeOf_T,  80),
    R(TypeIdSizeOf_T, 96), R(TypeIdSizeOf_T, 112)
  },

  // ElementOf[128]
  {
    R(TypeIdElementOf_T,  0), R(TypeIdElementOf_T,  16),
    R(TypeIdElementOf_T, 32), R(TypeIdElementOf_T,  48),
    R(TypeIdElementOf_T, 64), R(TypeIdElementOf_T,  80),
    R(TypeIdElementOf_T, 96), R(TypeIdElementOf_T, 112)
  }
};
#undef R
// ============================================================================
// [asmjit::Operand - Test]
// ============================================================================
#if defined(ASMJIT_TEST)
// Unit test covering the basic contracts of Operand, Label, Reg, Mem and Imm:
// 16-byte operand size, zero-initialized reserved fields, signature packing,
// 32/64-bit memory offsets, and sign-extension of immediates.
UNIT(base_operand) {
  INFO("Checking operand sizes");
  EXPECT(sizeof(Operand) == 16);
  EXPECT(sizeof(Reg) == 16);
  EXPECT(sizeof(Mem) == 16);
  EXPECT(sizeof(Imm) == 16);
  EXPECT(sizeof(Label) == 16);

  INFO("Checking basic functionality of Operand");
  Operand a, b;
  Operand dummy;

  EXPECT(a.isNone() == true);
  EXPECT(a.isReg() == false);
  EXPECT(a.isMem() == false);
  EXPECT(a.isImm() == false);
  EXPECT(a.isLabel() == false);
  EXPECT(a == b);

  EXPECT(a._any.reserved8_4 == 0, "Default constructed Operand should zero its 'reserved8_4' field");
  EXPECT(a._any.reserved12_4 == 0, "Default constructed Operand should zero its 'reserved12_4' field");

  INFO("Checking basic functionality of Label");
  Label label;
  EXPECT(label.isValid() == false);
  EXPECT(label.getId() == 0);

  INFO("Checking basic functionality of Reg");
  EXPECT(Reg().isValid() == false,
    "Default constructed Reg() should not be valid");
  EXPECT(Reg()._any.reserved8_4 == 0,
    "A default constructed Reg() should zero its 'reserved8_4' field");
  EXPECT(Reg()._any.reserved12_4 == 0,
    "A default constructed Reg() should zero its 'reserved12_4' field");

  EXPECT(Reg().isReg() == false,
    "Default constructed register should not isReg()");
  EXPECT(dummy.as<Reg>().isValid() == false,
    "Default constructed Operand casted to Reg should not be valid");

  // Create some register (not specific to any architecture).
  uint32_t rSig = Operand::kOpReg | (1 << Operand::kSignatureRegTypeShift) |
                                    (2 << Operand::kSignatureRegKindShift) |
                                    (8 << Operand::kSignatureSizeShift   ) ;
  Reg r1(Reg::fromSignature(rSig, 5));

  EXPECT(r1.isValid() == true);
  EXPECT(r1.isReg() == true);
  EXPECT(r1.isReg(1) == true);
  EXPECT(r1.isPhysReg() == true);
  EXPECT(r1.isVirtReg() == false);
  EXPECT(r1.getSignature() == rSig);
  EXPECT(r1.getType() == 1);
  EXPECT(r1.getKind() == 2);
  EXPECT(r1.getSize() == 8);
  EXPECT(r1.getId() == 5);
  EXPECT(r1.isReg(1, 5) == true); // RegType and Id.
  EXPECT(r1._any.reserved8_4 == 0, "Reg should have 'reserved8_4' zero");
  EXPECT(r1._any.reserved12_4 == 0, "Reg should have 'reserved12_4' zero");

  // The same type of register having different id.
  Reg r2(r1, 6);
  EXPECT(r2.isValid() == true);
  EXPECT(r2.isReg() == true);
  EXPECT(r2.isReg(1) == true);
  EXPECT(r2.isPhysReg() == true);
  EXPECT(r2.isVirtReg() == false);
  EXPECT(r2.getSignature() == rSig);
  EXPECT(r2.getType() == r1.getType());
  EXPECT(r2.getKind() == r1.getKind());
  EXPECT(r2.getSize() == r1.getSize());
  EXPECT(r2.getId() == 6);
  EXPECT(r2.isReg(1, 6) == true);

  r1.reset();
  EXPECT(!r1.isValid(),
    "Reset register should not be valid");
  EXPECT(!r1.isReg(),
    "Reset register should not isReg()");

  INFO("Checking basic functionality of Mem");
  Mem m;
  EXPECT(m.isMem()                  , "Default constructed Mem() should isMem()");
  EXPECT(m == Mem()                 , "Two default constructed Mem() operands should be equal");
  EXPECT(m.hasBase() == false       , "Default constructed Mem() should not have base specified");
  EXPECT(m.hasIndex() == false      , "Default constructed Mem() should not have index specified");
  EXPECT(m.has64BitOffset() == true , "Default constructed Mem() should report 64-bit offset");
  EXPECT(m.getOffset() == 0         , "Default constructed Mem() should have be zero offset / address");

  m.setOffset(-1);
  EXPECT(m.getOffsetLo32() == -1    , "Memory operand must hold a 32-bit offset");
  EXPECT(m.getOffset() == -1        , "32-bit offset must be sign extended to 64 bits");

  int64_t x = int64_t(ASMJIT_UINT64_C(0xFF00FF0000000001));
  m.setOffset(x);
  EXPECT(m.getOffset() == x         , "Memory operand must hold a 64-bit offset");
  EXPECT(m.getOffsetLo32() == 1     , "Memory operand must return correct low offset DWORD");
  EXPECT(m.getOffsetHi32() == 0xFF00FF00, "Memory operand must return correct high offset DWORD");

  INFO("Checking basic functionality of Imm");
  EXPECT(Imm(-1).getInt64() == int64_t(-1),
    "Immediate values should by default sign-extend to 64-bits");
}
#endif // ASMJIT_TEST
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"

File diff suppressed because it is too large Load Diff

228
src/asmjit/base/osutils.cpp Normal file
View File

@@ -0,0 +1,228 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/osutils.h"
#include "../base/utils.h"
#if ASMJIT_OS_POSIX
# include <sys/types.h>
# include <sys/mman.h>
# include <time.h>
# include <unistd.h>
#endif // ASMJIT_OS_POSIX
#if ASMJIT_OS_MAC
# include <mach/mach_time.h>
#endif // ASMJIT_OS_MAC
#if ASMJIT_OS_WINDOWS
# if defined(_MSC_VER) && _MSC_VER >= 1400
# include <intrin.h>
# else
# define _InterlockedCompareExchange InterlockedCompareExchange
# endif // _MSC_VER
#endif // ASMJIT_OS_WINDOWS
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::OSUtils - Virtual Memory]
// ============================================================================
// Windows specific implementation using `VirtualAllocEx` and `VirtualFree`.
#if ASMJIT_OS_WINDOWS
// Queries Windows virtual-memory properties once and caches them in a
// function-local static. `hCurrentProcess` doubles as the "initialized" flag.
//
// NOTE(review): the lazy initialization is unsynchronized; two threads
// entering concurrently would write identical values (benign in practice,
// but not strictly thread-safe) - confirm this is acceptable.
//
// Fix: removed the stray semicolon after the function body (-Wextra-semi).
static ASMJIT_NOINLINE const VMemInfo& OSUtils_GetVMemInfo() noexcept {
  static VMemInfo vmi;
  if (ASMJIT_UNLIKELY(!vmi.hCurrentProcess)) {
    SYSTEM_INFO info;
    ::GetSystemInfo(&info);

    vmi.pageSize = Utils::alignToPowerOf2<uint32_t>(info.dwPageSize);
    vmi.pageGranularity = info.dwAllocationGranularity;
    vmi.hCurrentProcess = ::GetCurrentProcess();
  }
  return vmi;
}
// Returns a copy of the cached virtual-memory info (Windows).
VMemInfo OSUtils::getVirtualMemoryInfo() noexcept { return OSUtils_GetVMemInfo(); }
// Allocates virtual memory in the current process; thin wrapper over
// `allocProcessMemory()` with a null process handle.
void* OSUtils::allocVirtualMemory(size_t size, size_t* allocated, uint32_t flags) noexcept {
  return allocProcessMemory(static_cast<HANDLE>(0), size, allocated, flags);
}
// Releases virtual memory in the current process; thin wrapper over
// `releaseProcessMemory()` with a null process handle.
Error OSUtils::releaseVirtualMemory(void* p, size_t size) noexcept {
  return releaseProcessMemory(static_cast<HANDLE>(0), p, size);
}
// Allocates `size` bytes of virtual memory in `hProcess` (or in the current
// process when null) with protection derived from `kVM*` flags. Returns null
// on failure; on success stores the real page-aligned size into `allocated`
// (if provided).
void* OSUtils::allocProcessMemory(HANDLE hProcess, size_t size, size_t* allocated, uint32_t flags) noexcept {
  if (size == 0)
    return nullptr;

  const VMemInfo& vmi = OSUtils_GetVMemInfo();
  if (!hProcess) hProcess = vmi.hCurrentProcess;

  // VirtualAllocEx rounds the allocated size to a page size automatically,
  // but we need the `alignedSize` so we can store the real allocated size
  // into `allocated` output.
  size_t alignedSize = Utils::alignTo(size, vmi.pageSize);

  // Windows XP SP2 / Vista+ allow data-execution-prevention (DEP).
  DWORD protectFlags = 0;
  if (flags & kVMExecutable)
    protectFlags |= (flags & kVMWritable) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
  else
    protectFlags |= (flags & kVMWritable) ? PAGE_READWRITE : PAGE_READONLY;

  LPVOID mBase = ::VirtualAllocEx(hProcess, nullptr, alignedSize, MEM_COMMIT | MEM_RESERVE, protectFlags);
  if (ASMJIT_UNLIKELY(!mBase)) return nullptr;

  ASMJIT_ASSERT(Utils::isAligned<size_t>(reinterpret_cast<size_t>(mBase), vmi.pageSize));

  if (allocated) *allocated = alignedSize;
  return mBase;
}
// Releases memory previously allocated by `allocProcessMemory()` from
// `hProcess` (or the current process when null). `size` is ignored because
// `VirtualFreeEx` with MEM_RELEASE (size 0) frees the whole reservation.
Error OSUtils::releaseProcessMemory(HANDLE hProcess, void* p, size_t size) noexcept {
  const VMemInfo& vmi = OSUtils_GetVMemInfo();
  if (!hProcess) hProcess = vmi.hCurrentProcess;

  if (ASMJIT_UNLIKELY(!::VirtualFreeEx(hProcess, p, 0, MEM_RELEASE)))
    return DebugUtils::errored(kErrorInvalidState);

  return kErrorOk;
}
#endif // ASMJIT_OS_WINDOWS
// Posix specific implementation using `mmap()` and `munmap()`.
#if ASMJIT_OS_POSIX
// Mac uses MAP_ANON instead of MAP_ANONYMOUS.
#if !defined(MAP_ANONYMOUS)
# define MAP_ANONYMOUS MAP_ANON
#endif // MAP_ANONYMOUS
// Queries POSIX virtual-memory properties once and caches them in a
// function-local static; `pageSize` doubles as the "initialized" flag.
// Granularity is at least 64kB to match Windows-like allocation behavior.
//
// NOTE(review): the lazy initialization is unsynchronized; concurrent first
// calls would write identical values (benign, but not strictly thread-safe).
//
// Fix: removed the stray semicolon after the function body (-Wextra-semi).
static const VMemInfo& OSUtils_GetVMemInfo() noexcept {
  static VMemInfo vmi;
  if (ASMJIT_UNLIKELY(!vmi.pageSize)) {
    size_t pageSize = ::getpagesize();
    vmi.pageSize = pageSize;
    vmi.pageGranularity = std::max<size_t>(pageSize, 65536);
  }
  return vmi;
}
// Returns a copy of the cached virtual-memory info (POSIX).
VMemInfo OSUtils::getVirtualMemoryInfo() noexcept { return OSUtils_GetVMemInfo(); }
// Allocates `size` bytes (rounded up to the page size) of anonymous private
// memory via `mmap()`. Returns null on failure; on success stores the real
// page-aligned size into `allocated` (if provided).
void* OSUtils::allocVirtualMemory(size_t size, size_t* allocated, uint32_t flags) noexcept {
  const VMemInfo& vmi = OSUtils_GetVMemInfo();
  size_t alignedSize = Utils::alignTo<size_t>(size, vmi.pageSize);

  // Translate `kVM*` flags into POSIX protection; memory is always readable.
  int protection = PROT_READ;
  if (flags & kVMWritable  ) protection |= PROT_WRITE;
  if (flags & kVMExecutable) protection |= PROT_EXEC;

  void* mbase = ::mmap(nullptr, alignedSize, protection, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (ASMJIT_UNLIKELY(mbase == MAP_FAILED)) return nullptr;

  if (allocated) *allocated = alignedSize;
  return mbase;
}
// Unmaps a region previously obtained from `allocVirtualMemory()`.
// Returns `kErrorInvalidState` when `munmap()` fails.
Error OSUtils::releaseVirtualMemory(void* p, size_t size) noexcept {
  int result = ::munmap(p, size);
  if (ASMJIT_UNLIKELY(result != 0))
    return DebugUtils::errored(kErrorInvalidState);

  return kErrorOk;
}
#endif // ASMJIT_OS_POSIX
// ============================================================================
// [asmjit::OSUtils - GetTickCount]
// ============================================================================
#if ASMJIT_OS_WINDOWS
// Converts a `QueryPerformanceCounter()` reading to milliseconds using the
// cached frequency (`freq` is in ticks-per-millisecond), truncated to 32 bits.
static ASMJIT_INLINE uint32_t OSUtils_calcHiRes(const LARGE_INTEGER& now, double freq) noexcept {
  return static_cast<uint32_t>(
    (int64_t)(double(now.QuadPart) / freq) & 0xFFFFFFFF);
}
// Returns a tick counter in milliseconds, preferring the hi-res
// `QueryPerformanceCounter()` and falling back to `GetTickCount()`.
//
// `_hiResTicks` is a tri-state flag: 0 = not probed yet, 1 = hi-res counter
// available (with `_hiResFreq` holding ticks-per-ms), 0xFFFFFFFF = hi-res
// counter unavailable.
uint32_t OSUtils::getTickCount() noexcept {
  static volatile uint32_t _hiResTicks;
  static volatile double _hiResFreq;

  do {
    uint32_t hiResOk = _hiResTicks;
    LARGE_INTEGER qpf, now;

    // If for whatever reason this fails, bail to `GetTickCount()`.
    if (!::QueryPerformanceCounter(&now)) break;

    // Expected - if we ran through this at least once `hiResTicks` will be
    // either 1 or 0xFFFFFFFF. If it's '1' then the Hi-Res counter is available
    // and `QueryPerformanceCounter()` can be used.
    if (hiResOk == 1) return OSUtils_calcHiRes(now, _hiResFreq);

    // Hi-Res counter is not available, bail to `GetTickCount()`.
    if (hiResOk != 0) break;

    // Detect availability of Hi-Res counter, if not available, bail to `GetTickCount()`.
    if (!::QueryPerformanceFrequency(&qpf)) {
      _InterlockedCompareExchange((LONG*)&_hiResTicks, 0xFFFFFFFF, 0);
      break;
    }

    // NOTE(review): `_hiResFreq` is published by a plain volatile store before
    // the CAS below with no memory barrier; a reader that observes
    // `_hiResTicks == 1` could in theory read a stale `_hiResFreq` - confirm
    // this is acceptable on the supported targets.
    double freq = double(qpf.QuadPart) / 1000.0;
    _hiResFreq = freq;

    _InterlockedCompareExchange((LONG*)&_hiResTicks, 1, 0);
    return OSUtils_calcHiRes(now, freq);
  } while (0);

  return ::GetTickCount();
}
#elif ASMJIT_OS_MAC
// Returns a tick counter in milliseconds based on `mach_absolute_time()`.
//
// Fix: the original condition used `||`:
//   if (ASMJIT_UNLIKELY(_machTime.denom == 0) || mach_timebase_info(...) != KERN_SUCCESS)
//     return 0;
// Short-circuit evaluation meant that while `denom == 0` the call to
// `mach_timebase_info()` was never made, so `_machTime` was never initialized
// and the function returned 0 forever. The timebase must be queried when it
// is uninitialized, and 0 returned only if that query fails.
uint32_t OSUtils::getTickCount() noexcept {
  static mach_timebase_info_data_t _machTime;

  // See Apple's QA1398 - query the timebase once, on first use.
  if (ASMJIT_UNLIKELY(_machTime.denom == 0)) {
    if (mach_timebase_info(&_machTime) != KERN_SUCCESS || _machTime.denom == 0)
      return 0;
  }

  // `mach_absolute_time()` returns nanoseconds, we want milliseconds.
  uint64_t t = mach_absolute_time() / 1000000;

  t = t * _machTime.numer / _machTime.denom;
  return static_cast<uint32_t>(t & 0xFFFFFFFFU);
}
#elif defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
//! Millisecond tick counter backed by the POSIX monotonic clock.
//!
//! Returns 0 when `clock_gettime()` fails; otherwise the monotonic time
//! truncated to milliseconds and wrapped to 32 bits.
uint32_t OSUtils::getTickCount() noexcept {
  struct timespec ts;
  if (ASMJIT_UNLIKELY(clock_gettime(CLOCK_MONOTONIC, &ts) != 0))
    return 0;

  uint64_t msecs = uint64_t(ts.tv_sec) * 1000;
  msecs += uint64_t(ts.tv_nsec) / 1000000;
  return static_cast<uint32_t>(msecs & 0xFFFFFFFFU);
}
#else
#error "[asmjit] OSUtils::getTickCount() is not implemented for your target OS."
uint32_t OSUtils::getTickCount() noexcept { return 0; }
#endif
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

178
src/asmjit/base/osutils.h Normal file
View File

@@ -0,0 +1,178 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_OSUTILS_H
#define _ASMJIT_BASE_OSUTILS_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::VMemInfo]
// ============================================================================
//! Information about OS virtual memory.
//!
//! Returned by `OSUtils::getVirtualMemoryInfo()`.
struct VMemInfo {
#if ASMJIT_OS_WINDOWS
  HANDLE hCurrentProcess; //!< Handle of the current process (Windows).
#endif // ASMJIT_OS_WINDOWS
  size_t pageSize; //!< Virtual memory page size.
  size_t pageGranularity; //!< Virtual memory page granularity.
};
// ============================================================================
// [asmjit::OSUtils]
// ============================================================================
//! OS utilities.
//!
//! Virtual Memory
//! --------------
//!
//! Provides functions to allocate and release virtual memory that is required
//! to execute dynamically generated code. If both processor and host OS support
//! data-execution-prevention (DEP) then the only way to run machine code is to
//! allocate virtual memory that has `OSUtils::kVMExecutable` flag enabled. All
//! functions provided by OSUtils internally use platform specific APIs.
//!
//! Benchmarking
//! ------------
//!
//! OSUtils also provides a function `getTickCount()` that can be used for
//! benchmarking purposes. It's similar to Windows-only `GetTickCount()`, but
//! it's cross-platform and uses the most reliable platform specific calls to
//! make the result usable.
struct OSUtils {
  // --------------------------------------------------------------------------
  // [Virtual Memory]
  // --------------------------------------------------------------------------

  //! Virtual memory flags, accepted by `allocVirtualMemory()`.
  ASMJIT_ENUM(VMFlags) {
    kVMWritable = 0x00000001U, //!< Virtual memory is writable.
    kVMExecutable = 0x00000002U //!< Virtual memory is executable.
  };

  //! Get information (page size and granularity) about host virtual memory.
  ASMJIT_API static VMemInfo getVirtualMemoryInfo() noexcept;

  //! Allocate virtual memory.
  //!
  //! `allocated` presumably receives the actual (page-rounded) number of
  //! bytes reserved - confirm in the platform implementation.
  ASMJIT_API static void* allocVirtualMemory(size_t size, size_t* allocated, uint32_t flags) noexcept;
  //! Release virtual memory previously allocated by \ref allocVirtualMemory().
  ASMJIT_API static Error releaseVirtualMemory(void* p, size_t size) noexcept;

#if ASMJIT_OS_WINDOWS
  //! Allocate virtual memory of `hProcess` (Windows).
  ASMJIT_API static void* allocProcessMemory(HANDLE hProcess, size_t size, size_t* allocated, uint32_t flags) noexcept;
  //! Release virtual memory of `hProcess` (Windows).
  ASMJIT_API static Error releaseProcessMemory(HANDLE hProcess, void* p, size_t size) noexcept;
#endif // ASMJIT_OS_WINDOWS

  // --------------------------------------------------------------------------
  // [GetTickCount]
  // --------------------------------------------------------------------------

  //! Get the current CPU tick count, used for benchmarking (1ms resolution).
  ASMJIT_API static uint32_t getTickCount() noexcept;
};
// ============================================================================
// [asmjit::Lock]
// ============================================================================
//! \internal
//!
//! Lock.
//!
//! Thin wrapper over the host mutex primitive - `CRITICAL_SECTION` on
//! Windows and `pthread_mutex_t` on POSIX systems.
//!
//! NOTE(review): CRITICAL_SECTION allows recursive locking while a default
//! pthread mutex does not - callers must not rely on recursive `lock()`.
struct Lock {
  ASMJIT_NONCOPYABLE(Lock)

  // --------------------------------------------------------------------------
  // [Windows]
  // --------------------------------------------------------------------------

#if ASMJIT_OS_WINDOWS
  typedef CRITICAL_SECTION Handle;

  //! Create a new `Lock` instance.
  ASMJIT_INLINE Lock() noexcept { InitializeCriticalSection(&_handle); }
  //! Destroy the `Lock` instance.
  ASMJIT_INLINE ~Lock() noexcept { DeleteCriticalSection(&_handle); }

  //! Lock.
  ASMJIT_INLINE void lock() noexcept { EnterCriticalSection(&_handle); }
  //! Unlock.
  ASMJIT_INLINE void unlock() noexcept { LeaveCriticalSection(&_handle); }
#endif // ASMJIT_OS_WINDOWS

  // --------------------------------------------------------------------------
  // [Posix]
  // --------------------------------------------------------------------------

#if ASMJIT_OS_POSIX
  typedef pthread_mutex_t Handle;

  //! Create a new `Lock` instance.
  ASMJIT_INLINE Lock() noexcept { pthread_mutex_init(&_handle, nullptr); }
  //! Destroy the `Lock` instance.
  ASMJIT_INLINE ~Lock() noexcept { pthread_mutex_destroy(&_handle); }

  //! Lock.
  ASMJIT_INLINE void lock() noexcept { pthread_mutex_lock(&_handle); }
  //! Unlock.
  ASMJIT_INLINE void unlock() noexcept { pthread_mutex_unlock(&_handle); }
#endif // ASMJIT_OS_POSIX

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Native handle.
  Handle _handle;
};
// ============================================================================
// [asmjit::AutoLock]
// ============================================================================
//! \internal
//!
//! Scoped lock.
//!
//! Acquires `target` on construction and releases it when the scope ends
//! (RAII); the referenced `Lock` must outlive the `AutoLock` instance.
struct AutoLock {
  ASMJIT_NONCOPYABLE(AutoLock)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Acquire `target`.
  ASMJIT_INLINE AutoLock(Lock& target) noexcept : _target(target) { _target.lock(); }
  //! Release the lock acquired by the constructor.
  ASMJIT_INLINE ~AutoLock() noexcept { _target.unlock(); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Reference to the `Lock`.
  Lock& _target;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_OSUTILS_H

View File

@@ -1,132 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/podvector.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::PodVectorBase - NullData]
// ============================================================================
// Shared read-only data used by every unallocated `PodVectorBase` so that a
// default-constructed vector performs no heap allocation.
const PodVectorBase::Data PodVectorBase::_nullData = { 0, 0 };

// Returns true if `d` is an embedded (static) buffer, which lives directly
// after the vector object itself (see `PodVectorTmp`) and must never be
// passed to free/realloc.
static ASMJIT_INLINE bool isDataStatic(PodVectorBase* self, PodVectorBase::Data* d) noexcept {
  return (void*)(self + 1) == (void*)d;
}
// ============================================================================
// [asmjit::PodVectorBase - Reset]
// ============================================================================
//! Clear the vector; when `releaseMemory` is true, also return the heap
//! buffer to the system (embedded/static buffers are never freed).
void PodVectorBase::reset(bool releaseMemory) noexcept {
  Data* d = _d;

  // Nothing was ever allocated - nothing to do.
  if (d == &_nullData)
    return;

  if (!releaseMemory || isDataStatic(this, d)) {
    // Keep the buffer, drop the content.
    d->length = 0;
    return;
  }

  ASMJIT_FREE(d);
  _d = const_cast<Data*>(&_nullData);
}
// ============================================================================
// [asmjit::PodVectorBase - Helpers]
// ============================================================================
//! Grow the buffer so that at least `n` more elements of `sizeOfT` bytes fit.
//!
//! Keeps the current content; only the capacity is enlarged. Returns
//! `kErrorNoHeapMemory` if the resulting length would overflow `size_t` or
//! if the reallocation fails.
Error PodVectorBase::_grow(size_t n, size_t sizeOfT) noexcept {
  Data* d = _d;

  // Above `threshold` elements the capacity grows linearly, not by doubling.
  size_t threshold = kMemAllocGrowMax / sizeOfT;
  size_t capacity = d->capacity;
  size_t after = d->length;

  // Overflow check: `length + n` must fit into `size_t`.
  if (IntTraits<size_t>::maxValue() - n < after)
    return kErrorNoHeapMemory;

  after += n;
  if (capacity >= after)
    return kErrorOk;

  // PodVector is used as a linear array for some data structures used by
  // AsmJit code generation. The purpose of this aggressive growing schema
  // is to minimize memory reallocations, because AsmJit code generation
  // classes live a short life and will be freed or reused soon.
  if (capacity < 32)
    capacity = 32;
  else if (capacity < 128)
    capacity = 128;
  else if (capacity < 512)
    capacity = 512;

  while (capacity < after) {
    if (capacity < threshold)
      capacity *= 2;
    else
      capacity += threshold;
  }

  return _reserve(capacity, sizeOfT);
}
//! Reallocate the internal buffer to hold at least `n` elements of `sizeOfT`
//! bytes each. Never shrinks; a no-op when the capacity is already enough.
Error PodVectorBase::_reserve(size_t n, size_t sizeOfT) noexcept {
  Data* d = _d;

  if (d->capacity >= n)
    return kErrorOk;

  size_t nBytes = sizeof(Data) + n * sizeOfT;
  // NOTE(review): this catches only gross wrap-around; `n * sizeOfT` can
  // overflow and still satisfy `nBytes >= n` for a large `sizeOfT` -
  // consider checking `n > (maxValue - sizeof(Data)) / sizeOfT` instead.
  if (ASMJIT_UNLIKELY(nBytes < n))
    return kErrorNoHeapMemory;

  if (d == &_nullData) {
    // First allocation - the vector still points at the shared null data.
    d = static_cast<Data*>(ASMJIT_ALLOC(nBytes));
    if (ASMJIT_UNLIKELY(d == nullptr))
      return kErrorNoHeapMemory;
    d->length = 0;
  }
  else {
    if (isDataStatic(this, d)) {
      // The data lives in an embedded (static) buffer that cannot be passed
      // to realloc/free - allocate a fresh block and copy the content over.
      Data* oldD = d;

      d = static_cast<Data*>(ASMJIT_ALLOC(nBytes));
      if (ASMJIT_UNLIKELY(d == nullptr))
        return kErrorNoHeapMemory;

      size_t len = oldD->length;
      d->length = len;
      ::memcpy(d->getData(), oldD->getData(), len * sizeOfT);
    }
    else {
      // Heap-allocated already - realloc keeps the content for us.
      d = static_cast<Data*>(ASMJIT_REALLOC(d, nBytes));
      if (ASMJIT_UNLIKELY(d == nullptr))
        return kErrorNoHeapMemory;
    }
  }

  d->capacity = n;
  _d = d;

  return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

View File

@@ -1,281 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_PODVECTOR_H
#define _ASMJIT_BASE_PODVECTOR_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::PodVectorBase]
// ============================================================================
//! \internal
//!
//! Non-template base of `PodVector<T>` - stores the type-erased buffer and
//! implements grow/reserve so the logic is not instantiated per element type.
class PodVectorBase {
public:
  // --------------------------------------------------------------------------
  // [Data]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Buffer header; the element payload is stored directly after it.
  struct Data {
    //! Get data (the first byte after this header).
    ASMJIT_INLINE void* getData() const noexcept {
      return static_cast<void*>(const_cast<Data*>(this + 1));
    }

    //! Capacity of the vector.
    size_t capacity;
    //! Length of the vector.
    size_t length;
  };

  //! Shared, immutable data used by every vector before its first allocation.
  static ASMJIT_API const Data _nullData;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new instance of `PodVectorBase`.
  ASMJIT_INLINE PodVectorBase() noexcept : _d(const_cast<Data*>(&_nullData)) {}
  //! Destroy the `PodVectorBase` and its data.
  ASMJIT_INLINE ~PodVectorBase() noexcept { reset(true); }

protected:
  //! Used by `PodVectorTmp` to start with an embedded (static) buffer.
  explicit ASMJIT_INLINE PodVectorBase(Data* d) noexcept : _d(d) {}

  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------

public:
  //! Reset the vector data and set its `length` to zero.
  //!
  //! If `releaseMemory` is true the vector buffer will be released to the
  //! system.
  ASMJIT_API void reset(bool releaseMemory = false) noexcept;

  // --------------------------------------------------------------------------
  // [Grow / Reserve]
  // --------------------------------------------------------------------------

protected:
  //! Grow the buffer to fit at least `n` more elements of `sizeOfT` bytes.
  ASMJIT_API Error _grow(size_t n, size_t sizeOfT) noexcept;
  //! Reallocate the buffer to hold at least `n` elements of `sizeOfT` bytes.
  ASMJIT_API Error _reserve(size_t n, size_t sizeOfT) noexcept;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

public:
  //! Type-erased buffer; points to `_nullData` before the first allocation.
  Data* _d;
};
// ============================================================================
// [asmjit::PodVector<T>]
// ============================================================================
//! Template used to store and manage array of POD data.
//!
//! This template has these adventages over other vector<> templates:
//! - Non-copyable (designed to be non-copyable, we want it)
//! - No copy-on-write (some implementations of stl can use it)
//! - Optimized for working only with POD types
//! - Uses ASMJIT_... memory management macros
template <typename T>
class PodVector : public PodVectorBase {
public:
ASMJIT_NO_COPY(PodVector<T>)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new instance of `PodVector<T>`.
ASMJIT_INLINE PodVector() noexcept {}
//! Destroy the `PodVector<T>` and its data.
ASMJIT_INLINE ~PodVector() noexcept {}
protected:
explicit ASMJIT_INLINE PodVector(Data* d) noexcept : PodVectorBase(d) {}
// --------------------------------------------------------------------------
// [Data]
// --------------------------------------------------------------------------
public:
//! Get whether the vector is empty.
ASMJIT_INLINE bool isEmpty() const noexcept { return _d->length == 0; }
//! Get length.
ASMJIT_INLINE size_t getLength() const noexcept { return _d->length; }
//! Get capacity.
ASMJIT_INLINE size_t getCapacity() const noexcept { return _d->capacity; }
//! Get data.
ASMJIT_INLINE T* getData() noexcept { return static_cast<T*>(_d->getData()); }
//! \overload
ASMJIT_INLINE const T* getData() const noexcept { return static_cast<const T*>(_d->getData()); }
// --------------------------------------------------------------------------
// [Grow / Reserve]
// --------------------------------------------------------------------------
//! Called to grow the buffer to fit at least `n` elements more.
ASMJIT_INLINE Error _grow(size_t n) noexcept { return PodVectorBase::_grow(n, sizeof(T)); }
//! Realloc internal array to fit at least `n` items.
ASMJIT_INLINE Error _reserve(size_t n) noexcept { return PodVectorBase::_reserve(n, sizeof(T)); }
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
//! Prepend `item` to vector.
Error prepend(const T& item) noexcept {
Data* d = _d;
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
_d = d;
}
::memmove(static_cast<T*>(d->getData()) + 1, d->getData(), d->length * sizeof(T));
::memcpy(d->getData(), &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Insert an `item` at the `index`.
Error insert(size_t index, const T& item) noexcept {
Data* d = _d;
ASMJIT_ASSERT(index <= d->length);
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
d = _d;
}
T* dst = static_cast<T*>(d->getData()) + index;
::memmove(dst + 1, dst, d->length - index);
::memcpy(dst, &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Append `item` to vector.
Error append(const T& item) noexcept {
Data* d = _d;
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
d = _d;
}
::memcpy(static_cast<T*>(d->getData()) + d->length, &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Get index of `val` or `kInvalidIndex` if not found.
size_t indexOf(const T& val) const noexcept {
Data* d = _d;
const T* data = static_cast<const T*>(d->getData());
size_t len = d->length;
for (size_t i = 0; i < len; i++)
if (data[i] == val)
return i;
return kInvalidIndex;
}
//! Remove item at index `i`.
void removeAt(size_t i) noexcept {
Data* d = _d;
ASMJIT_ASSERT(i < d->length);
T* data = static_cast<T*>(d->getData()) + i;
d->length--;
::memmove(data, data + 1, d->length - i);
}
//! Swap this pod-vector with `other`.
void swap(PodVector<T>& other) noexcept {
T* otherData = other._d;
other._d = _d;
_d = otherData;
}
//! Get item at index `i`.
ASMJIT_INLINE T& operator[](size_t i) noexcept {
ASMJIT_ASSERT(i < getLength());
return getData()[i];
}
//! Get item at index `i`.
ASMJIT_INLINE const T& operator[](size_t i) const noexcept {
ASMJIT_ASSERT(i < getLength());
return getData()[i];
}
};
// ============================================================================
// [asmjit::PodVectorTmp<T>]
// ============================================================================
//! `PodVector<T>` with a built-in buffer for `N` elements - performs no heap
//! allocation until the vector grows past `N` items.
template<typename T, size_t N>
class PodVectorTmp : public PodVector<T> {
public:
  ASMJIT_NO_COPY(PodVectorTmp<T, N>)

  // --------------------------------------------------------------------------
  // [StaticData]
  // --------------------------------------------------------------------------

  //! Buffer header followed by inline storage for `N` elements. It is placed
  //! directly after the vector object, which is how `isDataStatic()` in the
  //! implementation recognizes it (it must never be freed/realloc'ed).
  struct StaticData : public PodVectorBase::Data {
    char data[sizeof(T) * N];
  };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new instance of `PodVectorTmp<T>`.
  ASMJIT_INLINE PodVectorTmp() noexcept : PodVector<T>(&_staticData) {
    _staticData.capacity = N;
    _staticData.length = 0;
  }
  //! Destroy the `PodVectorTmp<T>` and its data.
  ASMJIT_INLINE ~PodVectorTmp() noexcept {}

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Embedded buffer used until the vector outgrows `N` elements.
  StaticData _staticData;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_PODVECTOR_H

View File

@@ -0,0 +1,599 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/regalloc_p.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::RAPass - Construction / Destruction]
// ============================================================================
//! Create the register-allocator pass (registered under the name "RA").
//! `_varMapToVaListOffset` is the byte offset from a node's `RAData` to its
//! `TiedReg` array; presumably overwritten by the architecture-specific
//! subclass - TODO confirm.
RAPass::RAPass() noexcept :
  CBPass("RA"),
  _varMapToVaListOffset(0) {}
RAPass::~RAPass() noexcept {}
// ============================================================================
// [asmjit::RAPass - Interface]
// ============================================================================
//! Run the register allocator over the whole node list.
//!
//! Locates every `CCFunc` node and compiles it, stopping at the first
//! error. The `zone` is only borrowed for the duration of the pass and is
//! detached before returning.
Error RAPass::process(Zone* zone) noexcept {
  _zone = zone;
  _heap.reset(zone);
  _emitComments = (cb()->getGlobalOptions() & CodeEmitter::kOptionLoggingEnabled) != 0;

  Error err = kErrorOk;
  CBNode* node = cc()->getFirstNode();
  if (!node) return err;

  do {
    if (node->getType() == CBNode::kNodeFunc) {
      CCFunc* func = static_cast<CCFunc*>(node);
      // Step past the body BEFORE compiling - `compile()` can remove nodes
      // inside the function, so iteration resumes from the function's end.
      node = func->getEnd();
      err = compile(func);
      if (err) break;
    }

    // Find a function by skipping all nodes that are not `kNodeFunc`.
    do {
      node = node->getNext();
    } while (node && node->getType() != CBNode::kNodeFunc);
  } while (node);

  _heap.reset(nullptr);
  _zone = nullptr;
  return err;
}
//! Compile a single function: fetch, dead-code removal, liveness analysis,
//! optional annotation (logging only), and translation. `cleanup()` always
//! runs afterwards, whether the steps succeeded or not.
Error RAPass::compile(CCFunc* func) noexcept {
  ASMJIT_PROPAGATE(prepare(func));

  // Run the pipeline; the first failing step short-circuits the rest.
  Error err = fetch();
  if (!err) err = removeUnreachableCode();
  if (!err) err = livenessAnalysis();

#if !defined(ASMJIT_DISABLE_LOGGING)
  if (!err && (cc()->getGlobalOptions() & CodeEmitter::kOptionLoggingEnabled))
    err = annotate();
#endif // !ASMJIT_DISABLE_LOGGING

  if (!err) err = translate();

  cleanup();

  // We alter the compiler cursor, because it doesn't make sense to reference
  // it after compilation - some nodes may disappear and it's forbidden to add
  // new code after the compilation is done.
  cc()->_setCursor(nullptr);

  return err;
}
//! Initialize per-function state before `func` is compiled.
//!
//! Records the function boundaries (`_stop` is the node right after the
//! function's end node) and clears all lists, memory cells, and statistics
//! left over from a previously compiled function.
Error RAPass::prepare(CCFunc* func) noexcept {
  CBNode* end = func->getEnd();

  _func = func;
  _stop = end->getNext();
  _extraBlock = end;

  _unreachableList.reset();
  _returningList.reset();
  _jccList.reset();
  _contextVd.reset();

  // No spill/home cells allocated yet.
  _memVarCells = nullptr;
  _memStackCells = nullptr;

  // Per-size-class usage counters consumed by `resolveCellOffsets()`.
  _mem1ByteVarsUsed = 0;
  _mem2ByteVarsUsed = 0;
  _mem4ByteVarsUsed = 0;
  _mem8ByteVarsUsed = 0;
  _mem16ByteVarsUsed = 0;
  _mem32ByteVarsUsed = 0;
  _mem64ByteVarsUsed = 0;
  _memStackCellsUsed = 0;

  _memMaxAlign = 0;
  _memVarTotal = 0;
  _memStackTotal = 0;
  _memAllTotal = 0;

  // Minimum width of the inline-annotation column (used for logging only).
  _annotationLength = 12;
  return kErrorOk;
}
void RAPass::cleanup() noexcept {
VirtReg** virtArray = _contextVd.getData();
size_t virtCount = _contextVd.getLength();
for (size_t i = 0; i < virtCount; i++) {
VirtReg* vreg = virtArray[i];
vreg->_raId = kInvalidValue;
vreg->resetPhysId();
}
_contextVd.reset();
_extraBlock = nullptr;
}
// ============================================================================
// [asmjit::RAPass - Mem]
// ============================================================================
// Natural alignment for a cell of `size` bytes: the smallest power of two
// that is >= `size`, clamped to the 1..64 range.
static inline uint32_t RAGetDefaultAlignment(uint32_t size) {
  uint32_t alignment = 1;
  while (alignment < 64 && alignment < size)
    alignment <<= 1;
  return alignment;
}
//! Allocate a memory (home/spill) cell for the virtual register `vreg`.
//!
//! Stack registers get a stack cell; all others get a variable cell whose
//! final offset is later assigned by `resolveCellOffsets()`. Returns null
//! on allocation failure.
RACell* RAPass::_newVarCell(VirtReg* vreg) {
  ASMJIT_ASSERT(vreg->_memCell == nullptr);

  RACell* cell;
  uint32_t size = vreg->getSize();

  if (vreg->isStack()) {
    cell = _newStackCell(size, vreg->getAlignment());
    // NOTE(review): unlike the `_NoMemory` path below, this failure does not
    // call `setLastError()` - verify whether that asymmetry is intentional.
    if (ASMJIT_UNLIKELY(!cell)) return nullptr;
  }
  else {
    cell = static_cast<RACell*>(_zone->alloc(sizeof(RACell)));
    if (!cell) goto _NoMemory;

    cell->next = _memVarCells;
    cell->offset = 0;
    cell->size = size;
    cell->alignment = size;

    _memVarCells = cell;
    _memMaxAlign = std::max<uint32_t>(_memMaxAlign, size);
    _memVarTotal += size;

    // Variable cells are bucketed by size; `resolveCellOffsets()` uses these
    // counters to lay groups out from largest to smallest.
    switch (size) {
      case 1: _mem1ByteVarsUsed++ ; break;
      case 2: _mem2ByteVarsUsed++ ; break;
      case 4: _mem4ByteVarsUsed++ ; break;
      case 8: _mem8ByteVarsUsed++ ; break;
      case 16: _mem16ByteVarsUsed++; break;
      case 32: _mem32ByteVarsUsed++; break;
      case 64: _mem64ByteVarsUsed++; break;

      default:
        ASMJIT_NOT_REACHED();
    }
  }

  vreg->_memCell = cell;
  return cell;

_NoMemory:
  cc()->setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  return nullptr;
}
//! Allocate a stack cell of `size` bytes with the given `alignment`.
//!
//! An `alignment` of 0 means "derive from size"; the value is clamped to a
//! maximum of 64. Cells are kept sorted by decreasing alignment (then size)
//! so the final stack layout wastes as little padding as possible.
RACell* RAPass::_newStackCell(uint32_t size, uint32_t alignment) {
  RACell* cell = static_cast<RACell*>(_zone->alloc(sizeof(RACell)));
  if (ASMJIT_UNLIKELY(!cell)) return nullptr;

  if (alignment == 0)
    alignment = RAGetDefaultAlignment(size);

  if (alignment > 64)
    alignment = 64;

  ASMJIT_ASSERT(Utils::isPowerOf2(alignment));
  size = Utils::alignTo<uint32_t>(size, alignment);

  // Insert it sorted according to the alignment and size.
  {
    RACell** pPrev = &_memStackCells;
    RACell* cur = *pPrev;

    while (cur && ((cur->alignment > alignment) || (cur->alignment == alignment && cur->size > size))) {
      pPrev = &cur->next;
      cur = *pPrev;
    }

    cell->next = cur;
    cell->offset = 0;
    cell->size = size;
    cell->alignment = alignment;

    *pPrev = cell;
    _memStackCellsUsed++;

    _memMaxAlign = std::max<uint32_t>(_memMaxAlign, alignment);
    _memStackTotal += size;
  }

  return cell;
}
//! Assign final offsets to all variable and stack cells.
//!
//! Variable cells are laid out first, grouped by size from the largest
//! class (64 bytes) down to 1 byte so each group stays naturally aligned;
//! stack cells follow, each aligned to its own requirement. The total
//! memory needed is recorded in `_memAllTotal`.
Error RAPass::resolveCellOffsets() {
  RACell* varCell = _memVarCells;
  RACell* stackCell = _memStackCells;

  // NOTE(review): `stackAlignment` is computed but never used below - either
  // dead code or a missing use; worth confirming.
  uint32_t stackAlignment = 0;
  if (stackCell) stackAlignment = stackCell->alignment;

  // Running start offset of each size class within the variable area.
  uint32_t pos64 = 0;
  uint32_t pos32 = pos64 + _mem64ByteVarsUsed * 64;
  uint32_t pos16 = pos32 + _mem32ByteVarsUsed * 32;
  uint32_t pos8 = pos16 + _mem16ByteVarsUsed * 16;
  uint32_t pos4 = pos8 + _mem8ByteVarsUsed * 8 ;
  uint32_t pos2 = pos4 + _mem4ByteVarsUsed * 4 ;
  uint32_t pos1 = pos2 + _mem2ByteVarsUsed * 2 ;

  // Assign home slots.
  while (varCell) {
    uint32_t size = varCell->size;
    uint32_t offset = 0;

    switch (size) {
      case 1: offset = pos1 ; pos1 += 1 ; break;
      case 2: offset = pos2 ; pos2 += 2 ; break;
      case 4: offset = pos4 ; pos4 += 4 ; break;
      case 8: offset = pos8 ; pos8 += 8 ; break;
      case 16: offset = pos16; pos16 += 16; break;
      case 32: offset = pos32; pos32 += 32; break;
      case 64: offset = pos64; pos64 += 64; break;

      default:
        ASMJIT_NOT_REACHED();
    }

    varCell->offset = static_cast<int32_t>(offset);
    varCell = varCell->next;
  }

  // Assign stack slots (sorted by decreasing alignment by `_newStackCell`).
  uint32_t stackPos = pos1 + _mem1ByteVarsUsed;
  while (stackCell) {
    uint32_t size = stackCell->size;
    uint32_t alignment = stackCell->alignment;
    ASMJIT_ASSERT(alignment != 0 && Utils::isPowerOf2(alignment));

    stackPos = Utils::alignTo(stackPos, alignment);
    stackCell->offset = stackPos;

    stackCell = stackCell->next;
    stackPos += size;
  }

  _memAllTotal = stackPos;
  return kErrorOk;
}
// ============================================================================
// [asmjit::RAPass - RemoveUnreachableCode]
// ============================================================================
//! Remove all nodes that were never reached during the fetch phase.
//!
//! Each entry of `_unreachableList` marks the start of a possibly dead
//! region; a region extends until the first node that owns pass-data (i.e.
//! a node that was actually visited) or until `stop`. Within a region,
//! everything before the first label is removed; after a label only nodes
//! flagged removable are dropped.
Error RAPass::removeUnreachableCode() {
  ZoneList<CBNode*>::Link* link = _unreachableList.getFirst();
  CBNode* stop = getStop();

  while (link) {
    CBNode* node = link->getValue();
    if (node && node->getPrev() && node != stop) {
      // Locate all unreachable nodes.
      CBNode* first = node;
      do {
        if (node->hasPassData()) break;
        node = node->getNext();
      } while (node != stop);

      // Remove unreachable nodes that are neither informative nor directives.
      if (node != first) {
        CBNode* end = node;
        node = first;

        // NOTE: The strategy is as follows:
        // 1. The algorithm removes everything until it finds a first label.
        // 2. After the first label is found it removes only removable nodes.
        bool removeEverything = true;
        do {
          CBNode* next = node->getNext();
          bool remove = node->isRemovable();

          if (!remove) {
            if (node->isLabel())
              removeEverything = false;
            remove = removeEverything;
          }

          if (remove)
            cc()->removeNode(node);

          node = next;
        } while (node != end);
      }
    }

    link = link->getNext();
  }

  return kErrorOk;
}
// ============================================================================
// [asmjit::RAPass - Liveness Analysis]
// ============================================================================
//! \internal
//!
//! Entry of the explicit worklist stack used by `RAPass::livenessAnalysis()`
//! to walk jump targets without native recursion.
struct LivenessTarget {
  LivenessTarget* prev; //!< Previous target (stack link).
  CBLabel* node; //!< Target node.
  CBJump* from; //!< Jumped from.
};
//! Backward liveness analysis over the fetched function.
//!
//! Walks from every returning node back towards the function entry, keeping
//! a bit-set `bCur` of currently live virtual registers (one bit per
//! `_raId`). Labels with jump sources are handled through an explicit
//! `LivenessTarget` stack so deeply nested control flow cannot overflow the
//! native call stack; nodes reached again through another path get their
//! already-computed liveness merged ("patched").
Error RAPass::livenessAnalysis() {
  uint32_t bLen = static_cast<uint32_t>(
    ((_contextVd.getLength() + RABits::kEntityBits - 1) / RABits::kEntityBits));

  // No variables.
  if (bLen == 0)
    return kErrorOk;

  CCFunc* func = getFunc();
  CBJump* from = nullptr;

  LivenessTarget* ltCur = nullptr;
  // Free-list of popped targets, reused to avoid re-allocating from the zone.
  LivenessTarget* ltUnused = nullptr;

  ZoneList<CBNode*>::Link* retPtr = _returningList.getFirst();
  ASMJIT_ASSERT(retPtr != nullptr);

  CBNode* node = retPtr->getValue();
  RAData* wd;

  size_t varMapToVaListOffset = _varMapToVaListOffset;
  RABits* bCur = newBits(bLen);
  if (ASMJIT_UNLIKELY(!bCur)) goto NoMem;

  // Allocate bits for code visited first time.
Visit:
  for (;;) {
    wd = node->getPassData<RAData>();
    if (wd->liveness) {
      // Node seen before - merge; continue patching only if bits changed.
      if (bCur->_addBitsDelSource(wd->liveness, bCur, bLen))
        goto Patch;
      else
        goto Done;
    }

    RABits* bTmp = copyBits(bCur, bLen);
    if (!bTmp) goto NoMem;

    wd = node->getPassData<RAData>();
    wd->liveness = bTmp;

    uint32_t tiedTotal = wd->tiedTotal;
    TiedReg* tiedArray = reinterpret_cast<TiedReg*>(((uint8_t*)wd) + varMapToVaListOffset);

    for (uint32_t i = 0; i < tiedTotal; i++) {
      TiedReg* tied = &tiedArray[i];
      VirtReg* vreg = tied->vreg;

      uint32_t flags = tied->flags;
      uint32_t raId = vreg->_raId;

      if ((flags & TiedReg::kWAll) && !(flags & TiedReg::kRAll)) {
        // Write-Only - live at this node, killed (dead) above it.
        bTmp->setBit(raId);
        bCur->delBit(raId);
      }
      else {
        // Read-Only or Read/Write - live at this node and above it.
        bTmp->setBit(raId);
        bCur->setBit(raId);
      }
    }

    if (node->getType() == CBNode::kNodeLabel)
      goto Target;

    if (node == func)
      goto Done;

    ASMJIT_ASSERT(node->getPrev());
    node = node->getPrev();
  }

  // Patch already generated liveness bits.
Patch:
  for (;;) {
    ASMJIT_ASSERT(node->hasPassData());
    ASMJIT_ASSERT(node->getPassData<RAData>()->liveness != nullptr);

    RABits* bNode = node->getPassData<RAData>()->liveness;
    if (!bNode->_addBitsDelSource(bCur, bLen)) goto Done;

    if (node->getType() == CBNode::kNodeLabel) goto Target;
    if (node == func) goto Done;

    node = node->getPrev();
  }

  // A label was reached - propagate the liveness into every jump source.
Target:
  if (static_cast<CBLabel*>(node)->getNumRefs() != 0) {
    // Push a new LivenessTarget onto the stack if needed.
    if (!ltCur || ltCur->node != node) {
      // Allocate a new LivenessTarget object (from pool or zone).
      LivenessTarget* ltTmp = ltUnused;

      if (ltTmp) {
        ltUnused = ltUnused->prev;
      }
      else {
        ltTmp = _zone->allocT<LivenessTarget>(
          sizeof(LivenessTarget) - sizeof(RABits) + bLen * sizeof(uintptr_t));
        if (!ltTmp) goto NoMem;
      }

      // Initialize and make current - ltTmp->from will be set later on.
      ltTmp->prev = ltCur;
      ltTmp->node = static_cast<CBLabel*>(node);
      ltCur = ltTmp;

      from = static_cast<CBLabel*>(node)->getFrom();
      ASMJIT_ASSERT(from != nullptr);
    }
    else {
      from = ltCur->from;
      goto JumpNext;
    }

    // Visit/Patch.
    do {
      ltCur->from = from;
      bCur->copyBits(node->getPassData<RAData>()->liveness, bLen);

      if (!from->getPassData<RAData>()->liveness) {
        node = from;
        goto Visit;
      }

      // Issue #25: Moved 'JumpNext' here since it's important to patch
      // code again if there are more live variables than before.
JumpNext:
      if (bCur->delBits(from->getPassData<RAData>()->liveness, bLen)) {
        node = from;
        goto Patch;
      }

      from = from->getJumpNext();
    } while (from);

    // Pop the current LivenessTarget from the stack (to the free-list).
    {
      LivenessTarget* ltTmp = ltCur;
      ltCur = ltCur->prev;
      ltTmp->prev = ltUnused;
      ltUnused = ltTmp;
    }
  }

  // Continue the backward walk above the label.
  bCur->copyBits(node->getPassData<RAData>()->liveness, bLen);
  node = node->getPrev();

  if (node->isJmp() || !node->hasPassData()) goto Done;

  wd = node->getPassData<RAData>();
  if (!wd->liveness) goto Visit;
  if (bCur->delBits(wd->liveness, bLen)) goto Patch;

  // Resume at pending jump targets, then at the next returning node.
Done:
  if (ltCur) {
    node = ltCur->node;
    from = ltCur->from;
    goto JumpNext;
  }

  retPtr = retPtr->getNext();
  if (retPtr) {
    node = retPtr->getValue();
    goto Visit;
  }

  return kErrorOk;

NoMem:
  return DebugUtils::errored(kErrorNoHeapMemory);
}
// ============================================================================
// [asmjit::RAPass - Annotate]
// ============================================================================
//! Append the inline comment of `node` plus a liveness/usage map to `dst`.
//!
//! The map is rendered as `[...]` with one column per allocated virtual
//! register: '.' marks a register live at this node; 'r'/'w'/'x' mark a
//! read / write / read-write operand of this node (uppercase when the
//! register is unused after the node). No-op when logging is disabled.
Error RAPass::formatInlineComment(StringBuilder& dst, CBNode* node) {
#if !defined(ASMJIT_DISABLE_LOGGING)
  RAData* wd = node->getPassData<RAData>();

  if (node->hasInlineComment())
    dst.appendString(node->getInlineComment());

  if (wd && wd->liveness) {
    // Pad so the liveness map starts at a fixed column.
    if (dst.getLength() < _annotationLength)
      dst.appendChars(' ', _annotationLength - dst.getLength());

    uint32_t vdCount = static_cast<uint32_t>(_contextVd.getLength());
    size_t offset = dst.getLength() + 1;

    // Reserve the `[ ... ]` area, then overwrite columns in place below.
    dst.appendChar('[');
    dst.appendChars(' ', vdCount);
    dst.appendChar(']');

    RABits* liveness = wd->liveness;

    uint32_t i;
    for (i = 0; i < vdCount; i++) {
      if (liveness->getBit(i))
        dst.getData()[offset + i] = '.';
    }

    uint32_t tiedTotal = wd->tiedTotal;
    TiedReg* tiedArray = reinterpret_cast<TiedReg*>(((uint8_t*)wd) + _varMapToVaListOffset);

    for (i = 0; i < tiedTotal; i++) {
      TiedReg* tied = &tiedArray[i];
      VirtReg* vreg = tied->vreg;
      uint32_t flags = tied->flags;

      char c = 'u';
      if ( (flags & TiedReg::kRAll) && !(flags & TiedReg::kWAll)) c = 'r';
      if (!(flags & TiedReg::kRAll) && (flags & TiedReg::kWAll)) c = 'w';
      if ( (flags & TiedReg::kRAll) && (flags & TiedReg::kWAll)) c = 'x';

      // Uppercase if unused.
      if ( (flags & TiedReg::kUnuse)) c -= 'a' - 'A';

      ASMJIT_ASSERT(offset + vreg->_raId < dst.getLength());
      dst._data[offset + vreg->_raId] = c;
    }
  }
#endif // !ASMJIT_DISABLE_LOGGING

  return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER

View File

@@ -0,0 +1,574 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_REGALLOC_P_H
#define _ASMJIT_BASE_REGALLOC_P_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/codecompiler.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::TiedReg]
// ============================================================================
//! Tied register (CodeCompiler)
//!
//! Tied register is used to describe one or more register operands that share
//! the same virtual register. Tied register contains all the data that is
//! essential for register allocation.
struct TiedReg {
//! Flags.
ASMJIT_ENUM(Flags) {
kRReg = 0x00000001U, //!< Register read.
kWReg = 0x00000002U, //!< Register write.
kXReg = 0x00000003U, //!< Register read-write.
kRMem = 0x00000004U, //!< Memory read.
kWMem = 0x00000008U, //!< Memory write.
kXMem = 0x0000000CU, //!< Memory read-write.
kRDecide = 0x00000010U, //!< RA can decide between reg/mem read.
kWDecide = 0x00000020U, //!< RA can decide between reg/mem write.
kXDecide = 0x00000030U, //!< RA can decide between reg/mem read-write.
kRFunc = 0x00000100U, //!< Function argument passed in register.
kWFunc = 0x00000200U, //!< Function return value passed into register.
kXFunc = 0x00000300U, //!< Function argument and return value.
kRCall = 0x00000400U, //!< Function call operand.
kSpill = 0x00000800U, //!< Variable should be spilled.
kUnuse = 0x00001000U, //!< Variable should be unused at the end of the instruction/node.
kRAll = kRReg | kRMem | kRDecide | kRFunc | kRCall, //!< All in-flags.
kWAll = kWReg | kWMem | kWDecide | kWFunc, //!< All out-flags.
kRDone = 0x00400000U, //!< Already allocated on the input.
kWDone = 0x00800000U, //!< Already allocated on the output.
kX86GpbLo = 0x10000000U,
kX86GpbHi = 0x20000000U,
kX86Fld4 = 0x40000000U,
kX86Fld8 = 0x80000000U
};
// --------------------------------------------------------------------------
// [Init / Reset]
// --------------------------------------------------------------------------
ASMJIT_INLINE void init(VirtReg* vreg, uint32_t flags = 0, uint32_t inRegs = 0, uint32_t allocableRegs = 0) noexcept {
this->vreg = vreg;
this->flags = flags;
this->refCount = 0;
this->inPhysId = Globals::kInvalidRegId;
this->outPhysId = Globals::kInvalidRegId;
this->reserved = 0;
this->inRegs = inRegs;
this->allocableRegs = allocableRegs;
}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get whether the variable has to be allocated in a specific input register.
ASMJIT_INLINE uint32_t hasInPhysId() const { return inPhysId != Globals::kInvalidRegId; }
//! Get whether the variable has to be allocated in a specific output register.
ASMJIT_INLINE uint32_t hasOutPhysId() const { return outPhysId != Globals::kInvalidRegId; }
//! Set the input register index.
ASMJIT_INLINE void setInPhysId(uint32_t index) { inPhysId = static_cast<uint8_t>(index); }
//! Set the output register index.
ASMJIT_INLINE void setOutPhysId(uint32_t index) { outPhysId = static_cast<uint8_t>(index); }
// --------------------------------------------------------------------------
// [Operator Overload]
// --------------------------------------------------------------------------
ASMJIT_INLINE TiedReg& operator=(const TiedReg& other) {
::memcpy(this, &other, sizeof(TiedReg));
return *this;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Pointer to the associated \ref VirtReg.
VirtReg* vreg;
//! Tied flags.
uint32_t flags;
union {
struct {
//! How many times the variable is used by the instruction/node.
uint8_t refCount;
//! Input register index or `kInvalidReg` if it's not given.
//!
//! Even if the input register index is not given (i.e. it may by any
//! register), register allocator should assign an index that will be
//! used to persist a variable into this specific index. It's helpful
//! in situations where one variable has to be allocated in multiple
//! registers to determine the register which will be persistent.
uint8_t inPhysId;
//! Output register index or `kInvalidReg` if it's not given.
//!
//! Typically `kInvalidReg` if variable is only used on input.
uint8_t outPhysId;
//! \internal
uint8_t reserved;
};
//! \internal
//!
//! Packed data #0.
uint32_t packed;
};
//! Mandatory input registers.
//!
//! Mandatory input registers are required by the instruction even if
//! there are duplicates. This schema allows us to allocate one variable
//! in one or more register when needed. Required mostly by instructions
//! that have implicit register operands (imul, cpuid, ...) and function
//! call.
uint32_t inRegs;
//! Allocable input registers.
//!
//! Optional input registers is a mask of all allocable registers for a given
//! variable where we have to pick one of them. This mask is usually not used
//! when _inRegs is set. If both masks are used then the register
//! allocator tries first to find an intersection between these and allocates
//! an extra slot if not found.
uint32_t allocableRegs;
};
// ============================================================================
// [asmjit::RABits]
// ============================================================================
//! Fixed size bit-array.
//!
//! Used by variable liveness analysis. The array is stored as machine words;
//! `len` in all member functions is a count of words, not bits.
struct RABits {
  // --------------------------------------------------------------------------
  // [Enums]
  // --------------------------------------------------------------------------

  enum {
    kEntitySize = static_cast<int>(sizeof(uintptr_t)), //!< Bytes per word.
    kEntityBits = kEntitySize * 8                      //!< Bits per word.
  };

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the bit at `index` (returns 0 or 1).
  ASMJIT_INLINE uintptr_t getBit(uint32_t index) const noexcept {
    const uint32_t word = index / kEntityBits;
    const uint32_t bit = index % kEntityBits;
    return (data[word] >> bit) & static_cast<uintptr_t>(1);
  }

  //! Set the bit at `index` to 1.
  ASMJIT_INLINE void setBit(uint32_t index) noexcept {
    const uint32_t word = index / kEntityBits;
    const uint32_t bit = index % kEntityBits;
    data[word] |= static_cast<uintptr_t>(1) << bit;
  }

  //! Clear the bit at `index` (set to 0).
  ASMJIT_INLINE void delBit(uint32_t index) noexcept {
    const uint32_t word = index / kEntityBits;
    const uint32_t bit = index % kEntityBits;
    data[word] &= ~(static_cast<uintptr_t>(1) << bit);
  }

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  //! Copy bits from `s0`, returns `true` if at least one bit is set in `s0`.
  ASMJIT_INLINE bool copyBits(const RABits* s0, uint32_t len) noexcept {
    uintptr_t anySet = 0;
    for (uint32_t i = 0; i < len; i++) {
      const uintptr_t w = s0->data[i];
      data[i] = w;
      anySet |= w;
    }
    return anySet != 0;
  }

  //! In-place union `this |= s0`, returns `true` if any result bit is set.
  ASMJIT_INLINE bool addBits(const RABits* s0, uint32_t len) noexcept {
    return addBits(this, s0, len);
  }

  //! Union `this = s0 | s1`, returns `true` if any result bit is set.
  ASMJIT_INLINE bool addBits(const RABits* s0, const RABits* s1, uint32_t len) noexcept {
    uintptr_t anySet = 0;
    for (uint32_t i = 0; i < len; i++) {
      const uintptr_t w = s0->data[i] | s1->data[i];
      data[i] = w;
      anySet |= w;
    }
    return anySet != 0;
  }

  //! In-place intersection `this &= s1`, returns `true` if any result bit is set.
  ASMJIT_INLINE bool andBits(const RABits* s1, uint32_t len) noexcept {
    return andBits(this, s1, len);
  }

  //! Intersection `this = s0 & s1`, returns `true` if any result bit is set.
  ASMJIT_INLINE bool andBits(const RABits* s0, const RABits* s1, uint32_t len) noexcept {
    uintptr_t anySet = 0;
    for (uint32_t i = 0; i < len; i++) {
      const uintptr_t w = s0->data[i] & s1->data[i];
      data[i] = w;
      anySet |= w;
    }
    return anySet != 0;
  }

  //! In-place difference `this &= ~s1`, returns `true` if any result bit is set.
  ASMJIT_INLINE bool delBits(const RABits* s1, uint32_t len) noexcept {
    return delBits(this, s1, len);
  }

  //! Difference `this = s0 & ~s1`, returns `true` if any result bit is set.
  ASMJIT_INLINE bool delBits(const RABits* s0, const RABits* s1, uint32_t len) noexcept {
    uintptr_t anySet = 0;
    for (uint32_t i = 0; i < len; i++) {
      const uintptr_t w = s0->data[i] & ~s1->data[i];
      data[i] = w;
      anySet |= w;
    }
    return anySet != 0;
  }

  //! `this |= s1` while clearing from `s1` every bit already present in `this`;
  //! returns `true` if `s1` still contains any set bit afterwards.
  ASMJIT_INLINE bool _addBitsDelSource(RABits* s1, uint32_t len) noexcept {
    return _addBitsDelSource(this, s1, len);
  }

  //! `this = s0 | s1` and `s1 &= ~s0`; returns `true` if `s1` still contains
  //! any set bit afterwards.
  ASMJIT_INLINE bool _addBitsDelSource(const RABits* s0, RABits* s1, uint32_t len) noexcept {
    uintptr_t remaining = 0;
    for (uint32_t i = 0; i < len; i++) {
      // Read both words before writing `this->data[i]`, as `s0` may alias `this`.
      const uintptr_t a = s0->data[i];
      const uintptr_t b = s1->data[i];
      this->data[i] = a | b;

      const uintptr_t kept = b & ~a;
      s1->data[i] = kept;
      remaining |= kept;
    }
    return remaining != 0;
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Bit storage; the struct is allocated with extra trailing space (see
  //! \ref RAPass::newBits), so the declared single-word array is a placeholder.
  uintptr_t data[1];
};
// ============================================================================
// [asmjit::RACell]
// ============================================================================
//! Register allocator's (RA) memory cell.
//!
//! Describes one stack-memory slot - either a spill slot for a variable or an
//! explicit stack allocation (see \ref RAPass::_newVarCell and
//! \ref RAPass::_newStackCell). Cells form a singly-linked list and their
//! final `offset` is assigned by \ref RAPass::resolveCellOffsets.
struct RACell {
  RACell* next;       //!< Next active cell.
  int32_t offset;     //!< Cell offset, relative to base-offset.
  uint32_t size;      //!< Cell size.
  uint32_t alignment; //!< Cell alignment.
};
// ============================================================================
// [asmjit::RAData]
// ============================================================================
//! Register allocator's (RA) data associated with each \ref CBNode.
struct RAData {
  //! Create RAData that describes `tiedTotal` tied registers; `liveness` and
  //! `state` start null and are filled in by later allocation phases.
  ASMJIT_INLINE RAData(uint32_t tiedTotal) noexcept
    : liveness(nullptr),
      state(nullptr),
      tiedTotal(tiedTotal) {}

  RABits* liveness;   //!< Liveness bits (populated by liveness-analysis).
  RAState* state;     //!< Optional saved \ref RAState.
  uint32_t tiedTotal; //!< Total count of \ref TiedReg regs.
};
// ============================================================================
// [asmjit::RAState]
// ============================================================================
//! Variables' state.
//!
//! NOTE(review): Intentionally an empty tag type here - the concrete layout is
//! presumably provided by each architecture-specific allocator, which is why
//! \ref RAPass exposes only pure-virtual `loadState()` / `saveState()` /
//! `switchState()` operations on it - confirm against the X86 backend.
struct RAState {};
// ============================================================================
// [asmjit::RAPass]
// ============================================================================
//! \internal
//!
//! Register allocator pipeline used by \ref CodeCompiler.
struct RAPass : public CBPass {
public:
  ASMJIT_NONCOPYABLE(RAPass)

  //! Signature of a node-tracing callback (debugging aid).
  typedef void (ASMJIT_CDECL* TraceNodeFunc)(RAPass* self, CBNode* node_, const char* prefix);

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  RAPass() noexcept;
  virtual ~RAPass() noexcept;

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  //! Run the pass (overrides \ref CBPass::process); `zone` is kept in `_zone`
  //! and used for the pass's temporary allocations.
  virtual Error process(Zone* zone) noexcept override;

  //! Run the register allocator for a given function `func`.
  virtual Error compile(CCFunc* func) noexcept;

  //! Called by `compile()` to prepare the register allocator to process the
  //! given function. It should reset and set-up everything (i.e. no states
  //! from a previous compilation should prevail).
  virtual Error prepare(CCFunc* func) noexcept;

  //! Called after `compile()` to clean everything up, no matter if it
  //! succeeded or failed.
  virtual void cleanup() noexcept;

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the associated `CodeCompiler`.
  ASMJIT_INLINE CodeCompiler* cc() const noexcept { return static_cast<CodeCompiler*>(_cb); }

  //! Get the function being processed.
  ASMJIT_INLINE CCFunc* getFunc() const noexcept { return _func; }
  //! Get the stop node.
  ASMJIT_INLINE CBNode* getStop() const noexcept { return _stop; }

  //! Get the extra block (used to insert code after the function body).
  ASMJIT_INLINE CBNode* getExtraBlock() const noexcept { return _extraBlock; }
  //! Set the extra block.
  ASMJIT_INLINE void setExtraBlock(CBNode* node) noexcept { _extraBlock = node; }

  // --------------------------------------------------------------------------
  // [State]
  // --------------------------------------------------------------------------

  //! Get the current state.
  ASMJIT_INLINE RAState* getState() const { return _state; }

  //! Load the current state from `src` state.
  virtual void loadState(RAState* src) = 0;

  //! Save the current state, returning a new `RAState` instance.
  virtual RAState* saveState() = 0;

  //! Change the current state to `src` state.
  virtual void switchState(RAState* src) = 0;

  //! Change the current state to the intersection of two states `a` and `b`.
  virtual void intersectStates(RAState* a, RAState* b) = 0;

  // --------------------------------------------------------------------------
  // [Context]
  // --------------------------------------------------------------------------

  //! Assign an RA-specific id to `vreg` (its index in `_contextVd`); a no-op
  //! if the id has already been assigned.
  ASMJIT_INLINE Error assignRAId(VirtReg* vreg) noexcept {
    // Likely as a single virtual register would be mostly used more than once,
    // this means that each virtual register will hit one bad case (doesn't
    // have id) and then all likely cases.
    if (ASMJIT_LIKELY(vreg->_raId != kInvalidValue)) return kErrorOk;

    uint32_t raId = static_cast<uint32_t>(_contextVd.getLength());
    ASMJIT_PROPAGATE(_contextVd.append(&_heap, vreg));

    vreg->_raId = raId;
    return kErrorOk;
  }

  // --------------------------------------------------------------------------
  // [Mem]
  // --------------------------------------------------------------------------

  //! \internal - Allocate a new memory cell for `vreg`.
  RACell* _newVarCell(VirtReg* vreg);
  //! \internal - Allocate a new stack cell of `size` bytes and `alignment`.
  RACell* _newStackCell(uint32_t size, uint32_t alignment);

  //! Get the memory cell of `vreg`, allocating a new one on first use.
  ASMJIT_INLINE RACell* getVarCell(VirtReg* vreg) {
    RACell* cell = vreg->getMemCell();
    return cell ? cell : _newVarCell(vreg);
  }

  //! Assign final offsets to all memory cells.
  virtual Error resolveCellOffsets();

  // --------------------------------------------------------------------------
  // [Bits]
  // --------------------------------------------------------------------------

  //! Allocate a new zeroed \ref RABits of `len` words from `_zone`.
  ASMJIT_INLINE RABits* newBits(uint32_t len) {
    return static_cast<RABits*>(
      _zone->allocZeroed(static_cast<size_t>(len) * RABits::kEntitySize));
  }

  //! Duplicate the `src` bit-array of `len` words into `_zone`.
  ASMJIT_INLINE RABits* copyBits(const RABits* src, uint32_t len) {
    return static_cast<RABits*>(
      _zone->dup(src, static_cast<size_t>(len) * RABits::kEntitySize));
  }

  // --------------------------------------------------------------------------
  // [Fetch]
  // --------------------------------------------------------------------------

  //! Fetch.
  //!
  //! Fetch iterates over all nodes and gathers information about all variables
  //! used. The process generates information required by register allocator,
  //! variable liveness analysis and translator.
  virtual Error fetch() = 0;

  // --------------------------------------------------------------------------
  // [Unreachable Code]
  // --------------------------------------------------------------------------

  //! Add unreachable-flow data to the unreachable flow list.
  ASMJIT_INLINE Error addUnreachableNode(CBNode* node) {
    ZoneList<CBNode*>::Link* link = _zone->allocT<ZoneList<CBNode*>::Link>();
    if (!link) return DebugUtils::errored(kErrorNoHeapMemory);

    link->setValue(node);
    _unreachableList.append(link);

    return kErrorOk;
  }

  //! Remove unreachable code.
  virtual Error removeUnreachableCode();

  // --------------------------------------------------------------------------
  // [Code-Flow]
  // --------------------------------------------------------------------------

  //! Add returning node (i.e. node that returns and where liveness analysis
  //! should start).
  ASMJIT_INLINE Error addReturningNode(CBNode* node) {
    ZoneList<CBNode*>::Link* link = _zone->allocT<ZoneList<CBNode*>::Link>();
    if (!link) return DebugUtils::errored(kErrorNoHeapMemory);

    link->setValue(node);
    _returningList.append(link);

    return kErrorOk;
  }

  //! Add jump-flow data to the jcc flow list.
  ASMJIT_INLINE Error addJccNode(CBNode* node) {
    ZoneList<CBNode*>::Link* link = _zone->allocT<ZoneList<CBNode*>::Link>();
    if (!link) return DebugUtils::errored(kErrorNoHeapMemory);

    link->setValue(node);
    _jccList.append(link);

    return kErrorOk;
  }

  // --------------------------------------------------------------------------
  // [Analyze]
  // --------------------------------------------------------------------------

  //! Perform variable liveness analysis.
  //!
  //! Analysis phase iterates over nodes in reverse order and generates a bit
  //! array describing variables that are alive at every node in the function.
  //! When the analysis start all variables are assumed dead. When a read or
  //! read/write operations of a variable is detected the variable becomes
  //! alive; when only write operation is detected the variable becomes dead.
  //!
  //! When a label is found all jumps to that label are followed and analysis
  //! repeats until all variables are resolved.
  virtual Error livenessAnalysis();

  // --------------------------------------------------------------------------
  // [Annotate]
  // --------------------------------------------------------------------------

  //! Annotate nodes (implemented by the backend).
  virtual Error annotate() = 0;
  //! Format an inline comment for `node` into `dst` (liveness and tied regs).
  virtual Error formatInlineComment(StringBuilder& dst, CBNode* node);

  // --------------------------------------------------------------------------
  // [Translate]
  // --------------------------------------------------------------------------

  //! Translate code by allocating registers and handling state changes.
  virtual Error translate() = 0;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  Zone* _zone;                          //!< Zone passed to `process()`.
  ZoneHeap _heap;                       //!< ZoneHeap that uses `_zone`.

  CCFunc* _func;                        //!< Function being processed.
  CBNode* _stop;                        //!< Stop node.
  CBNode* _extraBlock;                  //!< Node that is used to insert extra code after the function body.

  //! \internal
  //!
  //! Offset (how many bytes to add) to `VarMap` to get `TiedReg` array. Used
  //! by liveness analysis shared across all backends. This is needed because
  //! `VarMap` is a base class for a specialized version that liveness analysis
  //! doesn't use, it just needs `TiedReg` array.
  uint32_t _varMapToVaListOffset;

  uint8_t _emitComments;                //!< Whether to emit comments.

  ZoneList<CBNode*> _unreachableList;   //!< Unreachable nodes.
  ZoneList<CBNode*> _returningList;     //!< Returning nodes.
  ZoneList<CBNode*> _jccList;           //!< Jump nodes.

  ZoneVector<VirtReg*> _contextVd;      //!< All variables used by the current function.

  RACell* _memVarCells;                 //!< Memory used to spill variables.
  RACell* _memStackCells;               //!< Memory used to allocate memory on the stack.

  uint32_t _mem1ByteVarsUsed;           //!< Count of 1-byte cells.
  uint32_t _mem2ByteVarsUsed;           //!< Count of 2-byte cells.
  uint32_t _mem4ByteVarsUsed;           //!< Count of 4-byte cells.
  uint32_t _mem8ByteVarsUsed;           //!< Count of 8-byte cells.
  uint32_t _mem16ByteVarsUsed;          //!< Count of 16-byte cells.
  uint32_t _mem32ByteVarsUsed;          //!< Count of 32-byte cells.
  uint32_t _mem64ByteVarsUsed;          //!< Count of 64-byte cells.
  uint32_t _memStackCellsUsed;          //!< Count of stack memory cells.

  uint32_t _memMaxAlign;                //!< Maximum memory alignment used by the function.
  uint32_t _memVarTotal;                //!< Count of bytes used by variables.
  uint32_t _memStackTotal;              //!< Count of bytes used by stack.
  uint32_t _memAllTotal;                //!< Count of bytes used by variables and stack after alignment.

  uint32_t _annotationLength;           //!< Default length of an annotated instruction.
  RAState* _state;                      //!< Current RA state.
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_BASE_REGALLOC_P_H

View File

@@ -9,52 +9,15 @@
// [Dependencies]
#include "../base/assembler.h"
#include "../base/cpuinfo.h"
#include "../base/runtime.h"
// TODO: Rename this, or make call conv independent of CompilerFunc.
#include "../base/compilerfunc.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Runtime - Utilities]
// ============================================================================
// Get the natural stack alignment (in bytes) of the host architecture/OS.
static ASMJIT_INLINE uint32_t hostStackAlignment() noexcept {
  // By default a pointer-size stack alignment is assumed.
  uint32_t alignment = sizeof(intptr_t);

  // ARM & ARM64
  // -----------
  //
  //   - 32-bit ARM requires stack to be aligned to 8 bytes.
  //   - 64-bit ARM requires stack to be aligned to 16 bytes.
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
  alignment = ASMJIT_ARCH_ARM32 ? 8 : 16;
#endif

  // X86 & X64
  // ---------
  //
  //   - 32-bit X86 requires stack to be aligned to 4 bytes. Modern Linux, Apple,
  //     and UNIX platforms guarantee 16-byte stack alignment even in 32-bit mode,
  //     but I'm not sure about all other UNIX operating systems, because 16-byte
  //     alignment is an addition to an older specification.
  //   - 64-bit X86 requires stack to be aligned to 16 bytes.
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
  int modernOS = ASMJIT_OS_LINUX || // Linux & ANDROID.
                 ASMJIT_OS_MAC  ||  // OSX and iOS.
                 ASMJIT_OS_BSD;     // BSD variants.
  alignment = ASMJIT_ARCH_X64 || modernOS ? 16 : 4;
#endif

  return alignment;
}
static ASMJIT_INLINE void hostFlushInstructionCache(void* p, size_t size) noexcept {
static ASMJIT_INLINE void hostFlushInstructionCache(const void* p, size_t size) noexcept {
// Only useful on non-x86 architectures.
#if !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
# if ASMJIT_OS_WINDOWS
@@ -67,22 +30,46 @@ static ASMJIT_INLINE void hostFlushInstructionCache(void* p, size_t size) noexce
#endif // !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
}
// Detect the natural stack alignment (in bytes) of the host architecture/OS.
static ASMJIT_INLINE uint32_t hostDetectNaturalStackAlignment() noexcept {
  // Alignment is assumed to match the pointer-size by default.
  uint32_t alignment = sizeof(intptr_t);

  // X86 & X64
  // ---------
  //
  //   - 32-bit X86 requires stack to be aligned to 4 bytes. Modern Linux, Mac
  //     and UNIX guarantees 16-byte stack alignment even on 32-bit. I'm not
  //     sure about all other UNIX operating systems, because 16-byte alignment
  //     is an addition to an older specification.
  //   - 64-bit X86 requires stack to be aligned to at least 16 bytes.
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
  int kIsModernOS = ASMJIT_OS_LINUX || // Linux & ANDROID.
                    ASMJIT_OS_MAC  ||  // OSX and iOS.
                    ASMJIT_OS_BSD  ;   // BSD variants.
  alignment = ASMJIT_ARCH_X64 || kIsModernOS ? 16 : 4;
#endif

  // ARM32 & ARM64
  // -------------
  //
  //   - 32-bit ARM requires stack to be aligned to 8 bytes.
  //   - 64-bit ARM requires stack to be aligned to 16 bytes.
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
  alignment = ASMJIT_ARCH_ARM32 ? 8 : 16;
#endif

  return alignment;
}
// ============================================================================
// [asmjit::Runtime - Construction / Destruction]
// ============================================================================
Runtime::Runtime() noexcept
: _runtimeType(kTypeNone),
_allocType(kVMemAllocFreeable),
_cpuInfo(),
_stackAlignment(0),
_cdeclConv(kCallConvNone),
_stdCallConv(kCallConvNone),
_baseAddress(kNoBaseAddress),
_sizeLimit(0) {
::memset(_reserved, 0, sizeof(_reserved));
}
: _codeInfo(),
_runtimeType(kRuntimeNone),
_allocType(VMemMgr::kAllocFreeable) {}
Runtime::~Runtime() noexcept {}
// ============================================================================
@@ -90,12 +77,14 @@ Runtime::~Runtime() noexcept {}
// ============================================================================
HostRuntime::HostRuntime() noexcept {
_runtimeType = kTypeJit;
_cpuInfo = CpuInfo::getHost();
_runtimeType = kRuntimeJit;
_stackAlignment = hostStackAlignment();
_cdeclConv = kCallConvHostCDecl;
_stdCallConv = kCallConvHostStdCall;
// Setup the CodeInfo of this Runtime.
_codeInfo._archInfo = CpuInfo::getHost().getArchInfo();
_codeInfo._stackAlignment = static_cast<uint8_t>(hostDetectNaturalStackAlignment());
_codeInfo._cdeclCallConv = CallConv::kIdHostCDecl;
_codeInfo._stdCallConv = CallConv::kIdHostStdCall;
_codeInfo._fastCallConv = CallConv::kIdHostFastCall;
}
HostRuntime::~HostRuntime() noexcept {}
@@ -103,66 +92,10 @@ HostRuntime::~HostRuntime() noexcept {}
// [asmjit::HostRuntime - Interface]
// ============================================================================
void HostRuntime::flush(void* p, size_t size) noexcept {
void HostRuntime::flush(const void* p, size_t size) noexcept {
hostFlushInstructionCache(p, size);
}
// ============================================================================
// [asmjit::StaticRuntime - Construction / Destruction]
// ============================================================================
StaticRuntime::StaticRuntime(void* baseAddress, size_t sizeLimit) noexcept {
_sizeLimit = sizeLimit;
_baseAddress = static_cast<Ptr>((uintptr_t)baseAddress);
}
StaticRuntime::~StaticRuntime() noexcept {}
// ============================================================================
// [asmjit::StaticRuntime - Interface]
// ============================================================================
// Relocate the code generated by `assembler` to the fixed base address and
// return the location in `dst`. On success `_baseAddress` advances past the
// relocated code so the next `add()` appends after it; on failure `dst` is
// set to null and an error code is returned.
Error StaticRuntime::add(void** dst, Assembler* assembler) noexcept {
  size_t codeSize = assembler->getCodeSize();
  size_t sizeLimit = _sizeLimit;

  // Nothing to relocate.
  if (codeSize == 0) {
    *dst = nullptr;
    return kErrorNoCodeGenerated;
  }

  // A non-zero `_sizeLimit` caps how much code can still be added.
  if (sizeLimit != 0 && sizeLimit < codeSize) {
    *dst = nullptr;
    return kErrorCodeTooLarge;
  }

  Ptr baseAddress = _baseAddress;
  uint8_t* p = static_cast<uint8_t*>((void*)static_cast<uintptr_t>(baseAddress));

  // Since the base address is known the `relocSize` returned should be equal
  // to `codeSize`. It's better to fail if they don't match instead of passing
  // silently.
  size_t relocSize = assembler->relocCode(p, baseAddress);
  if (relocSize == 0 || codeSize != relocSize) {
    *dst = nullptr;
    return kErrorInvalidState;
  }

  // Consume the address range. BUG FIX: the original code decremented the
  // local `sizeLimit` copy, which had no effect - the `_sizeLimit` member has
  // to shrink together with the advancing `_baseAddress` (0 means unlimited,
  // so it's left untouched in that case).
  _baseAddress += codeSize;
  if (sizeLimit)
    _sizeLimit = sizeLimit - codeSize;

  flush(p, codeSize);

  *dst = p;
  return kErrorOk;
}
// Counterpart of `add()`; always succeeds because this runtime owns no memory.
Error StaticRuntime::release(void* p) noexcept {
  // There is nothing to release as `StaticRuntime` doesn't manage any memory.
  ASMJIT_UNUSED(p);
  return kErrorOk;
}
// ============================================================================
// [asmjit::JitRuntime - Construction / Destruction]
// ============================================================================
@@ -174,25 +107,25 @@ JitRuntime::~JitRuntime() noexcept {}
// [asmjit::JitRuntime - Interface]
// ============================================================================
Error JitRuntime::add(void** dst, Assembler* assembler) noexcept {
size_t codeSize = assembler->getCodeSize();
if (codeSize == 0) {
Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
size_t codeSize = code->getCodeSize();
if (ASMJIT_UNLIKELY(codeSize == 0)) {
*dst = nullptr;
return kErrorNoCodeGenerated;
return DebugUtils::errored(kErrorNoCodeGenerated);
}
void* p = _memMgr.alloc(codeSize, getAllocType());
if (p == nullptr) {
if (ASMJIT_UNLIKELY(!p)) {
*dst = nullptr;
return kErrorNoVirtualMemory;
return DebugUtils::errored(kErrorNoVirtualMemory);
}
// Relocate the code and release the unused memory back to `VMemMgr`.
size_t relocSize = assembler->relocCode(p);
if (relocSize == 0) {
size_t relocSize = code->relocate(p);
if (ASMJIT_UNLIKELY(relocSize == 0)) {
*dst = nullptr;
_memMgr.release(p);
return kErrorInvalidState;
return DebugUtils::errored(kErrorInvalidState);
}
if (relocSize < codeSize)
@@ -204,11 +137,11 @@ Error JitRuntime::add(void** dst, Assembler* assembler) noexcept {
return kErrorOk;
}
Error JitRuntime::release(void* p) noexcept {
Error JitRuntime::_release(void* p) noexcept {
return _memMgr.release(p);
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"

View File

@@ -9,11 +9,11 @@
#define _ASMJIT_BASE_RUNTIME_H
// [Dependencies]
#include "../base/cpuinfo.h"
#include "../base/codeholder.h"
#include "../base/vmem.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
@@ -21,8 +21,7 @@ namespace asmjit {
// [Forward Declarations]
// ============================================================================
class Assembler;
class CpuInfo;
class CodeHolder;
//! \addtogroup asmjit_base
//! \{
@@ -33,17 +32,13 @@ class CpuInfo;
//! Base runtime.
class ASMJIT_VIRTAPI Runtime {
public:
ASMJIT_NO_COPY(Runtime)
public:
ASMJIT_NONCOPYABLE(Runtime)
// --------------------------------------------------------------------------
// [asmjit::RuntimeType]
// --------------------------------------------------------------------------
ASMJIT_ENUM(Type) {
kTypeNone = 0,
kTypeJit = 1,
kTypeRemote = 2
ASMJIT_ENUM(RuntimeType) {
kRuntimeNone = 0,
kRuntimeJit = 1,
kRuntimeRemote = 2
};
// --------------------------------------------------------------------------
@@ -59,83 +54,66 @@ class ASMJIT_VIRTAPI Runtime {
// [Accessors]
// --------------------------------------------------------------------------
//! Get CodeInfo of this runtime.
//!
//! CodeInfo can be used to setup a CodeHolder in case you plan to generate a
//! code compatible and executable by this Runtime.
ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; }
//! Get the Runtime's architecture type, see \ref ArchInfo::Type.
ASMJIT_INLINE uint32_t getArchType() const noexcept { return _codeInfo.getArchType(); }
//! Get the Runtime's architecture sub-type, see \ref ArchInfo::SubType.
ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _codeInfo.getArchSubType(); }
//! Get the runtime type, see \ref Type.
ASMJIT_INLINE uint32_t getRuntimeType() const noexcept { return _runtimeType; }
//! Get stack alignment of the target.
ASMJIT_INLINE uint32_t getStackAlignment() const noexcept { return _stackAlignment; }
//! Get the CDECL calling convention conforming to the runtime's ABI.
//!
//! NOTE: This is a default calling convention used by the runtime's target.
ASMJIT_INLINE uint32_t getCdeclConv() const noexcept { return _cdeclConv; }
//! Get the STDCALL calling convention conforming to the runtime's ABI.
//!
//! NOTE: STDCALL calling convention is only used by 32-bit x86 target. On
//! all other targets it's mapped to CDECL and calling `getStdcallConv()` will
//! return the same as `getCdeclConv()`.
ASMJIT_INLINE uint32_t getStdCallConv() const noexcept { return _stdCallConv; }
//! Get CPU information.
ASMJIT_INLINE const CpuInfo& getCpuInfo() const noexcept { return _cpuInfo; }
//! Set CPU information.
ASMJIT_INLINE void setCpuInfo(const CpuInfo& ci) noexcept { _cpuInfo = ci; }
//! Get whether the runtime has a base address.
ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _baseAddress != kNoBaseAddress; }
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const noexcept { return _baseAddress; }
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
//! Allocate a memory needed for a code generated by `assembler` and
// NOTE: To allow passing function pointers to `add()` and `release()` the
// virtual methods are prefixed with `_` and called from templates.
template<typename Func>
ASMJIT_INLINE Error add(Func* dst, CodeHolder* code) noexcept {
return _add(Internal::ptr_cast<void**, Func*>(dst), code);
}
template<typename Func>
ASMJIT_INLINE Error release(Func dst) noexcept {
return _release(Internal::ptr_cast<void*, Func>(dst));
}
//! Allocate a memory needed for a code stored in the \ref CodeHolder and
//! relocate it to the target location.
//!
//! The beginning of the memory allocated for the function is returned in
//! `dst`. Returns Status code as \ref ErrorCode, on failure `dst` is set to
//! `nullptr`.
virtual Error add(void** dst, Assembler* assembler) noexcept = 0;
//! `dst`. If failed the \ref Error code is returned and `dst` is set to null
//! (this means that you don't have to set it to null before calling `add()`).
virtual Error _add(void** dst, CodeHolder* code) noexcept = 0;
//! Release memory allocated by `add`.
virtual Error release(void* p) noexcept = 0;
//! Release `p` allocated by `add()`.
virtual Error _release(void* p) noexcept = 0;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Type of the runtime.
uint8_t _runtimeType;
//! Type of the allocation.
uint8_t _allocType;
//! Runtime's stack alignment.
uint8_t _stackAlignment;
//! CDECL calling convention conforming to runtime ABI.
uint8_t _cdeclConv;
//! STDCALL calling convention conforming to runtime ABI.
uint8_t _stdCallConv;
//! \internal
uint8_t _reserved[3];
//! Runtime CPU information.
CpuInfo _cpuInfo;
//! Base address (-1 means no base address).
Ptr _baseAddress;
//! Maximum size of the code that can be added to the runtime (0=unlimited).
size_t _sizeLimit;
CodeInfo _codeInfo; //!< Basic information about the Runtime's code.
uint8_t _runtimeType; //!< Type of the runtime.
uint8_t _allocType; //!< Type of the allocator the Runtime uses.
uint8_t _reserved[6]; //!< \internal
};
// ============================================================================
// [asmjit::HostRuntime]
// ============================================================================
//! Base runtime for JIT code generation.
//! Runtime designed to be used in the same process the code is generated in.
class ASMJIT_VIRTAPI HostRuntime : public Runtime {
public:
ASMJIT_NO_COPY(HostRuntime)
public:
ASMJIT_NONCOPYABLE(HostRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
@@ -154,70 +132,24 @@ class ASMJIT_VIRTAPI HostRuntime : public Runtime {
//!
//! This member function is called after the code has been copied to the
//! destination buffer. It is only useful for JIT code generation as it
//! causes a flush of the processor cache.
//! causes a flush of the processor's cache.
//!
//! Flushing is basically a NOP under X86/X64, but is needed by architectures
//! that do not have a transparent instruction cache.
//! that do not have a transparent instruction cache like ARM.
//!
//! This function can also be overridden to improve compatibility with tools
//! such as Valgrind, however, it's not an official part of AsmJit.
ASMJIT_API virtual void flush(void* p, size_t size) noexcept;
};
// ============================================================================
// [asmjit::StaticRuntime]
// ============================================================================
//! JIT static runtime.
//!
//! JIT static runtime can be used to generate code to a memory location that
//! is known.
class ASMJIT_VIRTAPI StaticRuntime : public HostRuntime {
public:
ASMJIT_NO_COPY(StaticRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `StaticRuntime` instance.
//!
//! The `address` specifies a fixed target address, which will be used as a
//! base address for relocation, and `sizeLimit` specifies the maximum size
//! of a code that can be copied to it. If there is no limit `sizeLimit`
//! should be zero.
ASMJIT_API StaticRuntime(void* baseAddress, size_t sizeLimit = 0) noexcept;
//! Destroy the `StaticRuntime` instance.
ASMJIT_API virtual ~StaticRuntime() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const noexcept { return _baseAddress; }
//! Get the maximum size of the code that can be relocated/stored in the target.
//!
//! Returns zero if unlimited.
ASMJIT_INLINE size_t getSizeLimit() const noexcept { return _sizeLimit; }
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error add(void** dst, Assembler* assembler) noexcept;
ASMJIT_API virtual Error release(void* p) noexcept;
ASMJIT_API virtual void flush(const void* p, size_t size) noexcept;
};
// ============================================================================
// [asmjit::JitRuntime]
// ============================================================================
//! JIT runtime.
//! Runtime designed to store and execute code generated at runtime (JIT).
class ASMJIT_VIRTAPI JitRuntime : public HostRuntime {
public:
ASMJIT_NO_COPY(JitRuntime)
public:
ASMJIT_NONCOPYABLE(JitRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
@@ -244,8 +176,8 @@ class ASMJIT_VIRTAPI JitRuntime : public HostRuntime {
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error add(void** dst, Assembler* assembler) noexcept;
ASMJIT_API virtual Error release(void* p) noexcept;
ASMJIT_API Error _add(void** dst, CodeHolder* code) noexcept override;
ASMJIT_API Error _release(void* p) noexcept override;
// --------------------------------------------------------------------------
// [Members]
@@ -260,7 +192,7 @@ class ASMJIT_VIRTAPI JitRuntime : public HostRuntime {
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_RUNTIME_H

File diff suppressed because it is too large Load Diff

View File

@@ -8,11 +8,11 @@
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/containers.h"
#include "../base/string.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
@@ -31,20 +31,16 @@ StringBuilder::StringBuilder() noexcept
StringBuilder::~StringBuilder() noexcept {
if (_canFree)
ASMJIT_FREE(_data);
Internal::releaseMemory(_data);
}
// ============================================================================
// [asmjit::StringBuilder - Prepare / Reserve]
// ============================================================================
char* StringBuilder::prepare(uint32_t op, size_t len) noexcept {
// --------------------------------------------------------------------------
// [Set]
// --------------------------------------------------------------------------
ASMJIT_FAVOR_SIZE char* StringBuilder::prepare(uint32_t op, size_t len) noexcept {
if (op == kStringOpSet) {
// We don't care here, but we can't return a NULL pointer since it indicates
// We don't care here, but we can't return a null pointer since it indicates
// failure in memory allocation.
if (len == 0) {
if (_data != StringBuilder_empty)
@@ -62,14 +58,14 @@ char* StringBuilder::prepare(uint32_t op, size_t len) noexcept {
if (to < 256 - sizeof(intptr_t))
to = 256 - sizeof(intptr_t);
char* newData = static_cast<char*>(ASMJIT_ALLOC(to + sizeof(intptr_t)));
if (newData == nullptr) {
char* newData = static_cast<char*>(Internal::allocMemory(to + sizeof(intptr_t)));
if (!newData) {
clear();
return nullptr;
}
if (_canFree)
ASMJIT_FREE(_data);
Internal::releaseMemory(_data);
_data = newData;
_capacity = to + sizeof(intptr_t) - 1;
@@ -82,14 +78,9 @@ char* StringBuilder::prepare(uint32_t op, size_t len) noexcept {
ASMJIT_ASSERT(_length <= _capacity);
return _data;
}
// --------------------------------------------------------------------------
// [Append]
// --------------------------------------------------------------------------
else {
// We don't care here, but we can't return a nullptr pointer since it indicates
// failure in memory allocation.
// We don't care here, but we can't return a null pointer since it indicates
// failure of memory allocation.
if (len == 0)
return _data + _length;
@@ -114,14 +105,12 @@ char* StringBuilder::prepare(uint32_t op, size_t len) noexcept {
}
to = Utils::alignTo<size_t>(to, sizeof(intptr_t));
char* newData = static_cast<char*>(ASMJIT_ALLOC(to + sizeof(intptr_t)));
if (newData == nullptr)
return nullptr;
char* newData = static_cast<char*>(Internal::allocMemory(to + sizeof(intptr_t)));
if (!newData) return nullptr;
::memcpy(newData, _data, _length);
if (_canFree)
ASMJIT_FREE(_data);
Internal::releaseMemory(_data);
_data = newData;
_capacity = to + sizeof(intptr_t) - 1;
@@ -137,27 +126,27 @@ char* StringBuilder::prepare(uint32_t op, size_t len) noexcept {
}
}
bool StringBuilder::reserve(size_t to) noexcept {
ASMJIT_FAVOR_SIZE Error StringBuilder::reserve(size_t to) noexcept {
if (_capacity >= to)
return true;
return kErrorOk;
if (to >= IntTraits<size_t>::maxValue() - sizeof(intptr_t) * 2)
return false;
return DebugUtils::errored(kErrorNoHeapMemory);
to = Utils::alignTo<size_t>(to, sizeof(intptr_t));
char* newData = static_cast<char*>(Internal::allocMemory(to + sizeof(intptr_t)));
char* newData = static_cast<char*>(ASMJIT_ALLOC(to + sizeof(intptr_t)));
if (newData == nullptr)
return false;
if (!newData)
return DebugUtils::errored(kErrorNoHeapMemory);
::memcpy(newData, _data, _length + 1);
if (_canFree)
ASMJIT_FREE(_data);
Internal::releaseMemory(_data);
_data = newData;
_capacity = to + sizeof(intptr_t) - 1;
_canFree = true;
return true;
return kErrorOk;
}
// ============================================================================
@@ -174,39 +163,36 @@ void StringBuilder::clear() noexcept {
// [asmjit::StringBuilder - Methods]
// ============================================================================
bool StringBuilder::_opString(uint32_t op, const char* str, size_t len) noexcept {
if (len == kInvalidIndex)
len = str != nullptr ? ::strlen(str) : static_cast<size_t>(0);
Error StringBuilder::_opString(uint32_t op, const char* str, size_t len) noexcept {
if (len == Globals::kInvalidIndex)
len = str ? ::strlen(str) : static_cast<size_t>(0);
char* p = prepare(op, len);
if (p == nullptr)
return false;
if (!p) return DebugUtils::errored(kErrorNoHeapMemory);
::memcpy(p, str, len);
return true;
return kErrorOk;
}
bool StringBuilder::_opChar(uint32_t op, char c) noexcept {
Error StringBuilder::_opChar(uint32_t op, char c) noexcept {
char* p = prepare(op, 1);
if (p == nullptr)
return false;
if (!p) return DebugUtils::errored(kErrorNoHeapMemory);
*p = c;
return true;
return kErrorOk;
}
bool StringBuilder::_opChars(uint32_t op, char c, size_t len) noexcept {
char* p = prepare(op, len);
if (p == nullptr)
return false;
Error StringBuilder::_opChars(uint32_t op, char c, size_t n) noexcept {
char* p = prepare(op, n);
if (!p) return DebugUtils::errored(kErrorNoHeapMemory);
::memset(p, c, len);
return true;
::memset(p, c, n);
return kErrorOk;
}
static const char StringBuilder_numbers[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept {
Error StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept {
if (base < 2 || base > 36)
base = 10;
@@ -282,8 +268,8 @@ bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t wid
size_t prefixLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberLength;
char* data = prepare(op, prefixLength + width + numberLength);
if (data == nullptr)
return false;
if (!data)
return DebugUtils::errored(kErrorNoHeapMemory);
::memcpy(data, p, prefixLength);
data += prefixLength;
@@ -292,28 +278,24 @@ bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t wid
data += width;
::memcpy(data, p + prefixLength, numberLength);
return true;
return kErrorOk;
}
bool StringBuilder::_opHex(uint32_t op, const void* data, size_t len) noexcept {
if (len >= IntTraits<size_t>::maxValue() / 2)
return false;
Error StringBuilder::_opHex(uint32_t op, const void* data, size_t len) noexcept {
char* dst;
char* dst = prepare(op, len * 2);
if (dst == nullptr)
return false;
if (len >= IntTraits<size_t>::maxValue() / 2 || !(dst = prepare(op, len * 2)))
return DebugUtils::errored(kErrorNoHeapMemory);;
const char* src = static_cast<const char*>(data);
for (size_t i = 0; i < len; i++, dst += 2, src += 1)
{
for (size_t i = 0; i < len; i++, dst += 2, src++) {
dst[0] = StringBuilder_numbers[(src[0] >> 4) & 0xF];
dst[1] = StringBuilder_numbers[(src[0] ) & 0xF];
}
return true;
return kErrorOk;
}
bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept {
Error StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept {
char buf[1024];
vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
@@ -322,7 +304,7 @@ bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcep
return _opString(op, buf);
}
bool StringBuilder::setFormat(const char* fmt, ...) noexcept {
Error StringBuilder::setFormat(const char* fmt, ...) noexcept {
bool result;
va_list ap;
@@ -333,7 +315,7 @@ bool StringBuilder::setFormat(const char* fmt, ...) noexcept {
return result;
}
bool StringBuilder::appendFormat(const char* fmt, ...) noexcept {
Error StringBuilder::appendFormat(const char* fmt, ...) noexcept {
bool result;
va_list ap;
@@ -351,19 +333,16 @@ bool StringBuilder::eq(const char* str, size_t len) const noexcept {
size_t aLength = _length;
size_t bLength = len;
if (bLength == kInvalidIndex) {
if (bLength == Globals::kInvalidIndex) {
size_t i;
for (i = 0; i < aLength; i++) {
for (i = 0; i < aLength; i++)
if (aData[i] != bData[i] || bData[i] == 0)
return false;
}
return bData[i] == 0;
}
else {
if (aLength != bLength)
return false;
return ::memcmp(aData, bData, aLength) == 0;
}
}
@@ -371,4 +350,4 @@ bool StringBuilder::eq(const char* str, size_t len) const noexcept {
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"

289
src/asmjit/base/string.h Normal file
View File

@@ -0,0 +1,289 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_STRING_H
#define _ASMJIT_BASE_STRING_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::SmallString]
// ============================================================================

//! Small string is a template that helps to create strings that can be either
//! statically allocated if they are small, or externally allocated in case
//! their length exceed the limit. The `WholeSize` represents the size of the
//! whole `SmallString` structure, based on that size the maximum size of the
//! internal buffer is determined.
//!
//! NOTE(review): the structure never owns externally attached data; whoever
//! calls `setExternal()` keeps ownership of that buffer — confirm at call
//! sites.
template<size_t WholeSize>
class SmallString {
public:
  //! Maximum number of characters that fit into the embedded buffer; one
  //! byte is reserved for the null terminator (buffer is `WholeSize - 4`
  //! bytes, 4 bytes are taken by `_length`).
  enum { kMaxEmbeddedLength = WholeSize - 5 };

  //! Create an empty (zeroed) string.
  ASMJIT_INLINE SmallString() noexcept { reset(); }
  //! Reset to an empty embedded string by zeroing the whole structure.
  ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); }

  //! Get whether the string is empty.
  ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; }
  //! Get whether the current content is stored in the embedded buffer.
  ASMJIT_INLINE bool isEmbedded() const noexcept { return _length <= kMaxEmbeddedLength; }
  //! Get whether a string of length `len` would be stored embedded.
  ASMJIT_INLINE bool mustEmbed(size_t len) const noexcept { return len <= kMaxEmbeddedLength; }

  //! Get the length of the string, in characters.
  ASMJIT_INLINE uint32_t getLength() const noexcept { return _length; }
  //! Get the string data; `_length` decides whether the embedded buffer or
  //! the external pointer is active.
  ASMJIT_INLINE char* getData() const noexcept {
    return _length <= kMaxEmbeddedLength ? const_cast<char*>(_embedded) : _external[1];
  }

  //! Copy `len` characters of `data` into the embedded buffer and terminate it.
  ASMJIT_INLINE void setEmbedded(const char* data, size_t len) noexcept {
    ASMJIT_ASSERT(len <= kMaxEmbeddedLength);
    _length = static_cast<uint32_t>(len);
    ::memcpy(_embedded, data, len);
    _embedded[len] = '\0';
  }

  //! Attach external `data` of length `len` (no copy, the pointer is stored).
  ASMJIT_INLINE void setExternal(const char* data, size_t len) noexcept {
    ASMJIT_ASSERT(len > kMaxEmbeddedLength);
    ASMJIT_ASSERT(len <= ~static_cast<uint32_t>(0));
    _length = static_cast<uint32_t>(len);
    // `_external[0]` starts at offset 0 and would overlap `_length`, so the
    // pointer is deliberately kept in `_external[1]`.
    _external[1] = const_cast<char*>(data);
  }

  //! Storage: a 32-bit length followed by either an embedded character
  //! buffer or (via the union's second pointer slot) an external pointer.
  union {
    struct {
      uint32_t _length;
      char _embedded[WholeSize - 4];
    };
    char* _external[2];
  };
};
// ============================================================================
// [asmjit::StringBuilder]
// ============================================================================

//! String builder.
//!
//! String builder was designed to be able to build a string using append-like
//! operations to append numbers, other strings, or single characters. It can
//! allocate its own buffer or use a buffer created on the stack.
//!
//! String builder contains methods specific to AsmJit functionality, used for
//! logging or HTML output.
class StringBuilder {
public:
  ASMJIT_NONCOPYABLE(StringBuilder)

  //! \internal
  //!
  //! String operation.
  ASMJIT_ENUM(OpType) {
    kStringOpSet = 0,                    //!< Replace the current string by a given content.
    kStringOpAppend = 1                  //!< Append a given content to the current string.
  };

  //! \internal
  //!
  //! String format flags.
  ASMJIT_ENUM(StringFormatFlags) {
    kStringFormatShowSign = 0x00000001,  //!< Always emit '+' for non-negative numbers.
    kStringFormatShowSpace = 0x00000002, //!< Emit ' ' in place of a missing sign.
    kStringFormatAlternate = 0x00000004, //!< Alternate form (base prefix).
    kStringFormatSigned = 0x80000000     //!< Interpret the input value as signed.
  };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create an empty string builder.
  ASMJIT_API StringBuilder() noexcept;
  //! Destroy the string builder, releasing the buffer if it owns one.
  ASMJIT_API ~StringBuilder() noexcept;

  //! Create a string builder without initializing any member; used by
  //! `StringBuilderTmp`, which sets all members itself.
  ASMJIT_INLINE StringBuilder(const _NoInit&) noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get string builder capacity.
  ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; }
  //! Get length.
  ASMJIT_INLINE size_t getLength() const noexcept { return _length; }

  //! Get null-terminated string data.
  ASMJIT_INLINE char* getData() noexcept { return _data; }
  //! Get null-terminated string data (const).
  ASMJIT_INLINE const char* getData() const noexcept { return _data; }

  // --------------------------------------------------------------------------
  // [Prepare / Reserve]
  // --------------------------------------------------------------------------

  //! Prepare to set/append: make room for `len` characters and return the
  //! write position, or null on allocation failure.
  ASMJIT_API char* prepare(uint32_t op, size_t len) noexcept;

  //! Reserve `to` bytes in string builder.
  ASMJIT_API Error reserve(size_t to) noexcept;

  // --------------------------------------------------------------------------
  // [Clear]
  // --------------------------------------------------------------------------

  //! Clear the content in String builder.
  ASMJIT_API void clear() noexcept;

  // --------------------------------------------------------------------------
  // [Op]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Set or append (per `op`) `str` of `len` characters; `kInvalidIndex`
  //! means `str` is null-terminated.
  ASMJIT_API Error _opString(uint32_t op, const char* str, size_t len = Globals::kInvalidIndex) noexcept;
  //! \internal
  //!
  //! Set or append a `printf`-style formatted string (va_list form).
  ASMJIT_API Error _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept;
  //! \internal
  //!
  //! Set or append a single character `c`.
  ASMJIT_API Error _opChar(uint32_t op, char c) noexcept;
  //! \internal
  //!
  //! Set or append character `c` repeated `n` times.
  ASMJIT_API Error _opChars(uint32_t op, char c, size_t n) noexcept;
  //! \internal
  //!
  //! Set or append the number `i` formatted in `base` padded to `width`.
  ASMJIT_API Error _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept;
  //! \internal
  //!
  //! Set or append `len` bytes of `data` rendered as a HEX string.
  ASMJIT_API Error _opHex(uint32_t op, const void* data, size_t len) noexcept;

  // --------------------------------------------------------------------------
  // [Set]
  // --------------------------------------------------------------------------

  //! Replace the current string with `str` having `len` characters (or `kInvalidIndex` if it's null terminated).
  ASMJIT_INLINE Error setString(const char* str, size_t len = Globals::kInvalidIndex) noexcept { return _opString(kStringOpSet, str, len); }
  //! Replace the current content by a formatted string `fmt`.
  ASMJIT_API Error setFormat(const char* fmt, ...) noexcept;
  //! Replace the current content by a formatted string `fmt` (va_list version).
  ASMJIT_INLINE Error setFormatVA(const char* fmt, va_list ap) noexcept { return _opVFormat(kStringOpSet, fmt, ap); }

  //! Replace the current content by a single `c` character.
  ASMJIT_INLINE Error setChar(char c) noexcept { return _opChar(kStringOpSet, c); }
  //! Replace the current content by `c` character `n` times.
  ASMJIT_INLINE Error setChars(char c, size_t n) noexcept { return _opChars(kStringOpSet, c, n); }

  //! Replace the current content by a formatted integer `i` (signed).
  ASMJIT_INLINE Error setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
    return _opNumber(kStringOpSet, i, base, width, flags | kStringFormatSigned);
  }

  //! Replace the current content by a formatted integer `i` (unsigned).
  ASMJIT_INLINE Error setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
    return _opNumber(kStringOpSet, i, base, width, flags);
  }

  //! Replace the current content by the given `data` converted to a HEX string.
  ASMJIT_INLINE Error setHex(const void* data, size_t len) noexcept {
    return _opHex(kStringOpSet, data, len);
  }

  // --------------------------------------------------------------------------
  // [Append]
  // --------------------------------------------------------------------------

  //! Append string `str` having `len` characters (or `kInvalidIndex` if it's null terminated).
  ASMJIT_INLINE Error appendString(const char* str, size_t len = Globals::kInvalidIndex) noexcept { return _opString(kStringOpAppend, str, len); }
  //! Append a formatted string `fmt`.
  ASMJIT_API Error appendFormat(const char* fmt, ...) noexcept;
  //! Append a formatted string `fmt` (va_list version).
  ASMJIT_INLINE Error appendFormatVA(const char* fmt, va_list ap) noexcept { return _opVFormat(kStringOpAppend, fmt, ap); }

  //! Append a single `c` character.
  ASMJIT_INLINE Error appendChar(char c) noexcept { return _opChar(kStringOpAppend, c); }
  //! Append `c` character `n` times.
  ASMJIT_INLINE Error appendChars(char c, size_t n) noexcept { return _opChars(kStringOpAppend, c, n); }

  //! Append `i` formatted as a signed integer.
  ASMJIT_INLINE Error appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
    return _opNumber(kStringOpAppend, static_cast<uint64_t>(i), base, width, flags | kStringFormatSigned);
  }

  //! Append `i` formatted as an unsigned integer.
  ASMJIT_INLINE Error appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
    return _opNumber(kStringOpAppend, i, base, width, flags);
  }

  //! Append the given `data` converted to a HEX string.
  ASMJIT_INLINE Error appendHex(const void* data, size_t len) noexcept {
    return _opHex(kStringOpAppend, data, len);
  }

  // --------------------------------------------------------------------------
  // [Eq]
  // --------------------------------------------------------------------------

  //! Check for equality with other `str` of length `len`.
  ASMJIT_API bool eq(const char* str, size_t len = Globals::kInvalidIndex) const noexcept;
  //! Check for equality with `other`.
  ASMJIT_INLINE bool eq(const StringBuilder& other) const noexcept { return eq(other._data); }

  // --------------------------------------------------------------------------
  // [Operator Overload]
  // --------------------------------------------------------------------------

  ASMJIT_INLINE bool operator==(const StringBuilder& other) const noexcept { return eq(other); }
  ASMJIT_INLINE bool operator!=(const StringBuilder& other) const noexcept { return !eq(other); }

  ASMJIT_INLINE bool operator==(const char* str) const noexcept { return eq(str); }
  ASMJIT_INLINE bool operator!=(const char* str) const noexcept { return !eq(str); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  char* _data;                           //!< String data.
  size_t _length;                        //!< String length.
  size_t _capacity;                      //!< String capacity.
  size_t _canFree;                       //!< If the string data can be freed.
};
// ============================================================================
// [asmjit::StringBuilderTmp]
// ============================================================================

//! Temporary string builder, has statically allocated `N` bytes.
//!
//! Starts on the embedded (stack) buffer and only falls back to heap
//! allocation when the content outgrows `N` bytes; `_canFree` starts false so
//! the embedded buffer is never passed to the deallocator.
template<size_t N>
class StringBuilderTmp : public StringBuilder {
public:
  ASMJIT_NONCOPYABLE(StringBuilderTmp<N>)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create an empty builder backed by the embedded buffer. Uses the
  //! `NoInit` base constructor and then initializes every member by hand.
  ASMJIT_INLINE StringBuilderTmp() noexcept : StringBuilder(NoInit) {
    _data = _embeddedData;
    _data[0] = 0;

    _length = 0;
    _capacity = N;
    _canFree = false;
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Embedded data.
  //!
  //! Sized to `N + 1` (terminator) rounded down to pointer alignment after
  //! adding `sizeof(intptr_t)`, matching the allocation rounding used by
  //! `StringBuilder`.
  char _embeddedData[static_cast<size_t>(
    N + 1 + sizeof(intptr_t)) & ~static_cast<size_t>(sizeof(intptr_t) - 1)];
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_STRING_H

View File

@@ -10,115 +10,11 @@
// [Dependencies]
#include "../base/utils.h"
#if ASMJIT_OS_POSIX
# include <time.h>
# include <unistd.h>
#endif // ASMJIT_OS_POSIX
#if ASMJIT_OS_MAC
# include <mach/mach_time.h>
#endif // ASMJIT_OS_MAC
#if ASMJIT_OS_WINDOWS
# if defined(_MSC_VER) && _MSC_VER >= 1400
# include <intrin.h>
# else
# define _InterlockedCompareExchange InterlockedCompareExchange
# endif // _MSC_VER
#endif // ASMJIT_OS_WINDOWS
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CpuTicks - Windows]
// ============================================================================
#if ASMJIT_OS_WINDOWS
static volatile uint32_t Utils_hiResTicks;
static volatile double Utils_hiResFreq;
uint32_t Utils::getTickCount() noexcept {
do {
uint32_t hiResOk = Utils_hiResTicks;
if (hiResOk == 1) {
LARGE_INTEGER now;
if (!::QueryPerformanceCounter(&now))
break;
return (int64_t)(double(now.QuadPart) / Utils_hiResFreq);
}
if (hiResOk == 0) {
LARGE_INTEGER qpf;
if (!::QueryPerformanceFrequency(&qpf)) {
_InterlockedCompareExchange((LONG*)&Utils_hiResTicks, 0xFFFFFFFF, 0);
break;
}
LARGE_INTEGER now;
if (!::QueryPerformanceCounter(&now)) {
_InterlockedCompareExchange((LONG*)&Utils_hiResTicks, 0xFFFFFFFF, 0);
break;
}
double freqDouble = double(qpf.QuadPart) / 1000.0;
Utils_hiResFreq = freqDouble;
_InterlockedCompareExchange((LONG*)&Utils_hiResTicks, 1, 0);
return static_cast<uint32_t>(
static_cast<int64_t>(double(now.QuadPart) / freqDouble) & 0xFFFFFFFF);
}
} while (0);
// Bail to a less precise GetTickCount().
return ::GetTickCount();
}
// ============================================================================
// [asmjit::CpuTicks - Mac]
// ============================================================================
#elif ASMJIT_OS_MAC
static mach_timebase_info_data_t CpuTicks_machTime;
uint32_t Utils::getTickCount() noexcept {
// Initialize the first time CpuTicks::now() is called (See Apple's QA1398).
if (CpuTicks_machTime.denom == 0) {
if (mach_timebase_info(&CpuTicks_machTime) != KERN_SUCCESS)
return 0;
}
// mach_absolute_time() returns nanoseconds, we need just milliseconds.
uint64_t t = mach_absolute_time() / 1000000;
t = t * CpuTicks_machTime.numer / CpuTicks_machTime.denom;
return static_cast<uint32_t>(t & 0xFFFFFFFFU);
}
// ============================================================================
// [asmjit::CpuTicks - Posix]
// ============================================================================
#else
uint32_t Utils::getTickCount() noexcept {
#if defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
return 0;
uint64_t t = (uint64_t(ts.tv_sec ) * 1000) + (uint64_t(ts.tv_nsec) / 1000000);
return static_cast<uint32_t>(t & 0xFFFFFFFFU);
#else // _POSIX_MONOTONIC_CLOCK
#error "[asmjit] Utils::getTickCount() is not implemented for your target OS."
return 0;
#endif // _POSIX_MONOTONIC_CLOCK
}
#endif // ASMJIT_OS
// ============================================================================
// [asmjit::Utils - Unit]
// ============================================================================
@@ -127,93 +23,84 @@ uint32_t Utils::getTickCount() noexcept {
UNIT(base_utils) {
uint32_t i;
INFO("IntTraits<>.");
EXPECT(IntTraits<signed char>::kIsSigned,"IntTraits<signed char> should report signed.");
EXPECT(IntTraits<short>::kIsSigned, "IntTraits<signed short> should report signed.");
EXPECT(IntTraits<int>::kIsSigned, "IntTraits<int> should report signed.");
EXPECT(IntTraits<long>::kIsSigned, "IntTraits<long> should report signed.");
INFO("IntTraits<>");
EXPECT(IntTraits<signed char>::kIsSigned,"IntTraits<signed char> should report signed");
EXPECT(IntTraits<short>::kIsSigned, "IntTraits<signed short> should report signed");
EXPECT(IntTraits<int>::kIsSigned, "IntTraits<int> should report signed");
EXPECT(IntTraits<long>::kIsSigned, "IntTraits<long> should report signed");
EXPECT(IntTraits<unsigned char>::kIsUnsigned, "IntTraits<unsigned char> should report unsigned.");
EXPECT(IntTraits<unsigned short>::kIsUnsigned, "IntTraits<unsigned short> should report unsigned.");
EXPECT(IntTraits<unsigned int>::kIsUnsigned, "IntTraits<unsigned int> should report unsigned.");
EXPECT(IntTraits<unsigned long>::kIsUnsigned, "IntTraits<unsigned long> should report unsigned.");
EXPECT(IntTraits<unsigned char>::kIsUnsigned, "IntTraits<unsigned char> should report unsigned");
EXPECT(IntTraits<unsigned short>::kIsUnsigned, "IntTraits<unsigned short> should report unsigned");
EXPECT(IntTraits<unsigned int>::kIsUnsigned, "IntTraits<unsigned int> should report unsigned");
EXPECT(IntTraits<unsigned long>::kIsUnsigned, "IntTraits<unsigned long> should report unsigned");
EXPECT(IntTraits<intptr_t>::kIsSigned, "IntTraits<intptr_t> should report signed.");
EXPECT(IntTraits<uintptr_t>::kIsUnsigned, "IntTraits<uintptr_t> should report unsigned.");
EXPECT(IntTraits<intptr_t>::kIsSigned, "IntTraits<intptr_t> should report signed");
EXPECT(IntTraits<uintptr_t>::kIsUnsigned, "IntTraits<uintptr_t> should report unsigned");
EXPECT(IntTraits<intptr_t>::kIsIntPtr, "IntTraits<intptr_t> should report intptr_t type.");
EXPECT(IntTraits<uintptr_t>::kIsIntPtr, "IntTraits<uintptr_t> should report intptr_t type.");
EXPECT(IntTraits<intptr_t>::kIsIntPtr, "IntTraits<intptr_t> should report intptr_t type");
EXPECT(IntTraits<uintptr_t>::kIsIntPtr, "IntTraits<uintptr_t> should report intptr_t type");
INFO("Utils::iMin()/iMax().");
EXPECT(Utils::iMin<int>( 0, -1) == -1, "Utils::iMin<int> should return a minimum value.");
EXPECT(Utils::iMin<int>(-1, -2) == -2, "Utils::iMin<int> should return a minimum value.");
EXPECT(Utils::iMin<int>( 1, 2) == 1, "Utils::iMin<int> should return a minimum value.");
INFO("Utils::inInterval()");
EXPECT(Utils::inInterval<int>(11 , 10, 20) == true , "Utils::inInterval<int> should return true if inside");
EXPECT(Utils::inInterval<int>(101, 10, 20) == false, "Utils::inInterval<int> should return false if outside");
EXPECT(Utils::iMax<int>( 0, -1) == 0, "Utils::iMax<int> should return a maximum value.");
EXPECT(Utils::iMax<int>(-1, -2) == -1, "Utils::iMax<int> should return a maximum value.");
EXPECT(Utils::iMax<int>( 1, 2) == 2, "Utils::iMax<int> should return a maximum value.");
INFO("Utils::isInt8()");
EXPECT(Utils::isInt8(-128) == true , "Utils::isInt8<> should return true if inside");
EXPECT(Utils::isInt8( 127) == true , "Utils::isInt8<> should return true if inside");
EXPECT(Utils::isInt8(-129) == false, "Utils::isInt8<> should return false if outside");
EXPECT(Utils::isInt8( 128) == false, "Utils::isInt8<> should return false if outside");
INFO("Utils::inInterval().");
EXPECT(Utils::inInterval<int>(11 , 10, 20) == true , "Utils::inInterval<int> should return true if inside.");
EXPECT(Utils::inInterval<int>(101, 10, 20) == false, "Utils::inInterval<int> should return false if outside.");
INFO("Utils::isInt16()");
EXPECT(Utils::isInt16(-32768) == true , "Utils::isInt16<> should return true if inside");
EXPECT(Utils::isInt16( 32767) == true , "Utils::isInt16<> should return true if inside");
EXPECT(Utils::isInt16(-32769) == false, "Utils::isInt16<> should return false if outside");
EXPECT(Utils::isInt16( 32768) == false, "Utils::isInt16<> should return false if outside");
INFO("Utils::isInt8().");
EXPECT(Utils::isInt8(-128) == true , "Utils::isInt8<> should return true if inside.");
EXPECT(Utils::isInt8( 127) == true , "Utils::isInt8<> should return true if inside.");
EXPECT(Utils::isInt8(-129) == false, "Utils::isInt8<> should return false if outside.");
EXPECT(Utils::isInt8( 128) == false, "Utils::isInt8<> should return false if outside.");
INFO("Utils::isInt32()");
EXPECT(Utils::isInt32( 2147483647 ) == true, "Utils::isInt32<int> should return true if inside");
EXPECT(Utils::isInt32(-2147483647 - 1) == true, "Utils::isInt32<int> should return true if inside");
EXPECT(Utils::isInt32(ASMJIT_UINT64_C(2147483648)) == false, "Utils::isInt32<int> should return false if outside");
EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == false, "Utils::isInt32<int> should return false if outside");
EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isInt32<int> should return false if outside");
INFO("Utils::isInt16().");
EXPECT(Utils::isInt16(-32768) == true , "Utils::isInt16<> should return true if inside.");
EXPECT(Utils::isInt16( 32767) == true , "Utils::isInt16<> should return true if inside.");
EXPECT(Utils::isInt16(-32769) == false, "Utils::isInt16<> should return false if outside.");
EXPECT(Utils::isInt16( 32768) == false, "Utils::isInt16<> should return false if outside.");
INFO("Utils::isUInt8()");
EXPECT(Utils::isUInt8(0) == true , "Utils::isUInt8<> should return true if inside");
EXPECT(Utils::isUInt8(255) == true , "Utils::isUInt8<> should return true if inside");
EXPECT(Utils::isUInt8(256) == false, "Utils::isUInt8<> should return false if outside");
EXPECT(Utils::isUInt8(-1) == false, "Utils::isUInt8<> should return false if negative");
INFO("Utils::isInt32().");
EXPECT(Utils::isInt32( 2147483647 ) == true, "Utils::isInt32<int> should return true if inside.");
EXPECT(Utils::isInt32(-2147483647 - 1) == true, "Utils::isInt32<int> should return true if inside.");
EXPECT(Utils::isInt32(ASMJIT_UINT64_C(2147483648)) == false, "Utils::isInt32<int> should return false if outside.");
EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == false, "Utils::isInt32<int> should return false if outside.");
EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isInt32<int> should return false if outside.");
INFO("Utils::isUInt12()");
EXPECT(Utils::isUInt12(0) == true , "Utils::isUInt12<> should return true if inside");
EXPECT(Utils::isUInt12(4095) == true , "Utils::isUInt12<> should return true if inside");
EXPECT(Utils::isUInt12(4096) == false, "Utils::isUInt12<> should return false if outside");
EXPECT(Utils::isUInt12(-1) == false, "Utils::isUInt12<> should return false if negative");
INFO("Utils::isUInt8().");
EXPECT(Utils::isUInt8(0) == true , "Utils::isUInt8<> should return true if inside.");
EXPECT(Utils::isUInt8(255) == true , "Utils::isUInt8<> should return true if inside.");
EXPECT(Utils::isUInt8(256) == false, "Utils::isUInt8<> should return false if outside.");
EXPECT(Utils::isUInt8(-1) == false, "Utils::isUInt8<> should return false if negative.");
INFO("Utils::isUInt16()");
EXPECT(Utils::isUInt16(0) == true , "Utils::isUInt16<> should return true if inside");
EXPECT(Utils::isUInt16(65535) == true , "Utils::isUInt16<> should return true if inside");
EXPECT(Utils::isUInt16(65536) == false, "Utils::isUInt16<> should return false if outside");
EXPECT(Utils::isUInt16(-1) == false, "Utils::isUInt16<> should return false if negative");
INFO("Utils::isUInt12().");
EXPECT(Utils::isUInt12(0) == true , "Utils::isUInt12<> should return true if inside.");
EXPECT(Utils::isUInt12(4095) == true , "Utils::isUInt12<> should return true if inside.");
EXPECT(Utils::isUInt12(4096) == false, "Utils::isUInt12<> should return false if outside.");
EXPECT(Utils::isUInt12(-1) == false, "Utils::isUInt12<> should return false if negative.");
INFO("Utils::isUInt32()");
EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == true, "Utils::isUInt32<uint64_t> should return true if inside");
EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isUInt32<uint64_t> should return false if outside");
EXPECT(Utils::isUInt32(-1) == false, "Utils::isUInt32<int> should return false if negative");
INFO("Utils::isUInt16().");
EXPECT(Utils::isUInt16(0) == true , "Utils::isUInt16<> should return true if inside.");
EXPECT(Utils::isUInt16(65535) == true , "Utils::isUInt16<> should return true if inside.");
EXPECT(Utils::isUInt16(65536) == false, "Utils::isUInt16<> should return false if outside.");
EXPECT(Utils::isUInt16(-1) == false, "Utils::isUInt16<> should return false if negative.");
INFO("Utils::isUInt32().");
EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == true, "Utils::isUInt32<uint64_t> should return true if inside.");
EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isUInt32<uint64_t> should return false if outside.");
EXPECT(Utils::isUInt32(-1) == false, "Utils::isUInt32<int> should return false if negative.");
INFO("Utils::isPower2().");
INFO("Utils::isPower2()");
for (i = 0; i < 64; i++) {
EXPECT(Utils::isPowerOf2(static_cast<uint64_t>(1) << i) == true,
"Utils::isPower2() didn't report power of 2.");
"Utils::isPower2() didn't report power of 2");
EXPECT(Utils::isPowerOf2((static_cast<uint64_t>(1) << i) ^ 0x001101) == false,
"Utils::isPower2() didn't report not power of 2.");
"Utils::isPower2() didn't report not power of 2");
}
INFO("Utils::mask().");
INFO("Utils::mask()");
for (i = 0; i < 32; i++) {
EXPECT(Utils::mask(i) == (1 << i),
"Utils::mask(%u) should return %X.", i, (1 << i));
"Utils::mask(%u) should return %X", i, (1 << i));
}
INFO("Utils::bits().");
INFO("Utils::bits()");
for (i = 0; i < 32; i++) {
uint32_t expectedBits = 0;
@@ -221,19 +108,19 @@ UNIT(base_utils) {
expectedBits |= static_cast<uint32_t>(1) << b;
EXPECT(Utils::bits(i) == expectedBits,
"Utils::bits(%u) should return %X.", i, expectedBits);
"Utils::bits(%u) should return %X", i, expectedBits);
}
INFO("Utils::hasBit().");
INFO("Utils::hasBit()");
for (i = 0; i < 32; i++) {
EXPECT(Utils::hasBit((1 << i), i) == true,
"Utils::hasBit(%X, %u) should return true.", (1 << i), i);
"Utils::hasBit(%X, %u) should return true", (1 << i), i);
}
INFO("Utils::bitCount().");
INFO("Utils::bitCount()");
for (i = 0; i < 32; i++) {
EXPECT(Utils::bitCount((1 << i)) == 1,
"Utils::bitCount(%X) should return true.", (1 << i));
"Utils::bitCount(%X) should return true", (1 << i));
}
EXPECT(Utils::bitCount(0x000000F0) == 4, "");
EXPECT(Utils::bitCount(0x10101010) == 4, "");
@@ -241,40 +128,40 @@ UNIT(base_utils) {
EXPECT(Utils::bitCount(0xFFFFFFF7) == 31, "");
EXPECT(Utils::bitCount(0x7FFFFFFF) == 31, "");
INFO("Utils::findFirstBit().");
INFO("Utils::findFirstBit()");
for (i = 0; i < 32; i++) {
EXPECT(Utils::findFirstBit((1 << i)) == i,
"Utils::findFirstBit(%X) should return %u.", (1 << i), i);
"Utils::findFirstBit(%X) should return %u", (1 << i), i);
}
INFO("Utils::keepNOnesFromRight().");
INFO("Utils::keepNOnesFromRight()");
EXPECT(Utils::keepNOnesFromRight(0xF, 1) == 0x1, "");
EXPECT(Utils::keepNOnesFromRight(0xF, 2) == 0x3, "");
EXPECT(Utils::keepNOnesFromRight(0xF, 3) == 0x7, "");
EXPECT(Utils::keepNOnesFromRight(0x5, 2) == 0x5, "");
EXPECT(Utils::keepNOnesFromRight(0xD, 2) == 0x5, "");
INFO("Utils::isAligned().");
INFO("Utils::isAligned()");
EXPECT(Utils::isAligned<size_t>(0xFFFF, 4) == false, "");
EXPECT(Utils::isAligned<size_t>(0xFFF4, 4) == true , "");
EXPECT(Utils::isAligned<size_t>(0xFFF8, 8) == true , "");
EXPECT(Utils::isAligned<size_t>(0xFFF0, 16) == true , "");
INFO("Utils::alignTo().");
INFO("Utils::alignTo()");
EXPECT(Utils::alignTo<size_t>(0xFFFF, 4) == 0x10000, "");
EXPECT(Utils::alignTo<size_t>(0xFFF4, 4) == 0x0FFF4, "");
EXPECT(Utils::alignTo<size_t>(0xFFF8, 8) == 0x0FFF8, "");
EXPECT(Utils::alignTo<size_t>(0xFFF0, 16) == 0x0FFF0, "");
EXPECT(Utils::alignTo<size_t>(0xFFF0, 32) == 0x10000, "");
INFO("Utils::alignToPowerOf2().");
INFO("Utils::alignToPowerOf2()");
EXPECT(Utils::alignToPowerOf2<size_t>(0xFFFF) == 0x10000, "");
EXPECT(Utils::alignToPowerOf2<size_t>(0xF123) == 0x10000, "");
EXPECT(Utils::alignToPowerOf2<size_t>(0x0F00) == 0x01000, "");
EXPECT(Utils::alignToPowerOf2<size_t>(0x0100) == 0x00100, "");
EXPECT(Utils::alignToPowerOf2<size_t>(0x1001) == 0x02000, "");
INFO("Utils::alignDiff().");
INFO("Utils::alignDiff()");
EXPECT(Utils::alignDiff<size_t>(0xFFFF, 4) == 1, "");
EXPECT(Utils::alignDiff<size_t>(0xFFF4, 4) == 0, "");
EXPECT(Utils::alignDiff<size_t>(0xFFF8, 8) == 0, "");
@@ -286,4 +173,4 @@ UNIT(base_utils) {
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"

View File

@@ -16,7 +16,7 @@
#endif // ASMJIT_OS_WINDOWS
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
@@ -30,9 +30,7 @@ namespace asmjit {
//! \internal
//! \{
template<size_t Size, int IsSigned>
struct IntTraitsPrivate {
// Let it fail if not specialized!
};
struct IntTraitsPrivate {}; // Let it fail if not specialized!
template<> struct IntTraitsPrivate<1, 0> { typedef int IntType; typedef int8_t SignedType; typedef uint8_t UnsignedType; };
template<> struct IntTraitsPrivate<1, 1> { typedef int IntType; typedef int8_t SignedType; typedef uint8_t UnsignedType; };
@@ -50,15 +48,13 @@ template<> struct IntTraitsPrivate<8, 1> { typedef int64_t IntType; typedef int6
template<typename T>
struct IntTraits {
enum {
kIsSigned = static_cast<T>(~static_cast<T>(0)) < static_cast<T>(0),
kIsSigned = static_cast<T>(~static_cast<T>(0)) < static_cast<T>(0),
kIsUnsigned = !kIsSigned,
kIs8Bit = sizeof(T) == 1,
kIs16Bit = sizeof(T) == 2,
kIs32Bit = sizeof(T) == 4,
kIs64Bit = sizeof(T) == 8,
kIsIntPtr = sizeof(T) == sizeof(intptr_t)
kIs8Bit = sizeof(T) == 1,
kIs16Bit = sizeof(T) == 2,
kIs32Bit = sizeof(T) == 4,
kIs64Bit = sizeof(T) == 8,
kIsIntPtr = sizeof(T) == sizeof(intptr_t)
};
typedef typename IntTraitsPrivate<sizeof(T), kIsSigned>::IntType IntType;
@@ -67,18 +63,12 @@ struct IntTraits {
//! Get a minimum value of `T`.
static ASMJIT_INLINE T minValue() noexcept {
if (kIsSigned)
return static_cast<T>((~static_cast<UnsignedType>(0) >> 1) + static_cast<UnsignedType>(1));
else
return static_cast<T>(0);
return kIsSigned ? T((~static_cast<UnsignedType>(0) >> 1) + static_cast<UnsignedType>(1)) : T(0);
}
//! Get a maximum value of `T`.
static ASMJIT_INLINE T maxValue() noexcept {
if (kIsSigned)
return static_cast<T>(~static_cast<UnsignedType>(0) >> 1);
else
return ~static_cast<T>(0);
return kIsSigned ? T(~static_cast<UnsignedType>(0) >> 1) : ~T(0);
}
};
@@ -120,17 +110,9 @@ struct Utils {
// [Pack / Unpack]
// --------------------------------------------------------------------------
//! Pack two 8-bit integer and one 16-bit integer into a 32-bit integer as it
//! is an array of `{b0,b1,w2}`.
static ASMJIT_INLINE uint32_t pack32_2x8_1x16(uint32_t b0, uint32_t b1, uint32_t w2) noexcept {
return ASMJIT_ARCH_LE ? b0 + (b1 << 8) + (w2 << 16)
: (b0 << 24) + (b1 << 16) + w2;
}
//! Pack four 8-bit integer into a 32-bit integer as it is an array of `{b0,b1,b2,b3}`.
static ASMJIT_INLINE uint32_t pack32_4x8(uint32_t b0, uint32_t b1, uint32_t b2, uint32_t b3) noexcept {
return ASMJIT_ARCH_LE ? b0 + (b1 << 8) + (b2 << 16) + (b3 << 24)
: (b0 << 24) + (b1 << 16) + (b2 << 8) + b3;
return ASMJIT_PACK32_4x8(b0, b1, b2, b3);
}
//! Pack two 32-bit integer into a 64-bit integer as it is an array of `{u0,u1}`.
@@ -151,19 +133,41 @@ struct Utils {
}
// --------------------------------------------------------------------------
// [Min/Max]
// [Lower/Upper]
// --------------------------------------------------------------------------
// Some environments declare `min()` and `max()` as preprocessor macros so it
// was decided to use different names to prevent such collision.
//! Get minimum value of `a` and `b`.
template<typename T>
static ASMJIT_INLINE T iMin(const T& a, const T& b) noexcept { return a < b ? a : b; }
//! Get maximum value of `a` and `b`.
static ASMJIT_INLINE T toLower(T c) noexcept { return c ^ (static_cast<T>(c >= T('A') && c <= T('Z')) << 5); }
template<typename T>
static ASMJIT_INLINE T iMax(const T& a, const T& b) noexcept { return a > b ? a : b; }
static ASMJIT_INLINE T toUpper(T c) noexcept { return c ^ (static_cast<T>(c >= T('a') && c <= T('z')) << 5); }
// --------------------------------------------------------------------------
// [Hash]
// --------------------------------------------------------------------------
// \internal
static ASMJIT_INLINE uint32_t hashRound(uint32_t hash, uint32_t c) noexcept { return hash * 65599 + c; }
// Get a hash of the given string `str` of `len` length. Length must be valid
// as this function doesn't check for a null terminator and allows it in the
// middle of the string.
static ASMJIT_INLINE uint32_t hashString(const char* str, size_t len) noexcept {
uint32_t hVal = 0;
for (uint32_t i = 0; i < len; i++)
hVal = hashRound(hVal, str[i]);
return hVal;
}
// --------------------------------------------------------------------------
// [Swap]
// --------------------------------------------------------------------------
template<typename T>
static ASMJIT_INLINE void swap(T& a, T& b) noexcept {
T tmp = a;
a = b;
b = tmp;
}
// --------------------------------------------------------------------------
// [InInterval]
@@ -179,9 +183,9 @@ struct Utils {
// [AsInt]
// --------------------------------------------------------------------------
//! Map an integer `x` of type `T` to an `int` or `int64_t`, depending on the
//! type. Used internally by AsmJit to dispatch an argument that can be an
//! arbitrary integer type into a function that accepts either `int` or
//! Map an integer `x` of type `T` to `int` or `int64_t` depending on the
//! type. Used internally by AsmJit to dispatch arguments that can be of
//! arbitrary integer type into a function argument that is either `int` or
//! `int64_t`.
template<typename T>
static ASMJIT_INLINE typename IntTraits<T>::IntType asInt(T x) noexcept {
@@ -467,15 +471,22 @@ struct Utils {
// [Alignment]
// --------------------------------------------------------------------------
template<typename T>
static ASMJIT_INLINE bool isAligned(T base, T alignment) noexcept {
return (base % alignment) == 0;
template<typename X, typename Y>
static ASMJIT_INLINE bool isAligned(X base, Y alignment) noexcept {
typedef typename IntTraitsPrivate<sizeof(X), 0>::UnsignedType U;
return ((U)base % (U)alignment) == 0;
}
//! Align `base` to `alignment`.
template<typename T>
static ASMJIT_INLINE T alignTo(T base, T alignment) noexcept {
return (base + (alignment - 1)) & ~(alignment - 1);
template<typename X, typename Y>
static ASMJIT_INLINE X alignTo(X x, Y alignment) noexcept {
typedef typename IntTraitsPrivate<sizeof(X), 0>::UnsignedType U;
return (X)( ((U)x + (U)(alignment - 1)) & ~(static_cast<U>(alignment) - 1) );
}
//! Get delta required to align `base` to `alignment`.
template<typename X, typename Y>
static ASMJIT_INLINE X alignDiff(X base, Y alignment) noexcept {
return alignTo(base, alignment) - base;
}
template<typename T>
@@ -505,12 +516,6 @@ struct Utils {
return base + 1;
}
//! Get delta required to align `base` to `alignment`.
template<typename T>
static ASMJIT_INLINE T alignDiff(T base, T alignment) noexcept {
return alignTo(base, alignment) - base;
}
// --------------------------------------------------------------------------
// [String]
// --------------------------------------------------------------------------
@@ -523,6 +528,33 @@ struct Utils {
return i;
}
static ASMJIT_INLINE const char* findPackedString(const char* p, uint32_t id) noexcept {
uint32_t i = 0;
while (i < id) {
while (p[0])
p++;
p++;
i++;
}
return p;
}
//! \internal
//!
//! Compare two instruction names.
//!
//! `a` is a null terminated instruction name from `???InstDB::nameData[]` table.
//! `b` is a non-null terminated instruction name passed to `???Inst::getIdByName()`.
static ASMJIT_INLINE int cmpInstName(const char* a, const char* b, size_t len) noexcept {
for (size_t i = 0; i < len; i++) {
int c = static_cast<int>(static_cast<uint8_t>(a[i])) -
static_cast<int>(static_cast<uint8_t>(b[i])) ;
if (c != 0) return c;
}
return static_cast<int>(a[len]);
}
// --------------------------------------------------------------------------
// [BSwap]
// --------------------------------------------------------------------------
@@ -968,13 +1000,6 @@ struct Utils {
static ASMJIT_INLINE void writeI64a(void* p, int64_t x) noexcept { writeI64x<8>(p, x); }
static ASMJIT_INLINE void writeI64u(void* p, int64_t x) noexcept { writeI64x<0>(p, x); }
// --------------------------------------------------------------------------
// [GetTickCount]
// --------------------------------------------------------------------------
//! Get the current CPU tick count, used for benchmarking (1ms resolution).
static ASMJIT_API uint32_t getTickCount() noexcept;
};
// ============================================================================
@@ -1237,104 +1262,30 @@ union UInt64 {
// [Members]
// --------------------------------------------------------------------------
//! 64-bit unsigned value.
uint64_t u64;
int8_t i8[8]; //!< 8-bit signed integer (8x).
uint8_t u8[8]; //!< 8-bit unsigned integer (8x).
uint32_t u32[2];
uint16_t u16[4];
uint8_t u8[8];
int16_t i16[4]; //!< 16-bit signed integer (4x).
uint16_t u16[4]; //!< 16-bit unsigned integer (4x).
int32_t i32[2]; //!< 32-bit signed integer (2x).
uint32_t u32[2]; //!< 32-bit unsigned integer (2x).
int64_t i64; //!< 64-bit signed integer.
uint64_t u64; //!< 64-bit unsigned integer.
float f32[2]; //!< 32-bit floating point (2x).
double f64; //!< 64-bit floating point.
struct {
#if ASMJIT_ARCH_LE
uint32_t lo, hi;
struct { float f32Lo, f32Hi; };
struct { int32_t i32Lo, i32Hi; };
struct { uint32_t u32Lo, u32Hi; };
#else
uint32_t hi, lo;
struct { float f32Hi, f32Lo; };
struct { int32_t i32Hi, i32Lo; };
struct { uint32_t u32Hi, u32Lo; };
#endif // ASMJIT_ARCH_LE
};
};
// ============================================================================
// [asmjit::Lock]
// ============================================================================
//! \internal
//!
//! Lock.
struct Lock {
ASMJIT_NO_COPY(Lock)
// --------------------------------------------------------------------------
// [Windows]
// --------------------------------------------------------------------------
#if ASMJIT_OS_WINDOWS
typedef CRITICAL_SECTION Handle;
//! Create a new `Lock` instance.
ASMJIT_INLINE Lock() noexcept { InitializeCriticalSection(&_handle); }
//! Destroy the `Lock` instance.
ASMJIT_INLINE ~Lock() noexcept { DeleteCriticalSection(&_handle); }
//! Lock.
ASMJIT_INLINE void lock() noexcept { EnterCriticalSection(&_handle); }
//! Unlock.
ASMJIT_INLINE void unlock() noexcept { LeaveCriticalSection(&_handle); }
#endif // ASMJIT_OS_WINDOWS
// --------------------------------------------------------------------------
// [Posix]
// --------------------------------------------------------------------------
#if ASMJIT_OS_POSIX
typedef pthread_mutex_t Handle;
//! Create a new `Lock` instance.
ASMJIT_INLINE Lock() noexcept { pthread_mutex_init(&_handle, nullptr); }
//! Destroy the `Lock` instance.
ASMJIT_INLINE ~Lock() noexcept { pthread_mutex_destroy(&_handle); }
//! Lock.
ASMJIT_INLINE void lock() noexcept { pthread_mutex_lock(&_handle); }
//! Unlock.
ASMJIT_INLINE void unlock() noexcept { pthread_mutex_unlock(&_handle); }
#endif // ASMJIT_OS_POSIX
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Native handle.
Handle _handle;
};
// ============================================================================
// [asmjit::AutoLock]
// ============================================================================
//! \internal
//!
//! Scoped lock.
struct AutoLock {
ASMJIT_NO_COPY(AutoLock)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE AutoLock(Lock& target) noexcept : _target(target) {
_target.lock();
}
ASMJIT_INLINE ~AutoLock() noexcept {
_target.unlock();
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Reference to the `Lock`.
Lock& _target;
};
//! \}
@@ -1342,7 +1293,7 @@ struct AutoLock {
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_UTILS_H

View File

@@ -8,33 +8,26 @@
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/globals.h"
#include "../base/osutils.h"
#include "../base/utils.h"
#include "../base/vmem.h"
#if ASMJIT_OS_POSIX
# include <sys/types.h>
# include <sys/mman.h>
# include <unistd.h>
#endif // ASMJIT_OS_POSIX
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
// This file contains implementation of virtual memory management for AsmJit
// library. The initial concept is to keep this implementation simple but
// efficient. There are several goals I decided to write implementation myself.
//
// Goals:
// library. There are several goals I decided to write implementation myself:
//
// - Granularity of allocated blocks is different than granularity for a typical
// C malloc. It is at least 64-bytes so Assembler/Compiler can guarantee the
// alignment required. Alignment requirements can grow in the future, but at
// the moment 64 bytes is safe (we may jump to 128 bytes if necessary or make
// it configurable).
// C malloc. It is at least 64-bytes so CodeEmitter can guarantee the alignment
// up to 64 bytes, which is the size of a cache-line and it's also required by
// AVX-512 aligned loads and stores. Alignment requirements can grow in the future,
// but at the moment 64 bytes is safe (we may jump to 128 bytes if necessary or
// make it configurable).
//
// - Keep memory manager information outside of the allocated virtual memory
// pages, because these pages allow executing of machine code and there should
// not be data required to keep track of these blocks. Another reason is that
// pages, because these pages allow machine code execution and there should
// be not data required to keep track of these blocks. Another reason is that
// some environments (i.e. iOS) allow to generate and run JIT code, but this
// code has to be set to [Executable, but not Writable].
//
@@ -44,7 +37,7 @@
// information related to allocated and unused blocks of memory. The size of
// a block is described by `MemNode::density`. Count of blocks is stored in
// `MemNode::blocks`. For example if density is 64 and count of blocks is 20,
// memory node contains 64*20 bytes of memory and smallest possible allocation
// memory node contains 64*20 bytes of memory and the smallest possible allocation
// (and also alignment) is 64 bytes. So density is also related to memory
// alignment. Binary trees (RB) are used to enable fast lookup into all addresses
// allocated by memory manager instance. This is used mainly by `VMemPrivate::release()`.
@@ -61,162 +54,6 @@
namespace asmjit {
// ============================================================================
// [asmjit::VMemUtil - Windows]
// ============================================================================
// Windows specific implementation using `VirtualAllocEx` and `VirtualFree`.
#if ASMJIT_OS_WINDOWS
struct VMemLocal {
// AsmJit allows to pass a `nullptr` handle to `VMemUtil`. This function is
// just a convenient way to convert such handle to the current process one.
ASMJIT_INLINE HANDLE getSafeProcessHandle(HANDLE hParam) const noexcept {
return hParam != nullptr ? hParam : hProcess;
}
size_t pageSize;
size_t pageGranularity;
HANDLE hProcess;
};
static VMemLocal vMemLocal;
static const VMemLocal& vMemGet() noexcept {
VMemLocal& vMem = vMemLocal;
if (!vMem.hProcess) {
SYSTEM_INFO info;
::GetSystemInfo(&info);
vMem.pageSize = Utils::alignToPowerOf2<uint32_t>(info.dwPageSize);
vMem.pageGranularity = info.dwAllocationGranularity;
vMem.hProcess = ::GetCurrentProcess();
}
return vMem;
};
size_t VMemUtil::getPageSize() noexcept {
const VMemLocal& vMem = vMemGet();
return vMem.pageSize;
}
size_t VMemUtil::getPageGranularity() noexcept {
const VMemLocal& vMem = vMemGet();
return vMem.pageGranularity;
}
void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) noexcept {
return allocProcessMemory(static_cast<HANDLE>(0), length, allocated, flags);
}
void* VMemUtil::allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags) noexcept {
if (length == 0)
return nullptr;
const VMemLocal& vMem = vMemGet();
hProcess = vMem.getSafeProcessHandle(hProcess);
// VirtualAlloc rounds allocated size to a page size automatically.
size_t mSize = Utils::alignTo(length, vMem.pageSize);
// Windows XP SP2 / Vista allow Data Excution Prevention (DEP).
DWORD protectFlags = 0;
if (flags & kVMemFlagExecutable)
protectFlags |= (flags & kVMemFlagWritable) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
else
protectFlags |= (flags & kVMemFlagWritable) ? PAGE_READWRITE : PAGE_READONLY;
LPVOID mBase = ::VirtualAllocEx(hProcess, nullptr, mSize, MEM_COMMIT | MEM_RESERVE, protectFlags);
if (mBase == nullptr)
return nullptr;
ASMJIT_ASSERT(Utils::isAligned<size_t>(
reinterpret_cast<size_t>(mBase), vMem.pageSize));
if (allocated != nullptr)
*allocated = mSize;
return mBase;
}
Error VMemUtil::release(void* addr, size_t length) noexcept {
return releaseProcessMemory(static_cast<HANDLE>(0), addr, length);
}
Error VMemUtil::releaseProcessMemory(HANDLE hProcess, void* addr, size_t /* length */) noexcept {
hProcess = vMemGet().getSafeProcessHandle(hProcess);
if (!::VirtualFreeEx(hProcess, addr, 0, MEM_RELEASE))
return kErrorInvalidState;
return kErrorOk;
}
#endif // ASMJIT_OS_WINDOWS
// ============================================================================
// [asmjit::VMemUtil - Posix]
// ============================================================================
// Posix specific implementation using `mmap` and `munmap`.
#if ASMJIT_OS_POSIX
// MacOS uses MAP_ANON instead of MAP_ANONYMOUS.
#if !defined(MAP_ANONYMOUS)
# define MAP_ANONYMOUS MAP_ANON
#endif // MAP_ANONYMOUS
struct VMemLocal {
size_t pageSize;
size_t pageGranularity;
};
static VMemLocal vMemLocal;
static const VMemLocal& vMemGet() noexcept {
VMemLocal& vMem = vMemLocal;
if (!vMem.pageSize) {
size_t pageSize = ::getpagesize();
vMem.pageSize = pageSize;
vMem.pageGranularity = Utils::iMax<size_t>(pageSize, 65536);
}
return vMem;
};
size_t VMemUtil::getPageSize() noexcept {
const VMemLocal& vMem = vMemGet();
return vMem.pageSize;
}
size_t VMemUtil::getPageGranularity() noexcept {
const VMemLocal& vMem = vMemGet();
return vMem.pageGranularity;
}
void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) noexcept {
const VMemLocal& vMem = vMemGet();
size_t msize = Utils::alignTo<size_t>(length, vMem.pageSize);
int protection = PROT_READ;
if (flags & kVMemFlagWritable ) protection |= PROT_WRITE;
if (flags & kVMemFlagExecutable) protection |= PROT_EXEC;
void* mbase = ::mmap(nullptr, msize, protection, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED)
return nullptr;
if (allocated != nullptr)
*allocated = msize;
return mbase;
}
Error VMemUtil::release(void* addr, size_t length) noexcept {
if (::munmap(addr, length) != 0)
return kErrorInvalidState;
return kErrorOk;
}
#endif // ASMJIT_OS_POSIX
// ============================================================================
// [asmjit::VMemMgr - BitOps]
// ============================================================================
@@ -225,9 +62,7 @@ Error VMemUtil::release(void* addr, size_t length) noexcept {
#define M_MOD(x, y) ((x) % (y))
//! \internal
enum {
kBitsPerEntity = (sizeof(size_t) * 8)
};
enum { kBitsPerEntity = (sizeof(size_t) * 8) };
//! \internal
//!
@@ -278,33 +113,29 @@ struct VMemMgr::RbNode {
// Implementation is based on article by Julienne Walker (Public Domain),
// including C code and original comments. Thanks for the excellent article.
// Left[0] and right[1] nodes.
RbNode* node[2];
// Virtual memory address.
uint8_t* mem;
// Whether the node is RED.
uint32_t red;
RbNode* node[2]; //!< Left[0] and right[1] nodes.
uint8_t* mem; //!< Virtual memory address.
uint32_t red; //!< Node color (red vs. black).
};
//! \internal
//!
//! Get whether the node is red (nullptr or node with red flag).
//! Get if the node is red (nullptr or node with red flag).
static ASMJIT_INLINE bool rbIsRed(RbNode* node) noexcept {
return node != nullptr && node->red;
return node && node->red;
}
//! \internal
//!
//! Check whether the RB tree is valid.
static int rbAssert(RbNode* root) noexcept {
if (root == nullptr)
return 1;
if (!root) return 1;
RbNode* ln = root->node[0];
RbNode* rn = root->node[1];
// Red violation.
ASMJIT_ASSERT( !(rbIsRed(root) && (rbIsRed(ln) || rbIsRed(rn))) );
ASMJIT_ASSERT(!(rbIsRed(root) && (rbIsRed(ln) || rbIsRed(rn))));
int lh = rbAssert(ln);
int rh = rbAssert(rn);
@@ -314,7 +145,7 @@ static int rbAssert(RbNode* root) noexcept {
ASMJIT_ASSERT(rn == nullptr || rn->mem > root->mem);
// Black violation.
ASMJIT_ASSERT( !(lh != 0 && rh != 0 && lh != rh) );
ASMJIT_ASSERT(!(lh != 0 && rh != 0 && lh != rh));
// Only count black links.
if (lh != 0 && rh != 0)
@@ -351,16 +182,7 @@ static ASMJIT_INLINE RbNode* rbRotateDouble(RbNode* root, int dir) noexcept {
// ============================================================================
struct VMemMgr::MemNode : public RbNode {
// --------------------------------------------------------------------------
// [Helpers]
// --------------------------------------------------------------------------
// Get available space.
ASMJIT_INLINE size_t getAvailable() const noexcept {
return size - used;
}
ASMJIT_INLINE void fillData(MemNode* other) noexcept {
ASMJIT_INLINE void init(MemNode* other) noexcept {
mem = other->mem;
size = other->size;
@@ -373,9 +195,8 @@ struct VMemMgr::MemNode : public RbNode {
baCont = other->baCont;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
// Get available space.
ASMJIT_INLINE size_t getAvailable() const noexcept { return size - used; }
MemNode* prev; // Prev node in list.
MemNode* next; // Next node in list.
@@ -398,18 +219,8 @@ struct VMemMgr::MemNode : public RbNode {
//!
//! Permanent node.
struct VMemMgr::PermanentNode {
// --------------------------------------------------------------------------
// [Helpers]
// --------------------------------------------------------------------------
//! Get available space.
ASMJIT_INLINE size_t getAvailable() const noexcept {
return size - used;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
ASMJIT_INLINE size_t getAvailable() const noexcept { return size - used; }
PermanentNode* prev; // Pointer to prev chunk or nullptr.
uint8_t* mem; // Base pointer (virtual memory address).
@@ -425,11 +236,11 @@ struct VMemMgr::PermanentNode {
//!
//! Helper to avoid `#ifdef`s in the code.
ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSize) noexcept {
uint32_t flags = kVMemFlagWritable | kVMemFlagExecutable;
uint32_t flags = OSUtils::kVMWritable | OSUtils::kVMExecutable;
#if !ASMJIT_OS_WINDOWS
return static_cast<uint8_t*>(VMemUtil::alloc(size, vSize, flags));
return static_cast<uint8_t*>(OSUtils::allocVirtualMemory(size, vSize, flags));
#else
return static_cast<uint8_t*>(VMemUtil::allocProcessMemory(self->_hProcess, size, vSize, flags));
return static_cast<uint8_t*>(OSUtils::allocProcessMemory(self->_hProcess, size, vSize, flags));
#endif
}
@@ -438,9 +249,9 @@ ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSiz
//! Helper to avoid `#ifdef`s in the code.
ASMJIT_INLINE Error vMemMgrReleaseVMem(VMemMgr* self, void* p, size_t vSize) noexcept {
#if !ASMJIT_OS_WINDOWS
return VMemUtil::release(p, vSize);
return OSUtils::releaseVirtualMemory(p, vSize);
#else
return VMemUtil::releaseProcessMemory(self->_hProcess, p, vSize);
return OSUtils::releaseProcessMemory(self->_hProcess, p, vSize);
#endif
}
@@ -459,22 +270,19 @@ static bool vMemMgrCheckTree(VMemMgr* self) noexcept {
static MemNode* vMemMgrCreateNode(VMemMgr* self, size_t size, size_t density) noexcept {
size_t vSize;
uint8_t* vmem = vMemMgrAllocVMem(self, size, &vSize);
// Out of memory.
if (vmem == nullptr)
return nullptr;
if (!vmem) return nullptr;
size_t blocks = (vSize / density);
size_t bsize = (((blocks + 7) >> 3) + sizeof(size_t) - 1) & ~(size_t)(sizeof(size_t) - 1);
MemNode* node = static_cast<MemNode*>(ASMJIT_ALLOC(sizeof(MemNode)));
uint8_t* data = static_cast<uint8_t*>(ASMJIT_ALLOC(bsize * 2));
MemNode* node = static_cast<MemNode*>(Internal::allocMemory(sizeof(MemNode)));
uint8_t* data = static_cast<uint8_t*>(Internal::allocMemory(bsize * 2));
// Out of memory.
if (node == nullptr || data == nullptr) {
if (!node || !data) {
vMemMgrReleaseVMem(self, vmem, vSize);
if (node) ASMJIT_FREE(node);
if (data) ASMJIT_FREE(data);
if (node) Internal::releaseMemory(node);
if (data) Internal::releaseMemory(data);
return nullptr;
}
@@ -502,7 +310,7 @@ static MemNode* vMemMgrCreateNode(VMemMgr* self, size_t size, size_t density) no
}
static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) noexcept {
if (self->_root == nullptr) {
if (!self->_root) {
// Empty tree case.
self->_root = node;
}
@@ -523,7 +331,7 @@ static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) noexcept {
// Search down the tree.
for (;;) {
if (q == nullptr) {
if (!q) {
// Insert new node at the bottom.
q = node;
p->node[dir] = node;
@@ -549,8 +357,7 @@ static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) noexcept {
dir = q->mem < node->mem;
// Update helpers.
if (g != nullptr)
t = g;
if (g) t = g;
g = p;
p = q;
@@ -567,7 +374,7 @@ static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) noexcept {
// Link with others.
node->prev = self->_last;
if (self->_first == nullptr) {
if (!self->_first) {
self->_first = node;
self->_last = node;
self->_optimal = node;
@@ -602,7 +409,7 @@ static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) noexcept {
q->node[1] = self->_root;
// Search and push a red down.
while (q->node[dir] != nullptr) {
while (q->node[dir]) {
int last = dir;
// Update helpers.
@@ -623,7 +430,7 @@ static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) noexcept {
else if (!rbIsRed(q->node[!dir])) {
RbNode* s = p->node[!last];
if (s != nullptr) {
if (s) {
if (!rbIsRed(s->node[!last]) && !rbIsRed(s->node[last])) {
// Color flip.
p->red = 0;
@@ -655,15 +462,14 @@ static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) noexcept {
if (f != q) {
ASMJIT_ASSERT(f != &head);
static_cast<MemNode*>(f)->fillData(static_cast<MemNode*>(q));
static_cast<MemNode*>(f)->init(static_cast<MemNode*>(q));
}
p->node[p->node[1] == q] = q->node[q->node[0] == nullptr];
// Update root and make it black.
self->_root = static_cast<MemNode*>(head.node[1]);
if (self->_root != nullptr)
self->_root->red = 0;
if (self->_root) self->_root->red = 0;
// Unlink.
MemNode* next = static_cast<MemNode*>(q)->next;
@@ -687,7 +493,7 @@ static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) noexcept {
static MemNode* vMemMgrFindNodeByPtr(VMemMgr* self, uint8_t* mem) noexcept {
MemNode* node = self->_root;
while (node != nullptr) {
while (node) {
uint8_t* nodeMem = node->mem;
// Go left.
@@ -723,23 +529,16 @@ static void* vMemMgrAllocPermanent(VMemMgr* self, size_t vSize) noexcept {
node = node->prev;
// Or allocate new node.
if (node == nullptr) {
if (!node) {
size_t nodeSize = permanentNodeSize;
if (nodeSize < vSize) nodeSize = vSize;
if (nodeSize < vSize)
nodeSize = vSize;
node = static_cast<PermanentNode*>(ASMJIT_ALLOC(sizeof(PermanentNode)));
// Out of memory.
if (node == nullptr)
return nullptr;
node = static_cast<PermanentNode*>(Internal::allocMemory(sizeof(PermanentNode)));
if (!node) return nullptr;
node->mem = vMemMgrAllocVMem(self, nodeSize, &node->size);
// Out of memory.
if (node->mem == nullptr) {
ASMJIT_FREE(node);
if (!node->mem) {
Internal::releaseMemory(node);
return nullptr;
}
@@ -848,12 +647,10 @@ static void* vMemMgrAllocFreeable(VMemMgr* self, size_t vSize) noexcept {
// allocate a new one.
{
size_t blockSize = self->_blockSize;
if (blockSize < vSize)
blockSize = vSize;
if (blockSize < vSize) blockSize = vSize;
node = vMemMgrCreateNode(self, blockSize, self->_blockDensity);
if (node == nullptr)
return nullptr;
if (!node) return nullptr;
// Update binary tree.
vMemMgrInsertNode(self, node);
@@ -894,14 +691,14 @@ L_Found:
static void vMemMgrReset(VMemMgr* self, bool keepVirtualMemory) noexcept {
MemNode* node = self->_first;
while (node != nullptr) {
while (node) {
MemNode* next = node->next;
if (!keepVirtualMemory)
vMemMgrReleaseVMem(self, node->mem, node->size);
ASMJIT_FREE(node->baUsed);
ASMJIT_FREE(node);
Internal::releaseMemory(node->baUsed);
Internal::releaseMemory(node);
node = next;
}
@@ -920,13 +717,18 @@ static void vMemMgrReset(VMemMgr* self, bool keepVirtualMemory) noexcept {
// ============================================================================
#if !ASMJIT_OS_WINDOWS
VMemMgr::VMemMgr() noexcept
VMemMgr::VMemMgr() noexcept {
#else
VMemMgr::VMemMgr(HANDLE hProcess) noexcept
: _hProcess(vMemGet().getSafeProcessHandle(hProcess))
VMemMgr::VMemMgr(HANDLE hProcess) noexcept {
#endif
VMemInfo vm = OSUtils::getVirtualMemoryInfo();
#if ASMJIT_OS_WINDOWS
_hProcess = hProcess ? hProcess : vm.hCurrentProcess;
#endif // ASMJIT_OS_WINDOWS
{
_blockSize = VMemUtil::getPageGranularity();
_blockSize = vm.pageGranularity;
_blockDensity = 64;
_allocatedBytes = 0;
@@ -949,7 +751,7 @@ VMemMgr::~VMemMgr() noexcept {
PermanentNode* node = _permanent;
while (node) {
PermanentNode* prev = node->prev;
ASMJIT_FREE(node);
Internal::releaseMemory(node);
node = prev;
}
}
@@ -967,21 +769,18 @@ void VMemMgr::reset() noexcept {
// ============================================================================
void* VMemMgr::alloc(size_t size, uint32_t type) noexcept {
if (type == kVMemAllocPermanent)
if (type == kAllocPermanent)
return vMemMgrAllocPermanent(this, size);
else
return vMemMgrAllocFreeable(this, size);
}
Error VMemMgr::release(void* p) noexcept {
if (p == nullptr)
return kErrorOk;
if (!p) return kErrorOk;
AutoLock locked(_lock);
MemNode* node = vMemMgrFindNodeByPtr(this, static_cast<uint8_t*>(p));
if (node == nullptr)
return kErrorInvalidArgument;
if (!node) return DebugUtils::errored(kErrorInvalidArgument);
size_t offset = (size_t)((uint8_t*)p - (uint8_t*)node->mem);
size_t bitpos = M_DIV(offset, node->density);
@@ -1043,7 +842,7 @@ Error VMemMgr::release(void* p) noexcept {
// Free memory associated with node (this memory is not accessed
// anymore so it's safe).
vMemMgrReleaseVMem(this, node->mem, node->size);
ASMJIT_FREE(node->baUsed);
Internal::releaseMemory(node->baUsed);
node->baUsed = nullptr;
node->baCont = nullptr;
@@ -1053,7 +852,7 @@ Error VMemMgr::release(void* p) noexcept {
// Remove node. This function can return different node than
// passed into, but data is copied into previous node if needed.
ASMJIT_FREE(vMemMgrRemoveNode(this, node));
Internal::releaseMemory(vMemMgrRemoveNode(this, node));
ASMJIT_ASSERT(vMemMgrCheckTree(this));
}
@@ -1061,17 +860,13 @@ Error VMemMgr::release(void* p) noexcept {
}
Error VMemMgr::shrink(void* p, size_t used) noexcept {
if (p == nullptr)
return kErrorOk;
if (!p) return kErrorOk;
if (used == 0)
return release(p);
AutoLock locked(_lock);
MemNode* node = vMemMgrFindNodeByPtr(this, (uint8_t*)p);
if (node == nullptr)
return kErrorInvalidArgument;
if (!node) return DebugUtils::errored(kErrorInvalidArgument);
size_t offset = (size_t)((uint8_t*)p - (uint8_t*)node->mem);
size_t bitpos = M_DIV(offset, node->density);
@@ -1195,13 +990,13 @@ UNIT(base_vmem) {
int i;
int kCount = 200000;
INFO("Memory alloc/free test - %d allocations.", static_cast<int>(kCount));
INFO("Memory alloc/free test - %d allocations", static_cast<int>(kCount));
void** a = (void**)ASMJIT_ALLOC(sizeof(void*) * kCount);
void** b = (void**)ASMJIT_ALLOC(sizeof(void*) * kCount);
void** a = (void**)Internal::allocMemory(sizeof(void*) * kCount);
void** b = (void**)Internal::allocMemory(sizeof(void*) * kCount);
EXPECT(a != nullptr && b != nullptr,
"Couldn't allocate %u bytes on heap.", kCount * 2);
"Couldn't allocate %u bytes on heap", kCount * 2);
INFO("Allocating virtual memory...");
for (i = 0; i < kCount; i++) {
@@ -1217,21 +1012,21 @@ UNIT(base_vmem) {
INFO("Freeing virtual memory...");
for (i = 0; i < kCount; i++) {
EXPECT(memmgr.release(a[i]) == kErrorOk,
"Failed to free %p.", b[i]);
"Failed to free %p", b[i]);
}
VMemTest_stats(memmgr);
INFO("Verified alloc/free test - %d allocations.", static_cast<int>(kCount));
INFO("Verified alloc/free test - %d allocations", static_cast<int>(kCount));
for (i = 0; i < kCount; i++) {
int r = (rand() % 1000) + 4;
a[i] = memmgr.alloc(r);
EXPECT(a[i] != nullptr,
"Couldn't allocate %d bytes of virtual memory.", r);
"Couldn't allocate %d bytes of virtual memory", r);
b[i] = ASMJIT_ALLOC(r);
b[i] = Internal::allocMemory(r);
EXPECT(b[i] != nullptr,
"Couldn't allocate %d bytes on heap.", r);
"Couldn't allocate %d bytes on heap", r);
VMemTest_fill(a[i], b[i], r);
}
@@ -1244,22 +1039,22 @@ UNIT(base_vmem) {
for (i = 0; i < kCount / 2; i++) {
VMemTest_verify(a[i], b[i]);
EXPECT(memmgr.release(a[i]) == kErrorOk,
"Failed to free %p.", a[i]);
ASMJIT_FREE(b[i]);
"Failed to free %p", a[i]);
Internal::releaseMemory(b[i]);
}
VMemTest_stats(memmgr);
INFO("Alloc again.");
INFO("Alloc again");
for (i = 0; i < kCount / 2; i++) {
int r = (rand() % 1000) + 4;
a[i] = memmgr.alloc(r);
EXPECT(a[i] != nullptr,
"Couldn't allocate %d bytes of virtual memory.", r);
"Couldn't allocate %d bytes of virtual memory", r);
b[i] = ASMJIT_ALLOC(r);
b[i] = Internal::allocMemory(r);
EXPECT(b[i] != nullptr,
"Couldn't allocate %d bytes on heap.");
"Couldn't allocate %d bytes on heap");
VMemTest_fill(a[i], b[i], r);
}
@@ -1269,13 +1064,13 @@ UNIT(base_vmem) {
for (i = 0; i < kCount; i++) {
VMemTest_verify(a[i], b[i]);
EXPECT(memmgr.release(a[i]) == kErrorOk,
"Failed to free %p.", a[i]);
ASMJIT_FREE(b[i]);
"Failed to free %p", a[i]);
Internal::releaseMemory(b[i]);
}
VMemTest_stats(memmgr);
ASMJIT_FREE(a);
ASMJIT_FREE(b);
Internal::releaseMemory(a);
Internal::releaseMemory(b);
}
#endif // ASMJIT_TEST

View File

@@ -9,80 +9,17 @@
#define _ASMJIT_BASE_VMEM_H
// [Dependencies]
#include "../base/utils.h"
#include "../base/globals.h"
#include "../base/osutils.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::VMemAllocType]
// ============================================================================
//! Type of virtual memory allocation, see `VMemMgr::alloc()`.
ASMJIT_ENUM(VMemAllocType) {
//! Normal memory allocation, has to be freed by `VMemMgr::release()`.
kVMemAllocFreeable = 0,
//! Allocate permanent memory, can't be freed.
kVMemAllocPermanent = 1
};
// ============================================================================
// [asmjit::VMemFlags]
// ============================================================================
//! Type of virtual memory allocation, see `VMemMgr::alloc()`.
ASMJIT_ENUM(VMemFlags) {
//! Memory is writable.
kVMemFlagWritable = 0x00000001,
//! Memory is executable.
kVMemFlagExecutable = 0x00000002
};
// ============================================================================
// [asmjit::VMemUtil]
// ============================================================================
//! Virtual memory utilities.
//!
//! Defines functions that provide facility to allocate and free memory that is
//! executable in a platform independent manner. If both the processor and host
//! operating system support data-execution-prevention then the only way how to
//! run machine code is to allocate it to a memory that has marked as executable.
//! VMemUtil is just unified interface to platform dependent APIs.
//!
//! `VirtualAlloc()` function is used on Windows operating system and `mmap()`
//! on POSIX. `VirtualAlloc()` and `mmap()` documentation provide a detailed
//! overview on how to use a platform specific APIs.
struct VMemUtil {
//! Get a size/alignment of a single virtual memory page.
static ASMJIT_API size_t getPageSize() noexcept;
//! Get a recommended granularity for a single `alloc` call.
static ASMJIT_API size_t getPageGranularity() noexcept;
//! Allocate virtual memory.
//!
//! Pages are readable/writeable, but they are not guaranteed to be
//! executable unless 'canExecute' is true. Returns the address of
//! allocated memory, or `nullptr` on failure.
static ASMJIT_API void* alloc(size_t length, size_t* allocated, uint32_t flags) noexcept;
//! Free memory allocated by `alloc()`.
static ASMJIT_API Error release(void* addr, size_t length) noexcept;
#if ASMJIT_OS_WINDOWS
//! Allocate virtual memory of `hProcess` (Windows only).
static ASMJIT_API void* allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags) noexcept;
//! Release virtual memory of `hProcess` (Windows only).
static ASMJIT_API Error releaseProcessMemory(HANDLE hProcess, void* addr, size_t length) noexcept;
#endif // ASMJIT_OS_WINDOWS
};
// ============================================================================
// [asmjit::VMemMgr]
// ============================================================================
@@ -90,7 +27,15 @@ struct VMemUtil {
//! Reference implementation of memory manager that uses `VMemUtil` to allocate
//! chunks of virtual memory and bit arrays to manage it.
class VMemMgr {
public:
public:
//! Type of virtual memory allocation, see `VMemMgr::alloc()`.
ASMJIT_ENUM(AllocType) {
//! Normal memory allocation, has to be freed by `VMemMgr::release()`.
kAllocFreeable = 0,
//! Allocate permanent memory, can't be freed.
kAllocPermanent = 1
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
@@ -123,30 +68,19 @@ class VMemMgr {
#if ASMJIT_OS_WINDOWS
//! Get the handle of the process memory manager is bound to.
ASMJIT_INLINE HANDLE getProcessHandle() const noexcept {
return _hProcess;
}
ASMJIT_INLINE HANDLE getProcessHandle() const noexcept { return _hProcess; }
#endif // ASMJIT_OS_WINDOWS
//! Get how many bytes are currently allocated.
ASMJIT_INLINE size_t getAllocatedBytes() const noexcept {
return _allocatedBytes;
}
ASMJIT_INLINE size_t getAllocatedBytes() const noexcept { return _allocatedBytes; }
//! Get how many bytes are currently used.
ASMJIT_INLINE size_t getUsedBytes() const noexcept {
return _usedBytes;
}
ASMJIT_INLINE size_t getUsedBytes() const noexcept { return _usedBytes; }
//! Get whether to keep allocated memory after the `VMemMgr` is destroyed.
//!
//! \sa \ref setKeepVirtualMemory.
ASMJIT_INLINE bool getKeepVirtualMemory() const noexcept {
return _keepVirtualMemory;
}
//! Set whether to keep allocated memory after memory manager is
//! destroyed.
ASMJIT_INLINE bool getKeepVirtualMemory() const noexcept { return _keepVirtualMemory; }
//! Set whether to keep allocated memory after the memory manager is destroyed.
//!
//! This method is usable when patching code of remote process. You need to
//! allocate process memory, store generated assembler into it and patch the
@@ -154,12 +88,10 @@ class VMemMgr {
//! VMemMgr destructor. After destruction all internal
//! structures are freed, only the process virtual memory remains.
//!
//! NOTE: Memory allocated with kVMemAllocPermanent is always kept.
//! NOTE: Memory allocated with kAllocPermanent is always kept.
//!
//! \sa \ref getKeepVirtualMemory.
ASMJIT_INLINE void setKeepVirtualMemory(bool keepVirtualMemory) noexcept {
_keepVirtualMemory = keepVirtualMemory;
}
ASMJIT_INLINE void setKeepVirtualMemory(bool val) noexcept { _keepVirtualMemory = val; }
// --------------------------------------------------------------------------
// [Alloc / Release]
@@ -170,11 +102,9 @@ class VMemMgr {
//! Note that if you are implementing your own virtual memory manager then you
//! can quitly ignore type of allocation. This is mainly for AsmJit to memory
//! manager that allocated memory will be never freed.
ASMJIT_API void* alloc(size_t size, uint32_t type = kVMemAllocFreeable) noexcept;
ASMJIT_API void* alloc(size_t size, uint32_t type = kAllocFreeable) noexcept;
//! Free previously allocated memory at a given `address`.
ASMJIT_API Error release(void* p) noexcept;
//! Free extra memory allocated with `p`.
ASMJIT_API Error shrink(void* p, size_t used) noexcept;
@@ -183,25 +113,16 @@ class VMemMgr {
// --------------------------------------------------------------------------
#if ASMJIT_OS_WINDOWS
//! Process passed to `VirtualAllocEx` and `VirtualFree`.
HANDLE _hProcess;
HANDLE _hProcess; //!< Process passed to `VirtualAllocEx` and `VirtualFree`.
#endif // ASMJIT_OS_WINDOWS
Lock _lock; //!< Lock to enable thread-safe functionality.
//! Lock to enable thread-safe functionality.
Lock _lock;
size_t _blockSize; //!< Default block size.
size_t _blockDensity; //!< Default block density.
bool _keepVirtualMemory; //!< Keep virtual memory after destroyed.
//! Default block size.
size_t _blockSize;
//! Default block density.
size_t _blockDensity;
// Whether to keep virtual memory after destroy.
bool _keepVirtualMemory;
//! How many bytes are currently allocated.
size_t _allocatedBytes;
//! How many bytes are currently used.
size_t _usedBytes;
size_t _allocatedBytes; //!< How many bytes are currently allocated.
size_t _usedBytes; //!< How many bytes are currently used.
//! \internal
//! \{
@@ -227,7 +148,7 @@ class VMemMgr {
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_VMEM_H

View File

@@ -10,26 +10,39 @@
// [Dependencies]
#include "../base/utils.h"
#include "../base/zone.h"
#include <stdarg.h>
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
//! Zero size block used by `Zone` that doesn't have any memory allocated.
static const Zone::Block Zone_zeroBlock = {
nullptr, nullptr, nullptr, nullptr, { 0 }
};
static const Zone::Block Zone_zeroBlock = { nullptr, nullptr, 0, { 0 } };
static ASMJIT_INLINE uint32_t Zone_getAlignmentOffsetFromAlignment(uint32_t x) noexcept {
switch (x) {
default: return 0;
case 0 : return 0;
case 1 : return 0;
case 2 : return 1;
case 4 : return 2;
case 8 : return 3;
case 16: return 4;
case 32: return 5;
case 64: return 6;
}
}
// ============================================================================
// [asmjit::Zone - Construction / Destruction]
// ============================================================================
Zone::Zone(size_t blockSize) noexcept {
_block = const_cast<Zone::Block*>(&Zone_zeroBlock);
_blockSize = blockSize;
}
Zone::Zone(uint32_t blockSize, uint32_t blockAlignment) noexcept
: _ptr(nullptr),
_end(nullptr),
_block(const_cast<Zone::Block*>(&Zone_zeroBlock)),
_blockSize(blockSize),
_blockAlignmentShift(Zone_getAlignmentOffsetFromAlignment(blockAlignment)) {}
Zone::~Zone() noexcept {
reset(true);
@@ -52,24 +65,27 @@ void Zone::reset(bool releaseMemory) noexcept {
Block* next = cur->next;
do {
Block* prev = cur->prev;
ASMJIT_FREE(cur);
Internal::releaseMemory(cur);
cur = prev;
} while (cur != nullptr);
} while (cur);
cur = next;
while (cur != nullptr) {
while (cur) {
next = cur->next;
ASMJIT_FREE(cur);
Internal::releaseMemory(cur);
cur = next;
}
_ptr = nullptr;
_end = nullptr;
_block = const_cast<Zone::Block*>(&Zone_zeroBlock);
}
else {
while (cur->prev != nullptr)
while (cur->prev)
cur = cur->prev;
cur->pos = cur->data;
_ptr = cur->data;
_end = _ptr + cur->size;
_block = cur;
}
}
@@ -80,35 +96,47 @@ void Zone::reset(bool releaseMemory) noexcept {
void* Zone::_alloc(size_t size) noexcept {
Block* curBlock = _block;
size_t blockSize = Utils::iMax<size_t>(_blockSize, size);
uint8_t* p;
size_t blockSize = std::max<size_t>(_blockSize, size);
size_t blockAlignment = getBlockAlignment();
// The `_alloc()` method can only be called if there is not enough space
// in the current block, see `alloc()` implementation for more details.
ASMJIT_ASSERT(curBlock == &Zone_zeroBlock || curBlock->getRemainingSize() < size);
ASMJIT_ASSERT(curBlock == &Zone_zeroBlock || getRemainingSize() < size);
// If the `Zone` has been reset the current block doesn't have to be the
// If the `Zone` has been cleared the current block doesn't have to be the
// last one. Check if there is a block that can be used instead of allocating
// a new one. If there is a `next` block it's completely unused, we don't have
// to check for remaining bytes.
Block* next = curBlock->next;
if (next != nullptr && next->getBlockSize() >= size) {
next->pos = next->data + size;
if (next && next->size >= size) {
p = Utils::alignTo(next->data, blockAlignment);
_block = next;
return static_cast<void*>(next->data);
_ptr = p + size;
_end = next->data + next->size;
return static_cast<void*>(p);
}
// Prevent arithmetic overflow.
if (blockSize > ~static_cast<size_t>(0) - sizeof(Block))
if (ASMJIT_UNLIKELY(blockSize > (~static_cast<size_t>(0) - sizeof(Block) - blockAlignment)))
return nullptr;
Block* newBlock = static_cast<Block*>(ASMJIT_ALLOC(sizeof(Block) - sizeof(void*) + blockSize));
if (newBlock == nullptr)
blockSize += blockAlignment;
Block* newBlock = static_cast<Block*>(Internal::allocMemory(sizeof(Block) + blockSize));
if (ASMJIT_UNLIKELY(!newBlock))
return nullptr;
newBlock->pos = newBlock->data + size;
newBlock->end = newBlock->data + blockSize;
// Align the pointer to `blockAlignment` and adjust the size of this block
// accordingly. It's the same as using `blockAlignment - Utils::alignDiff()`,
// just written differently.
p = Utils::alignTo(newBlock->data, blockAlignment);
newBlock->prev = nullptr;
newBlock->next = nullptr;
newBlock->size = blockSize;
if (curBlock != &Zone_zeroBlock) {
newBlock->prev = curBlock;
@@ -117,62 +145,40 @@ void* Zone::_alloc(size_t size) noexcept {
// Does only happen if there is a next block, but the requested memory
// can't fit into it. In this case a new buffer is allocated and inserted
// between the current block and the next one.
if (next != nullptr) {
if (next) {
newBlock->next = next;
next->prev = newBlock;
}
}
_block = newBlock;
return static_cast<void*>(newBlock->data);
_ptr = p + size;
_end = newBlock->data + blockSize;
return static_cast<void*>(p);
}
void* Zone::allocZeroed(size_t size) noexcept {
void* p = alloc(size);
if (p != nullptr)
::memset(p, 0, size);
return p;
if (ASMJIT_UNLIKELY(!p)) return p;
return ::memset(p, 0, size);
}
void* Zone::dup(const void* data, size_t size) noexcept {
if (data == nullptr)
return nullptr;
void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept {
if (ASMJIT_UNLIKELY(!data || !size)) return nullptr;
if (size == 0)
return nullptr;
void* m = alloc(size);
if (m == nullptr)
return nullptr;
ASMJIT_ASSERT(size != IntTraits<size_t>::maxValue());
uint8_t* m = allocT<uint8_t>(size + nullTerminate);
if (ASMJIT_UNLIKELY(!m)) return nullptr;
::memcpy(m, data, size);
return m;
}
if (nullTerminate) m[size] = '\0';
char* Zone::sdup(const char* str) noexcept {
if (str == nullptr)
return nullptr;
size_t len = ::strlen(str);
if (len == 0)
return nullptr;
// Include NULL terminator and limit string length.
if (++len > 256)
len = 256;
char* m = static_cast<char*>(alloc(len));
if (m == nullptr)
return nullptr;
::memcpy(m, str, len);
m[len - 1] = '\0';
return m;
return static_cast<void*>(m);
}
char* Zone::sformat(const char* fmt, ...) noexcept {
if (fmt == nullptr)
return nullptr;
if (ASMJIT_UNLIKELY(!fmt)) return nullptr;
char buf[512];
size_t len;
@@ -187,7 +193,770 @@ char* Zone::sformat(const char* fmt, ...) noexcept {
return static_cast<char*>(dup(buf, len));
}
// ============================================================================
// [asmjit::ZoneHeap - Helpers]
// ============================================================================
static bool ZoneHeap_hasDynamicBlock(ZoneHeap* self, ZoneHeap::DynamicBlock* block) noexcept {
ZoneHeap::DynamicBlock* cur = self->_dynamicBlocks;
while (cur) {
if (cur == block)
return true;
cur = cur->next;
}
return false;
}
// ============================================================================
// [asmjit::ZoneHeap - Init / Reset]
// ============================================================================
void ZoneHeap::reset(Zone* zone) noexcept {
// Free dynamic blocks.
DynamicBlock* block = _dynamicBlocks;
while (block) {
DynamicBlock* next = block->next;
Internal::releaseMemory(block);
block = next;
}
// Zero the entire class and initialize to the given `zone`.
::memset(this, 0, sizeof(*this));
_zone = zone;
}
// ============================================================================
// [asmjit::ZoneHeap - Alloc / Release]
// ============================================================================
void* ZoneHeap::_alloc(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());
// We use our memory pool only if the requested block is of a reasonable size.
uint32_t slot;
if (_getSlotIndex(size, slot, allocatedSize)) {
// Slot reuse.
uint8_t* p = reinterpret_cast<uint8_t*>(_slots[slot]);
size = allocatedSize;
if (p) {
_slots[slot] = reinterpret_cast<Slot*>(p)->next;
//printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot);
return p;
}
// So use Zone to allocate a new chunk for us. But before we use it, we
// check if there is enough room for the new chunk in zone, and if not,
// we redistribute the remaining memory in Zone's current block into slots.
Zone* zone = _zone;
p = Utils::alignTo(zone->getCursor(), kBlockAlignment);
size_t remain = (p <= zone->getEnd()) ? (size_t)(zone->getEnd() - p) : size_t(0);
if (ASMJIT_LIKELY(remain >= size)) {
zone->setCursor(p + size);
//printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot);
return p;
}
else {
// Distribute the remaining memory to suitable slots.
if (remain >= kLoGranularity) {
do {
size_t distSize = std::min<size_t>(remain, kLoMaxSize);
uint32_t distSlot = static_cast<uint32_t>((distSize - kLoGranularity) / kLoGranularity);
ASMJIT_ASSERT(distSlot < kLoCount);
reinterpret_cast<Slot*>(p)->next = _slots[distSlot];
_slots[distSlot] = reinterpret_cast<Slot*>(p);
p += distSize;
remain -= distSize;
} while (remain >= kLoGranularity);
zone->setCursor(p);
}
p = static_cast<uint8_t*>(zone->_alloc(size));
if (ASMJIT_UNLIKELY(!p)) {
allocatedSize = 0;
return nullptr;
}
//printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot);
return p;
}
}
else {
// Allocate a dynamic block.
size_t overhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;
// Handle a possible overflow.
if (ASMJIT_UNLIKELY(overhead >= ~static_cast<size_t>(0) - size))
return nullptr;
void* p = Internal::allocMemory(size + overhead);
if (ASMJIT_UNLIKELY(!p)) {
allocatedSize = 0;
return nullptr;
}
// Link as first in `_dynamicBlocks` double-linked list.
DynamicBlock* block = static_cast<DynamicBlock*>(p);
DynamicBlock* next = _dynamicBlocks;
if (next)
next->prev = block;
block->prev = nullptr;
block->next = next;
_dynamicBlocks = block;
// Align the pointer to the guaranteed alignment and store `DynamicBlock`
// at the end of the memory block, so `_releaseDynamic()` can find it.
p = Utils::alignTo(static_cast<uint8_t*>(p) + sizeof(DynamicBlock) + sizeof(DynamicBlock*), kBlockAlignment);
reinterpret_cast<DynamicBlock**>(p)[-1] = block;
allocatedSize = size;
//printf("ALLOCATED DYNAMIC %p of size %d\n", p, int(size));
return p;
}
}
void* ZoneHeap::_allocZeroed(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());
void* p = _alloc(size, allocatedSize);
if (ASMJIT_UNLIKELY(!p)) return p;
return ::memset(p, 0, allocatedSize);
}
void ZoneHeap::_releaseDynamic(void* p, size_t size) noexcept {
ASMJIT_ASSERT(isInitialized());
//printf("RELEASING DYNAMIC %p of size %d\n", p, int(size));
// Pointer to `DynamicBlock` is stored at [-1].
DynamicBlock* block = reinterpret_cast<DynamicBlock**>(p)[-1];
ASMJIT_ASSERT(ZoneHeap_hasDynamicBlock(this, block));
// Unlink and free.
DynamicBlock* prev = block->prev;
DynamicBlock* next = block->next;
if (prev)
prev->next = next;
else
_dynamicBlocks = next;
if (next)
next->prev = prev;
Internal::releaseMemory(block);
}
// ============================================================================
// [asmjit::ZoneVectorBase - Helpers]
// ============================================================================
Error ZoneVectorBase::_grow(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept {
size_t threshold = Globals::kAllocThreshold / sizeOfT;
size_t capacity = _capacity;
size_t after = _length;
if (ASMJIT_UNLIKELY(IntTraits<size_t>::maxValue() - n < after))
return DebugUtils::errored(kErrorNoHeapMemory);
after += n;
if (capacity >= after)
return kErrorOk;
// ZoneVector is used as an array to hold short-lived data structures used
// during code generation. The growing strategy is simple - use small capacity
// at the beginning (very good for ZoneHeap) and then grow quicker to prevent
// successive reallocations.
if (capacity < 4)
capacity = 4;
else if (capacity < 8)
capacity = 8;
else if (capacity < 16)
capacity = 16;
else if (capacity < 64)
capacity = 64;
else if (capacity < 256)
capacity = 256;
while (capacity < after) {
if (capacity < threshold)
capacity *= 2;
else
capacity += threshold;
}
return _reserve(heap, sizeOfT, capacity);
}
Error ZoneVectorBase::_reserve(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept {
size_t oldCapacity = _capacity;
if (oldCapacity >= n) return kErrorOk;
size_t nBytes = n * sizeOfT;
if (ASMJIT_UNLIKELY(nBytes < n))
return DebugUtils::errored(kErrorNoHeapMemory);
size_t allocatedBytes;
uint8_t* newData = static_cast<uint8_t*>(heap->alloc(nBytes, allocatedBytes));
if (ASMJIT_UNLIKELY(!newData))
return DebugUtils::errored(kErrorNoHeapMemory);
void* oldData = _data;
if (_length)
::memcpy(newData, oldData, _length * sizeOfT);
if (oldData)
heap->release(oldData, oldCapacity * sizeOfT);
_capacity = allocatedBytes / sizeOfT;
ASMJIT_ASSERT(_capacity >= n);
_data = newData;
return kErrorOk;
}
Error ZoneVectorBase::_resize(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept {
size_t length = _length;
if (_capacity < n) {
ASMJIT_PROPAGATE(_grow(heap, sizeOfT, n - length));
ASMJIT_ASSERT(_capacity >= n);
}
if (length < n)
::memset(static_cast<uint8_t*>(_data) + length * sizeOfT, 0, (n - length) * sizeOfT);
_length = n;
return kErrorOk;
}
// ============================================================================
// [asmjit::ZoneBitVector - Ops]
// ============================================================================
Error ZoneBitVector::_resize(ZoneHeap* heap, size_t newLength, size_t idealCapacity, bool newBitsValue) noexcept {
ASMJIT_ASSERT(idealCapacity >= newLength);
if (newLength <= _length) {
// The size after the resize is lesser than or equal to the current length.
size_t idx = newLength / kBitsPerWord;
size_t bit = newLength % kBitsPerWord;
// Just set all bits outside of the new length in the last word to zero.
// There is a case that there are not bits to set if `bit` is zero. This
// happens when `newLength` is a multiply of `kBitsPerWord` like 64, 128,
// and so on. In that case don't change anything as that would mean settings
// bits outside of the `_length`.
if (bit)
_data[idx] &= (static_cast<uintptr_t>(1) << bit) - 1U;
_length = newLength;
return kErrorOk;
}
size_t oldLength = _length;
BitWord* data = _data;
if (newLength > _capacity) {
// Realloc needed... Calculate the minimum capacity (in bytes) requied.
size_t minimumCapacityInBits = Utils::alignTo<size_t>(idealCapacity, kBitsPerWord);
size_t allocatedCapacity;
if (ASMJIT_UNLIKELY(minimumCapacityInBits < newLength))
return DebugUtils::errored(kErrorNoHeapMemory);
// Normalize to bytes.
size_t minimumCapacity = minimumCapacityInBits / 8;
BitWord* newData = static_cast<BitWord*>(heap->alloc(minimumCapacity, allocatedCapacity));
if (ASMJIT_UNLIKELY(!newData))
return DebugUtils::errored(kErrorNoHeapMemory);
// `allocatedCapacity` now contains number in bytes, we need bits.
size_t allocatedCapacityInBits = allocatedCapacity * 8;
// Arithmetic overflow should normally not happen. If it happens we just
// change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as
// this value is still safe to be used to call `_heap->release(...)`.
if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
allocatedCapacityInBits = minimumCapacityInBits;
if (oldLength)
::memcpy(newData, data, _wordsPerBits(oldLength));
if (data)
heap->release(data, _capacity / 8);
data = newData;
_data = data;
_capacity = allocatedCapacityInBits;
}
// Start (of the old length) and end (of the new length) bits
size_t idx = oldLength / kBitsPerWord;
size_t startBit = oldLength % kBitsPerWord;
size_t endBit = newLength % kBitsPerWord;
// Set new bits to either 0 or 1. The `pattern` is used to set multiple
// bits per bit-word and contains either all zeros or all ones.
BitWord pattern = _patternFromBit(newBitsValue);
// First initialize the last bit-word of the old length.
if (startBit) {
size_t nBits = 0;
if (idx == (newLength / kBitsPerWord)) {
// The number of bit-words is the same after the resize. In that case
// we need to set only bits necessary in the current last bit-word.
ASMJIT_ASSERT(startBit < endBit);
nBits = endBit - startBit;
}
else {
// There is be more bit-words after the resize. In that case we don't
// have to be extra careful about the last bit-word of the old length.
nBits = kBitsPerWord - startBit;
}
data[idx++] |= pattern << nBits;
}
// Initialize all bit-words after the last bit-word of the old length.
size_t endIdx = _wordsPerBits(newLength);
endIdx -= static_cast<size_t>(endIdx * kBitsPerWord == newLength);
while (idx <= endIdx)
data[idx++] = pattern;
// Clear unused bits of the last bit-word.
if (endBit)
data[endIdx] &= (static_cast<BitWord>(1) << endBit) - 1;
_length = newLength;
return kErrorOk;
}
//! Grow the vector by a single bit set to `value`.
//!
//! The growth strategy depends on the current capacity:
//!   - tiny vectors jump straight to 128 bits,
//!   - small vectors double,
//!   - large vectors grow linearly by the allocation threshold.
Error ZoneBitVector::_append(ZoneHeap* heap, bool value) noexcept {
  const size_t growThreshold = Globals::kAllocThreshold * 8;
  const size_t requiredLength = _length + 1;

  size_t targetCapacity;
  if (_capacity < 128)
    targetCapacity = 128;
  else if (_capacity <= growThreshold)
    targetCapacity = _capacity * 2;
  else
    targetCapacity = _capacity + growThreshold;

  // Guard against arithmetic overflow of the ideal capacity. Note that
  // `_length + 1` itself can never overflow here.
  if (ASMJIT_UNLIKELY(targetCapacity < _capacity)) {
    targetCapacity = requiredLength;
    ASMJIT_ASSERT(targetCapacity > _capacity);
  }

  return _resize(heap, requiredLength, targetCapacity, value);
}
//! Fill bits in the range `[from, to)` with `value`.
//!
//! Both `from` and `to` must be within the current length of the vector
//! (asserted below). Returns `kErrorInvalidArgument` for an inverted range
//! (`from > to`); an empty range (`from == to`) is a successful no-op.
Error ZoneBitVector::fill(size_t from, size_t to, bool value) noexcept {
  if (ASMJIT_UNLIKELY(from >= to)) {
    if (from > to)
      return DebugUtils::errored(kErrorInvalidArgument);
    else
      return kErrorOk;
  }
  ASMJIT_ASSERT(from <= _length);
  ASMJIT_ASSERT(to <= _length);
  // This is very similar to `ZoneBitVector::_fill()`, however, since we
  // actually set bits that are already part of the container we need to
  // special case filling to zeros and ones.
  size_t idx = from / kBitsPerWord;
  size_t startBit = from % kBitsPerWord;
  size_t endIdx = to / kBitsPerWord;
  size_t endBit = to % kBitsPerWord;
  BitWord* data = _data;
  ASMJIT_ASSERT(data != nullptr);
  // Special case for non-zero `startBit` - the range starts in the middle of
  // a bit-word, so only the high part of `data[idx]` may be touched.
  if (startBit) {
    if (idx == endIdx) {
      // The whole range lives inside a single bit-word; mask exactly the
      // bits [startBit, endBit) and we are done.
      ASMJIT_ASSERT(startBit < endBit);
      size_t nBits = endBit - startBit;
      BitWord mask = ((static_cast<BitWord>(1) << nBits) - 1) << startBit;
      if (value)
        data[idx] |= mask;
      else
        data[idx] &= ~mask;
      return kErrorOk;
    }
    else {
      // Fill everything from `startBit` to the top of this bit-word and
      // advance to the first fully covered word.
      BitWord mask = (static_cast<BitWord>(0) - 1) << startBit;
      if (value)
        data[idx++] |= mask;
      else
        data[idx++] &= ~mask;
    }
  }
  // Fill all bits in case there is a gap between the current `idx` and `endIdx`
  // (these bit-words are covered entirely by the range).
  if (idx < endIdx) {
    BitWord pattern = _patternFromBit(value);
    do {
      data[idx++] = pattern;
    } while (idx < endIdx);
  }
  // Special case for non-zero `endBit` - the range ends in the middle of a
  // bit-word, so only its low `endBit` bits may be touched.
  if (endBit) {
    BitWord mask = ((static_cast<BitWord>(1) << endBit) - 1);
    if (value)
      data[endIdx] |= mask;
    else
      data[endIdx] &= ~mask;
  }
  return kErrorOk;
}
// ============================================================================
// [asmjit::ZoneStackBase - Init / Reset]
// ============================================================================
//! (Re)initialize the stack to use `heap`.
//!
//! Any blocks owned by a previously attached heap are released first.
//! Passing a null `heap` simply detaches the stack. When a heap is given,
//! one block is allocated and both data pointers are centered at
//! `middleIndex` so the stack can grow in either direction.
Error ZoneStackBase::_init(ZoneHeap* heap, size_t middleIndex) noexcept {
  ZoneHeap* prevHeap = _heap;

  // Release the whole chain of blocks owned by the previous heap.
  if (prevHeap) {
    Block* cur = _block[kSideLeft];
    while (cur) {
      Block* nextBlock = cur->getNext();
      prevHeap->release(cur, kBlockSize);
      cur = nextBlock;
    }

    _heap = nullptr;
    _block[kSideLeft] = nullptr;
    _block[kSideRight] = nullptr;
  }

  if (!heap)
    return kErrorOk;

  Block* initial = static_cast<Block*>(heap->alloc(kBlockSize));
  if (ASMJIT_UNLIKELY(!initial))
    return DebugUtils::errored(kErrorNoHeapMemory);

  initial->_link[kSideLeft] = nullptr;
  initial->_link[kSideRight] = nullptr;

  // Center the data pointers so items can be pushed on either side.
  uint8_t* middle = (uint8_t*)initial + middleIndex;
  initial->_start = middle;
  initial->_end = middle;

  _heap = heap;
  _block[kSideLeft] = initial;
  _block[kSideRight] = initial;
  return kErrorOk;
}
// ============================================================================
// [asmjit::ZoneStackBase - Ops]
// ============================================================================
//! Allocate and link a fresh block at the given `side` of the stack.
//!
//! The new block becomes the outermost block on that side and its data
//! pointers are positioned at `initialIndex` so items can be stored into
//! it immediately.
Error ZoneStackBase::_prepareBlock(uint32_t side, size_t initialIndex) noexcept {
  ASMJIT_ASSERT(isInitialized());

  Block* current = _block[side];
  ASMJIT_ASSERT(!current->isEmpty());

  Block* fresh = _heap->allocT<Block>(kBlockSize);
  if (ASMJIT_UNLIKELY(!fresh))
    return DebugUtils::errored(kErrorNoHeapMemory);

  // Link the fresh block beyond `current` on the requested side.
  fresh->_link[side] = nullptr;
  fresh->_link[!side] = current;

  uint8_t* dataPos = (uint8_t*)fresh + initialIndex;
  fresh->_start = dataPos;
  fresh->_end = dataPos;

  current->_link[side] = fresh;
  _block[side] = fresh;
  return kErrorOk;
}
//! Release or recycle the now-empty block at the given `side`.
//!
//! If the empty block has a neighbor, it is released back to the heap and
//! the neighbor becomes the new end block on that side. If it is the only
//! block of the container (the stack just became empty) it is kept and both
//! data pointers are re-centered at `middleIndex` so it can grow either way.
void ZoneStackBase::_cleanupBlock(uint32_t side, size_t middleIndex) noexcept {
  Block* block = _block[side];
  ASMJIT_ASSERT(block->isEmpty());

  Block* prev = block->_link[!side];
  if (prev) {
    ASMJIT_ASSERT(prev->_link[side] == block);
    _heap->release(block, kBlockSize);

    prev->_link[side] = nullptr;
    _block[side] = prev;
  }
  else if (_block[!side] == block) {
    // BUG FIX: the original condition was `_block[!side] == prev &&
    // prev->isEmpty()`, but `prev` is known to be null in this branch, so the
    // comparison could never be true as intended (and `prev->isEmpty()` would
    // dereference null if it were). The intent is to detect that `block` is
    // the only remaining block; its emptiness is already asserted above.
    //
    // If the container becomes empty, center both pointers in the remaining
    // block so it can accept pushes on either side again.
    block->_start = (uint8_t*)block + middleIndex;
    block->_end = (uint8_t*)block + middleIndex;
  }
}
// ============================================================================
// [asmjit::ZoneHashBase - Utilities]
// ============================================================================
//! Return the smallest prime from a small internal table that is greater
//! than `x`. If no table entry is greater than `x` the largest entry is
//! returned, which caps how far the hash table can grow.
static uint32_t ZoneHash_getClosestPrime(uint32_t x) noexcept {
  static const uint32_t primeTable[] = {
    23, 53, 193, 389, 769, 1543, 3079, 6151, 12289, 24593
  };

  const size_t count = sizeof(primeTable) / sizeof(primeTable[0]);
  uint32_t prime = primeTable[count - 1];

  for (size_t i = 0; i < count; i++) {
    if (primeTable[i] > x) {
      prime = primeTable[i];
      break;
    }
  }
  return prime;
}
// ============================================================================
// [asmjit::ZoneHashBase - Reset]
// ============================================================================
//! Reset the hash table to its default (embedded) state and attach `heap`.
//!
//! Any dynamically allocated bucket array is returned to the previously
//! attached heap; afterwards the table uses its single embedded bucket.
void ZoneHashBase::reset(ZoneHeap* heap) noexcept {
  // Release the bucket array unless the table still uses embedded storage.
  if (_data != _embedded)
    _heap->release(_data, _bucketsCount * sizeof(ZoneHashNode*));

  _heap = heap;
  _size = 0;
  _bucketsCount = 1;
  _bucketsGrow = 1;

  _embedded[0] = nullptr;
  _data = _embedded;
}
// ============================================================================
// [asmjit::ZoneHashBase - Rehash]
// ============================================================================
void ZoneHashBase::_rehash(uint32_t newCount) noexcept {
ASMJIT_ASSERT(isInitialized());
ZoneHashNode** oldData = _data;
ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>(
_heap->allocZeroed(static_cast<size_t>(newCount) * sizeof(ZoneHashNode*)));
// We can still store nodes into the table, but it will degrade.
if (ASMJIT_UNLIKELY(newData == nullptr))
return;
uint32_t i;
uint32_t oldCount = _bucketsCount;
for (i = 0; i < oldCount; i++) {
ZoneHashNode* node = oldData[i];
while (node) {
ZoneHashNode* next = node->_hashNext;
uint32_t hMod = node->_hVal % newCount;
node->_hashNext = newData[hMod];
newData[hMod] = node;
node = next;
}
}
// 90% is the maximum occupancy, can't overflow since the maximum capacity
// is limited to the last prime number stored in the prime table.
if (oldData != _embedded)
_heap->release(oldData, _bucketsCount * sizeof(ZoneHashNode*));
_bucketsCount = newCount;
_bucketsGrow = newCount * 9 / 10;
_data = newData;
}
// ============================================================================
// [asmjit::ZoneHashBase - Ops]
// ============================================================================
//! Insert `node` into the table and return it.
//!
//! The node must not already be stored. A rehash is attempted once the
//! occupancy limit is reached and the target bucket already has at least
//! one node (i.e. a collision actually occurred).
ZoneHashNode* ZoneHashBase::_put(ZoneHashNode* node) noexcept {
  uint32_t bucket = node->_hVal % _bucketsCount;

  // Prepend the node to its bucket's chain.
  ZoneHashNode* chain = _data[bucket];
  node->_hashNext = chain;
  _data[bucket] = node;

  _size++;
  if (_size >= _bucketsGrow && chain) {
    uint32_t grownCount = ZoneHash_getClosestPrime(_bucketsCount);
    if (grownCount != _bucketsCount)
      _rehash(grownCount);
  }
  return node;
}
//! Remove `node` from the table.
//!
//! Returns `node` when it was found and unlinked, or null when the node is
//! not present.
//!
//! NOTE(review): `_size` is not decremented on removal - confirm whether
//! that is intentional (it only affects when the next rehash triggers).
ZoneHashNode* ZoneHashBase::_del(ZoneHashNode* node) noexcept {
  uint32_t bucket = node->_hVal % _bucketsCount;

  // Walk the chain keeping a pointer to the link that references the
  // currently visited node, so unlinking is O(1) once found.
  ZoneHashNode** link = &_data[bucket];
  for (ZoneHashNode* cur = *link; cur; cur = *link) {
    if (cur == node) {
      *link = cur->_hashNext;
      return node;
    }
    link = &cur->_hashNext;
  }
  return nullptr;
}
// ============================================================================
// [asmjit::Zone - Test]
// ============================================================================
#if defined(ASMJIT_TEST)
// Basic sanity tests for ZoneVector<int>: append, clear, length/capacity
// tracking, and indexOf() on both present and missing values.
UNIT(base_zonevector) {
  Zone zone(8096 - Zone::kZoneOverhead);
  ZoneHeap heap(&zone);
  int i;
  int kMax = 100000;
  ZoneVector<int> vec;
  INFO("ZoneVector<int> basic tests");
  // A single append must make the vector non-empty and findable.
  EXPECT(vec.append(&heap, 0) == kErrorOk);
  EXPECT(vec.isEmpty() == false);
  EXPECT(vec.getLength() == 1);
  EXPECT(vec.getCapacity() >= 1);
  EXPECT(vec.indexOf(0) == 0);
  EXPECT(vec.indexOf(-11) == Globals::kInvalidIndex);
  // clear() must restore the empty state.
  vec.clear();
  EXPECT(vec.isEmpty());
  EXPECT(vec.getLength() == 0);
  EXPECT(vec.indexOf(0) == Globals::kInvalidIndex);
  // Stress append to force multiple reallocations.
  for (i = 0; i < kMax; i++) {
    EXPECT(vec.append(&heap, i) == kErrorOk);
  }
  EXPECT(vec.isEmpty() == false);
  EXPECT(vec.getLength() == static_cast<size_t>(kMax));
  EXPECT(vec.indexOf(kMax - 1) == static_cast<size_t>(kMax - 1));
}
// Tests ZoneBitVector::resize() and ZoneBitVector::fill() over a range of
// lengths so bit-word boundaries are crossed repeatedly.
UNIT(base_ZoneBitVector) {
  Zone zone(8096 - Zone::kZoneOverhead);
  ZoneHeap heap(&zone);
  size_t i, count;
  size_t kMaxCount = 100;
  ZoneBitVector vec;
  EXPECT(vec.isEmpty());
  EXPECT(vec.getLength() == 0);
  INFO("ZoneBitVector::resize()");
  // Grow an empty vector to `count` bits filled with all zeros and then,
  // separately, with all ones, verifying every single bit.
  for (count = 1; count < kMaxCount; count++) {
    vec.clear();
    EXPECT(vec.resize(&heap, count, false) == kErrorOk);
    EXPECT(vec.getLength() == count);
    for (i = 0; i < count; i++)
      EXPECT(vec.getAt(i) == false);
    vec.clear();
    EXPECT(vec.resize(&heap, count, true) == kErrorOk);
    EXPECT(vec.getLength() == count);
    for (i = 0; i < count; i++)
      EXPECT(vec.getAt(i) == true);
  }
  INFO("ZoneBitVector::fill()");
  // Fill nested shrinking ranges [i, count - i) with alternating values;
  // the final pass verifies the resulting alternating pattern.
  for (count = 1; count < kMaxCount; count += 2) {
    vec.clear();
    EXPECT(vec.resize(&heap, count) == kErrorOk);
    EXPECT(vec.getLength() == count);
    for (i = 0; i < (count + 1) / 2; i++) {
      bool value = static_cast<bool>(i & 1);
      EXPECT(vec.fill(i, count - i, value) == kErrorOk);
    }
    for (i = 0; i < count; i++) {
      EXPECT(vec.getAt(i) == static_cast<bool>(i & 1));
    }
  }
}
// Tests ZoneStack<int>: append/prepend paired with pop/popFirst, first on a
// tiny stack and then on one large enough to span many blocks on both sides.
UNIT(base_zonestack) {
  Zone zone(8096 - Zone::kZoneOverhead);
  ZoneHeap heap(&zone);
  ZoneStack<int> stack;
  INFO("ZoneStack<int> contains %d elements per one Block", ZoneStack<int>::kNumBlockItems);
  EXPECT(stack.init(&heap) == kErrorOk);
  EXPECT(stack.isEmpty(), "Stack must be empty after `init()`");
  // Single-element round trips in both directions.
  EXPECT(stack.append(42) == kErrorOk);
  EXPECT(!stack.isEmpty() , "Stack must not be empty after an item has been appended");
  EXPECT(stack.pop() == 42, "Stack.pop() must return the item that has been appended last");
  EXPECT(stack.isEmpty() , "Stack must be empty after the last element has been removed");
  EXPECT(stack.prepend(43) == kErrorOk);
  EXPECT(!stack.isEmpty() , "Stack must not be empty after an item has been prepended");
  EXPECT(stack.popFirst() == 43, "Stack.popFirst() must return the item that has been prepended last");
  EXPECT(stack.isEmpty() , "Stack must be empty after the last element has been removed");
  int i;
  int iMin =-100;
  int iMax = 100000;
  // Populate [iMin, iMax] so that both sides of the stack allocate blocks,
  // then drain front-to-back.
  INFO("Adding items from %d to %d to the stack", iMin, iMax);
  for (i = 1; i <= iMax; i++) stack.append(i);
  for (i = 0; i >= iMin; i--) stack.prepend(i);
  INFO("Validating popFirst()");
  for (i = iMin; i <= iMax; i++) {
    int item = stack.popFirst();
    EXPECT(i == item, "Item '%d' didn't match the item '%d' popped", i, item);
  }
  EXPECT(stack.isEmpty());
  // Repopulate in the opposite insertion order and drain back-to-front.
  INFO("Adding items from %d to %d to the stack", iMin, iMax);
  for (i = 0; i >= iMin; i--) stack.prepend(i);
  for (i = 1; i <= iMax; i++) stack.append(i);
  INFO("Validating pop()");
  for (i = iMax; i >= iMin; i--) {
    int item = stack.pop();
    EXPECT(i == item, "Item '%d' didn't match the item '%d' popped", i, item);
  }
  EXPECT(stack.isEmpty());
}
#endif // ASMJIT_TEST
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"

File diff suppressed because it is too large Load Diff

View File

@@ -1,53 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_HOST_H
#define _ASMJIT_HOST_H
// [Dependencies]
#include "./base.h"
// [X86 / X64]
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
#include "./x86.h"
namespace asmjit {
// Define `asmjit::host` namespace wrapping `asmjit::x86`.
namespace host { using namespace ::asmjit::x86; }
// Define host assembler.
typedef X86Assembler HostAssembler;
// Define host operands.
typedef X86GpReg GpReg;
typedef X86FpReg FpReg;
typedef X86MmReg MmReg;
typedef X86XmmReg XmmReg;
typedef X86YmmReg YmmReg;
typedef X86SegReg SegReg;
typedef X86Mem Mem;
// Define host compiler and related.
#if !defined(ASMJIT_DISABLE_COMPILER)
typedef X86Compiler HostCompiler;
typedef X86CallNode HostCallNode;
typedef X86FuncDecl HostFuncDecl;
typedef X86FuncNode HostFuncNode;
typedef X86GpVar GpVar;
typedef X86MmVar MmVar;
typedef X86XmmVar XmmVar;
typedef X86YmmVar YmmVar;
#endif // !ASMJIT_DISABLE_COMPILER
} // asmjit namespace
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
// [Guard]
#endif // _ASMJIT_HOST_H

View File

@@ -12,9 +12,11 @@
#include "./base.h"
#include "./x86/x86assembler.h"
#include "./x86/x86builder.h"
#include "./x86/x86compiler.h"
#include "./x86/x86compilerfunc.h"
#include "./x86/x86emitter.h"
#include "./x86/x86inst.h"
#include "./x86/x86misc.h"
#include "./x86/x86operand.h"
// [Guard]

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,66 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../x86/x86builder.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86Builder - Construction / Destruction]
// ============================================================================
//! Create a new `X86Builder` instance.
//!
//! If `code` is provided the builder attaches itself to it immediately;
//! otherwise it is created detached.
X86Builder::X86Builder(CodeHolder* code) noexcept : CodeBuilder() {
  if (code)
    code->attach(this);
}
//! Destroy the `X86Builder` instance (intentionally empty body; nothing
//! builder-specific needs explicit cleanup here).
X86Builder::~X86Builder() noexcept {}
// ============================================================================
// [asmjit::X86Builder - Events]
// ============================================================================
//! Called when the builder is attached to `code`.
//!
//! Rejects any non-X86-family architecture, delegates to the base-class
//! attach logic, and then selects the native general-purpose register set
//! (32-bit `gpd` for X86, 64-bit `gpq` otherwise).
Error X86Builder::onAttach(CodeHolder* code) noexcept {
  uint32_t arch = code->getArchType();

  // Only X86/X64 code holders can drive this builder.
  if (!ArchInfo::isX86Family(arch))
    return DebugUtils::errored(kErrorInvalidArch);

  ASMJIT_PROPAGATE(Base::onAttach(code));

  // Select 32-bit GP registers for X86; 64-bit registers otherwise.
  if (arch == ArchInfo::kTypeX86)
    _nativeGpArray = x86OpData.gpd;
  else
    _nativeGpArray = x86OpData.gpq;

  _nativeGpReg = _nativeGpArray[0];
  return kErrorOk;
}
// ============================================================================
// [asmjit::X86Builder - Inst]
// ============================================================================
//! \internal
//!
//! Instruction emission entry point for the builder.
//!
//! NOTE(review): this override is currently a stub - it emits nothing and
//! always reports success (see the TODO below). Confirm the implementation
//! status before relying on `X86Builder` emission.
Error X86Builder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
  // TODO:
  return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86 && !ASMJIT_DISABLE_COMPILER

View File

@@ -0,0 +1,86 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86BUILDER_H
#define _ASMJIT_X86_X86BUILDER_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_BUILDER)
// [Dependencies]
#include "../base/codebuilder.h"
#include "../base/simdtypes.h"
#include "../x86/x86emitter.h"
#include "../x86/x86misc.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::CodeBuilder]
// ============================================================================
//! Architecture-dependent \ref CodeBuilder targeting X86 and X64.
//! Architecture-dependent \ref CodeBuilder targeting X86 and X64.
//!
//! Combines the generic \ref CodeBuilder infrastructure with the X86/X64
//! instruction interface mixed in through \ref X86EmitterImplicitT.
class ASMJIT_VIRTAPI X86Builder
  : public CodeBuilder,
    public X86EmitterImplicitT<X86Builder> {
public:
  ASMJIT_NONCOPYABLE(X86Builder)
  typedef CodeBuilder Base;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a `X86Builder` instance (attaches to `code` when non-null).
  ASMJIT_API X86Builder(CodeHolder* code = nullptr) noexcept;
  //! Destroy the `X86Builder` instance.
  ASMJIT_API ~X86Builder() noexcept;

  // --------------------------------------------------------------------------
  // [Compatibility]
  // --------------------------------------------------------------------------

  //! Explicit cast to `X86Emitter`.
  ASMJIT_INLINE X86Emitter* asEmitter() noexcept { return reinterpret_cast<X86Emitter*>(this); }
  //! Explicit cast to `X86Emitter` (const).
  ASMJIT_INLINE const X86Emitter* asEmitter() const noexcept { return reinterpret_cast<const X86Emitter*>(this); }
  //! Implicit cast to `X86Emitter`.
  ASMJIT_INLINE operator X86Emitter&() noexcept { return *asEmitter(); }
  //! Implicit cast to `X86Emitter` (const).
  ASMJIT_INLINE operator const X86Emitter&() const noexcept { return *asEmitter(); }

  // --------------------------------------------------------------------------
  // [Events]
  // --------------------------------------------------------------------------

  //! Validates the target architecture and selects native GP registers.
  ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override;

  // --------------------------------------------------------------------------
  // [Code-Generation]
  // --------------------------------------------------------------------------

  ASMJIT_API virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) override;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_BUILDER
#endif // _ASMJIT_X86_X86BUILDER_H

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,726 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86COMPILERCONTEXT_P_H
#define _ASMJIT_X86_X86COMPILERCONTEXT_P_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/compiler.h"
#include "../base/compilercontext_p.h"
#include "../base/utils.h"
#include "../x86/x86assembler.h"
#include "../x86/x86compiler.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::X86VarMap]
// ============================================================================
//! \internal
//!
//! X86/X64 variable mapping, extending the generic `VarMap` with register
//! masks and per-register-class grouping of `VarAttr` entries.
struct X86VarMap : public VarMap {
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get variable-attributes list as VarAttr data.
  ASMJIT_INLINE VarAttr* getVaList() const {
    return const_cast<VarAttr*>(_list);
  }

  //! Get variable-attributes list as VarAttr data (by class).
  ASMJIT_INLINE VarAttr* getVaListByClass(uint32_t rc) const {
    return const_cast<VarAttr*>(_list) + _start.get(rc);
  }

  //! Get position of variables (by class).
  ASMJIT_INLINE uint32_t getVaStart(uint32_t rc) const {
    return _start.get(rc);
  }

  //! Get count of variables (by class).
  ASMJIT_INLINE uint32_t getVaCountByClass(uint32_t rc) const {
    return _count.get(rc);
  }

  //! Get VarAttr at `index`.
  ASMJIT_INLINE VarAttr* getVa(uint32_t index) const {
    ASMJIT_ASSERT(index < _vaCount);
    return getVaList() + index;
  }

  //! Get VarAttr of `c` class at `index`.
  ASMJIT_INLINE VarAttr* getVaByClass(uint32_t rc, uint32_t index) const {
    ASMJIT_ASSERT(index < _count._regs[rc]);
    return getVaListByClass(rc) + index;
  }

  // --------------------------------------------------------------------------
  // [Utils]
  // --------------------------------------------------------------------------

  //! Find VarAttr (linear search over all attributes; null if not found).
  ASMJIT_INLINE VarAttr* findVa(VarData* vd) const {
    VarAttr* list = getVaList();
    uint32_t count = getVaCount();
    for (uint32_t i = 0; i < count; i++)
      if (list[i].getVd() == vd)
        return &list[i];
    return nullptr;
  }

  //! Find VarAttr (by class; linear search within one class; null if not found).
  ASMJIT_INLINE VarAttr* findVaByClass(uint32_t rc, VarData* vd) const {
    VarAttr* list = getVaListByClass(rc);
    uint32_t count = getVaCountByClass(rc);
    for (uint32_t i = 0; i < count; i++)
      if (list[i].getVd() == vd)
        return &list[i];
    return nullptr;
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Special registers on input.
  //!
  //! Special register(s) restricted to one or more physical register. If there
  //! is more than one special register it means that we have to duplicate the
  //! variable content to all of them (it means that the same varible was used
  //! by two or more operands). We forget about duplicates after the register
  //! allocation finishes and marks all duplicates as non-assigned.
  X86RegMask _inRegs;

  //! Special registers on output.
  //!
  //! Special register(s) used on output. Each variable can have only one
  //! special register on the output, 'X86VarMap' contains all registers from
  //! all 'VarAttr's.
  X86RegMask _outRegs;

  //! Clobbered registers (by a function call).
  X86RegMask _clobberedRegs;

  //! Start indexes of variables per register class.
  X86RegCount _start;
  //! Count of variables per register class.
  X86RegCount _count;

  //! VarAttr list (variable-length; allocated past the end of this struct).
  VarAttr _list[1];
};
// ============================================================================
// [asmjit::X86StateCell]
// ============================================================================
//! X86/X64 state-cell.
//! X86/X64 state-cell.
//!
//! A single packed byte recording the tracked state of one variable; only
//! the low two bits are currently used, the rest are reserved.
union X86StateCell {
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the stored state (2-bit value).
  ASMJIT_INLINE uint32_t getState() const {
    return _state;
  }

  //! Set the stored state (truncated to the 2-bit field).
  ASMJIT_INLINE void setState(uint32_t state) {
    _state = static_cast<uint8_t>(state);
  }

  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------

  //! Reset the whole cell (state and unused bits) to zero.
  ASMJIT_INLINE void reset() { _packed = 0; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! All bits of the cell viewed as one byte.
  uint8_t _packed;

  struct {
    //! Variable state (2 bits).
    uint8_t _state : 2;
    //! Reserved / unused bits.
    uint8_t _unused : 6;
  };
};
// ============================================================================
// [asmjit::X86VarState]
// ============================================================================
//! X86/X64 state.
//! X86/X64 state.
//!
//! Snapshot of which variable occupies each physical register, plus the
//! occupied/modified register masks and a trailing variable-length array
//! of per-variable cells.
struct X86VarState : VarState {
  enum {
    //! Base index of GP registers.
    kGpIndex = 0,
    //! Count of GP registers.
    kGpCount = 16,

    //! Base index of MMX registers.
    kMmIndex = kGpIndex + kGpCount,
    //! Count of Mm registers.
    kMmCount = 8,

    //! Base index of XMM registers.
    kXmmIndex = kMmIndex + kMmCount,
    //! Count of XMM registers.
    kXmmCount = 16,

    //! Count of all registers in `X86VarState`.
    kAllCount = kXmmIndex + kXmmCount
  };

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the combined register->variable list (all classes).
  ASMJIT_INLINE VarData** getList() {
    return _list;
  }

  //! Get the register->variable list of one register class (null for an
  //! unknown class).
  ASMJIT_INLINE VarData** getListByClass(uint32_t rc) {
    switch (rc) {
      case kX86RegClassGp : return _listGp;
      case kX86RegClassMm : return _listMm;
      case kX86RegClassXyz: return _listXmm;

      default:
        return nullptr;
    }
  }

  // --------------------------------------------------------------------------
  // [Clear]
  // --------------------------------------------------------------------------

  //! Zero the register lists, both masks, and `numCells` trailing cells in
  //! one memset (relies on the members below being laid out contiguously).
  ASMJIT_INLINE void reset(size_t numCells) {
    ::memset(this, 0, kAllCount * sizeof(VarData*) +
                      2 * sizeof(X86RegMask) +
                      numCells * sizeof(X86StateCell));
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  union {
    //! List of all allocated variables in one array.
    VarData* _list[kAllCount];

    struct {
      //! Allocated GP registers.
      VarData* _listGp[kGpCount];
      //! Allocated MMX registers.
      VarData* _listMm[kMmCount];
      //! Allocated XMM registers.
      VarData* _listXmm[kXmmCount];
    };
  };

  //! Occupied registers (mask).
  X86RegMask _occupied;
  //! Modified registers (mask).
  X86RegMask _modified;

  //! Variables data, the length is stored in `X86Context`.
  X86StateCell _cells[1];
};
// ============================================================================
// [asmjit::X86Context]
// ============================================================================
#if defined(ASMJIT_DEBUG)
# define ASMJIT_X86_CHECK_STATE _checkState();
#else
# define ASMJIT_X86_CHECK_STATE
#endif // ASMJIT_DEBUG
//! \internal
//!
//! Compiler context, used by `X86Compiler`.
//!
//! Compiler context takes care of generating function prolog and epilog, and
//! also performs register allocation. It's used during the compilation phase
//! and considered an implementation detail and asmjit consumers don't have
//! access to it. The context is used once per function and it's reset after
//! the function is processed.
struct X86Context : public Context {
ASMJIT_NO_COPY(X86Context)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `X86Context` instance.
X86Context(X86Compiler* compiler);
//! Destroy the `X86Context` instance.
virtual ~X86Context();
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
virtual void reset(bool releaseMemory = false) override;
// --------------------------------------------------------------------------
// [Arch]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool isX64() const { return _zsp.getSize() == 16; }
ASMJIT_INLINE uint32_t getRegSize() const { return _zsp.getSize(); }
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get compiler as `X86Compiler`.
ASMJIT_INLINE X86Compiler* getCompiler() const { return static_cast<X86Compiler*>(_compiler); }
//! Get function as `X86FuncNode`.
ASMJIT_INLINE X86FuncNode* getFunc() const { return reinterpret_cast<X86FuncNode*>(_func); }
//! Get clobbered registers (global).
ASMJIT_INLINE uint32_t getClobberedRegs(uint32_t rc) { return _clobberedRegs.get(rc); }
// --------------------------------------------------------------------------
// [Helpers]
// --------------------------------------------------------------------------
ASMJIT_INLINE X86VarMap* newVarMap(uint32_t vaCount) {
return static_cast<X86VarMap*>(
_zoneAllocator.alloc(sizeof(X86VarMap) + vaCount * sizeof(VarAttr)));
}
// --------------------------------------------------------------------------
// [Emit]
// --------------------------------------------------------------------------
void emitLoad(VarData* vd, uint32_t regIndex, const char* reason);
void emitSave(VarData* vd, uint32_t regIndex, const char* reason);
void emitMove(VarData* vd, uint32_t toRegIndex, uint32_t fromRegIndex, const char* reason);
void emitSwapGp(VarData* aVd, VarData* bVd, uint32_t aIndex, uint32_t bIndex, const char* reason);
void emitPushSequence(uint32_t regs);
void emitPopSequence(uint32_t regs);
void emitConvertVarToVar(uint32_t dstType, uint32_t dstIndex, uint32_t srcType, uint32_t srcIndex);
void emitMoveVarOnStack(uint32_t dstType, const X86Mem* dst, uint32_t srcType, uint32_t srcIndex);
void emitMoveImmOnStack(uint32_t dstType, const X86Mem* dst, const Imm* src);
void emitMoveImmToReg(uint32_t dstType, uint32_t dstIndex, const Imm* src);
// --------------------------------------------------------------------------
// [Register Management]
// --------------------------------------------------------------------------
void _checkState();
// --------------------------------------------------------------------------
// [Attach / Detach]
// --------------------------------------------------------------------------
//! Attach.
//!
//! Attach a register to the 'VarData', changing 'VarData' members to show
//! that the variable is currently alive and linking variable with the
//! current 'X86VarState'.
template<int C>
ASMJIT_INLINE void attach(VarData* vd, uint32_t regIndex, bool modified) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(regIndex != kInvalidReg);
// Prevent Esp allocation if C==Gp.
ASMJIT_ASSERT(C != kX86RegClassGp || regIndex != kX86RegIndexSp);
uint32_t regMask = Utils::mask(regIndex);
vd->setState(kVarStateReg);
vd->setModified(modified);
vd->setRegIndex(regIndex);
vd->addHomeIndex(regIndex);
_x86State.getListByClass(C)[regIndex] = vd;
_x86State._occupied.or_(C, regMask);
_x86State._modified.or_(C, static_cast<uint32_t>(modified) << regIndex);
ASMJIT_X86_CHECK_STATE
}
//! Detach.
//!
//! The opposite of 'Attach'. Detach resets the members in 'VarData'
//! (regIndex, state and changed flags) and unlinks the variable with the
//! current 'X86VarState'.
template<int C>
ASMJIT_INLINE void detach(VarData* vd, uint32_t regIndex, uint32_t vState) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(vd->getRegIndex() == regIndex);
ASMJIT_ASSERT(vState != kVarStateReg);
uint32_t regMask = Utils::mask(regIndex);
vd->setState(vState);
vd->resetRegIndex();
vd->setModified(false);
_x86State.getListByClass(C)[regIndex] = nullptr;
_x86State._occupied.andNot(C, regMask);
_x86State._modified.andNot(C, regMask);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Rebase]
// --------------------------------------------------------------------------
//! Rebase.
//!
//! Change the register of the 'VarData' changing also the current 'X86VarState'.
//! Rebase is nearly identical to 'Detach' and 'Attach' sequence, but doesn't
//! change the `VarData`s modified flag.
template<int C>
ASMJIT_INLINE void rebase(VarData* vd, uint32_t newRegIndex, uint32_t oldRegIndex) {
ASMJIT_ASSERT(vd->getClass() == C);
uint32_t newRegMask = Utils::mask(newRegIndex);
uint32_t oldRegMask = Utils::mask(oldRegIndex);
uint32_t bothRegMask = newRegMask ^ oldRegMask;
vd->setRegIndex(newRegIndex);
_x86State.getListByClass(C)[oldRegIndex] = nullptr;
_x86State.getListByClass(C)[newRegIndex] = vd;
_x86State._occupied.xor_(C, bothRegMask);
_x86State._modified.xor_(C, bothRegMask & -static_cast<int32_t>(vd->isModified()));
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Load / Save]
// --------------------------------------------------------------------------
//! Load.
//!
//! Load variable from its memory slot to a register, emitting 'Load'
//! instruction and changing the variable state to allocated.
template<int C>
ASMJIT_INLINE void load(VarData* vd, uint32_t regIndex) {
// Can be only called if variable is not allocated.
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(vd->getState() != kVarStateReg);
ASMJIT_ASSERT(vd->getRegIndex() == kInvalidReg);
emitLoad(vd, regIndex, "Load");
attach<C>(vd, regIndex, false);
ASMJIT_X86_CHECK_STATE
}
//! Save.
//!
//! Save the variable into its home location, but keep it as allocated.
template<int C>
ASMJIT_INLINE void save(VarData* vd) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(vd->getState() == kVarStateReg);
ASMJIT_ASSERT(vd->getRegIndex() != kInvalidReg);
uint32_t regIndex = vd->getRegIndex();
uint32_t regMask = Utils::mask(regIndex);
emitSave(vd, regIndex, "Save");
// Register content now matches the memory home - clear the modified bit on
// the variable and in the per-class modified bit-vector, keep it allocated.
vd->setModified(false);
_x86State._modified.andNot(C, regMask);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Move / Swap]
// --------------------------------------------------------------------------
//! Move a register.
//!
//! Move register from one index to another, emitting 'Move' if needed. This
//! function does nothing if register is already at the given index.
template<int C>
ASMJIT_INLINE void move(VarData* vd, uint32_t regIndex) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(vd->getState() == kVarStateReg);
ASMJIT_ASSERT(vd->getRegIndex() != kInvalidReg);
uint32_t oldIndex = vd->getRegIndex();
// No-op when the variable already occupies the requested register.
if (regIndex != oldIndex) {
emitMove(vd, regIndex, oldIndex, "Move");
rebase<C>(vd, regIndex, oldIndex);
}
ASMJIT_X86_CHECK_STATE
}
//! Swap two registers
//!
//! It's only possible to swap Gp registers.
ASMJIT_INLINE void swapGp(VarData* aVd, VarData* bVd) {
// Both variables must be distinct, GP-class and currently allocated.
ASMJIT_ASSERT(aVd != bVd);
ASMJIT_ASSERT(aVd->getClass() == kX86RegClassGp);
ASMJIT_ASSERT(aVd->getState() == kVarStateReg);
ASMJIT_ASSERT(aVd->getRegIndex() != kInvalidReg);
ASMJIT_ASSERT(bVd->getClass() == kX86RegClassGp);
ASMJIT_ASSERT(bVd->getState() == kVarStateReg);
ASMJIT_ASSERT(bVd->getRegIndex() != kInvalidReg);
uint32_t aIndex = aVd->getRegIndex();
uint32_t bIndex = bVd->getRegIndex();
emitSwapGp(aVd, bVd, aIndex, bIndex, "Swap");
// Exchange the register assignments and the register->variable mapping.
aVd->setRegIndex(bIndex);
bVd->setRegIndex(aIndex);
_x86State.getListByClass(kX86RegClassGp)[aIndex] = bVd;
_x86State.getListByClass(kX86RegClassGp)[bIndex] = aVd;
// `m` is 1 only when exactly one of the two variables is modified; in that
// case both bits in the modified bit-vector must flip so each modified flag
// follows its variable. When both or neither are modified, XOR with 0 keeps
// the bit-vector unchanged.
uint32_t m = aVd->isModified() ^ bVd->isModified();
_x86State._modified.xor_(kX86RegClassGp, (m << aIndex) | (m << bIndex));
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Alloc / Spill]
// --------------------------------------------------------------------------
//! Alloc.
template<int C>
ASMJIT_INLINE void alloc(VarData* vd, uint32_t regIndex) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(regIndex != kInvalidReg);
uint32_t oldRegIndex = vd->getRegIndex();
uint32_t oldState = vd->getState();
uint32_t regMask = Utils::mask(regIndex);
// The target register must be free, unless the variable already sits in it.
ASMJIT_ASSERT(_x86State.getListByClass(C)[regIndex] == nullptr || regIndex == oldRegIndex);
if (oldState != kVarStateReg) {
// Not in a register: load from memory only when it has a memory home
// (kVarStateMem); otherwise just claim the register.
if (oldState == kVarStateMem)
emitLoad(vd, regIndex, "Alloc");
vd->setModified(false);
}
else if (oldRegIndex != regIndex) {
// Allocated elsewhere: move and release the old register. After the XOR
// below `regMask` covers both registers, so a single xor_() sets the new
// occupied bit and clears the old one.
emitMove(vd, regIndex, oldRegIndex, "Alloc");
_x86State.getListByClass(C)[oldRegIndex] = nullptr;
regMask ^= Utils::mask(oldRegIndex);
}
else {
// Already in the requested register - nothing to do.
ASMJIT_X86_CHECK_STATE
return;
}
vd->setState(kVarStateReg);
vd->setRegIndex(regIndex);
vd->addHomeIndex(regIndex);
_x86State.getListByClass(C)[regIndex] = vd;
_x86State._occupied.xor_(C, regMask);
// The modified bits toggle only when the variable is modified (see rebase).
_x86State._modified.xor_(C, regMask & -static_cast<int32_t>(vd->isModified()));
ASMJIT_X86_CHECK_STATE
}
//! Spill.
//!
//! Spill variable/register, saves the content to the memory-home if modified.
template<int C>
ASMJIT_INLINE void spill(VarData* vd) {
ASMJIT_ASSERT(vd->getClass() == C);
// Nothing to spill when the variable is not currently in a register.
if (vd->getState() != kVarStateReg) {
ASMJIT_X86_CHECK_STATE
return;
}
uint32_t regIndex = vd->getRegIndex();
ASMJIT_ASSERT(regIndex != kInvalidReg);
ASMJIT_ASSERT(_x86State.getListByClass(C)[regIndex] == vd);
// Write back only when the register content diverged from the memory home.
if (vd->isModified())
emitSave(vd, regIndex, "Spill");
detach<C>(vd, regIndex, kVarStateMem);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Modify]
// --------------------------------------------------------------------------
//! Mark `vd` as modified so its register will be written back when spilled.
template<int C>
ASMJIT_INLINE void modify(VarData* vd) {
ASMJIT_ASSERT(vd->getClass() == C);
// NOTE(review): unlike save()/spill() this does not assert the variable is
// allocated; `regIndex` is assumed valid here - confirm callers guarantee it.
uint32_t regIndex = vd->getRegIndex();
uint32_t regMask = Utils::mask(regIndex);
vd->setModified(true);
_x86State._modified.or_(C, regMask);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Unuse]
// --------------------------------------------------------------------------
//! Unuse.
//!
//! Unuse variable, it will be detached it if it's allocated then its state
//! will be changed to kVarStateNone.
template<int C>
ASMJIT_INLINE void unuse(VarData* vd, uint32_t vState = kVarStateNone) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(vState != kVarStateReg);
uint32_t regIndex = vd->getRegIndex();
// If allocated, detach() releases the register and applies `vState` itself;
// otherwise only the variable's state changes.
if (regIndex != kInvalidReg)
detach<C>(vd, regIndex, vState);
else
vd->setState(vState);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [State]
// --------------------------------------------------------------------------
//! Get state as `X86VarState`.
ASMJIT_INLINE X86VarState* getState() const {
// Cast away const so callers get a mutable pointer to the live state.
return const_cast<X86VarState*>(&_x86State);
}
// State management interface - implementations live out-of-line.
virtual void loadState(VarState* src);
virtual VarState* saveState();
virtual void switchState(VarState* src);
virtual void intersectStates(VarState* a, VarState* b);
// --------------------------------------------------------------------------
// [Memory]
// --------------------------------------------------------------------------
//! Get a memory operand addressing the home slot of `vd`.
ASMJIT_INLINE X86Mem getVarMem(VarData* vd) {
// NOTE(review): the call presumably materializes the variable's memory cell
// on first use - its return value is intentionally discarded.
(void)getVarCell(vd);
X86Mem mem(_memSlot);
// The variable id is stored as the base; it looks like a placeholder that
// is resolved to a real base register + displacement later - confirm.
mem.setBase(vd->getId());
return mem;
}
// --------------------------------------------------------------------------
// [Fetch]
// --------------------------------------------------------------------------
virtual Error fetch();
// --------------------------------------------------------------------------
// [Annotate]
// --------------------------------------------------------------------------
virtual Error annotate();
// --------------------------------------------------------------------------
// [Translate]
// --------------------------------------------------------------------------
virtual Error translate();
// --------------------------------------------------------------------------
// [Serialize]
// --------------------------------------------------------------------------
virtual Error serialize(Assembler* assembler, HLNode* start, HLNode* stop);
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Count of X86/X64 registers.
X86RegCount _regCount;
//! X86/X64 stack-pointer (esp or rsp).
X86GpReg _zsp;
//! X86/X64 frame-pointer (ebp or rbp).
X86GpReg _zbp;
//! Temporary memory operand.
X86Mem _memSlot;
//! X86/X64 specific compiler state, linked to `_state`.
X86VarState _x86State;
//! Clobbered registers (for the whole function).
X86RegMask _clobberedRegs;
//! Memory cell that stores the address used to restore a manually
//! aligned stack.
VarCell* _stackFrameCell;
//! Global allocable registers mask.
uint32_t _gaRegs[kX86RegClassCount];
//! Function arguments base pointer (register).
uint8_t _argBaseReg;
//! Function variables base pointer (register).
uint8_t _varBaseReg;
//! Whether to emit comments.
uint8_t _emitComments;
//! Function arguments base offset.
int32_t _argBaseOffset;
//! Function variables base offset.
int32_t _varBaseOffset;
//! Function arguments displacement.
int32_t _argActualDisp;
//! Function variables displacement.
int32_t _varActualDisp;
//! Temporary string builder used for logging.
StringBuilderTmp<256> _stringBuilder;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_X86_X86COMPILERCONTEXT_P_H

View File

@@ -1,551 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER) && (defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64))
// [Dependencies]
#include "../x86/x86compiler.h"
#include "../x86/x86compilerfunc.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86FuncDecl - Helpers]
// ============================================================================
//! \internal
//!
//! Get whether the variable type `aType` is a GP integer type.
static ASMJIT_INLINE bool x86ArgIsInt(uint32_t aType) {
  ASMJIT_ASSERT(aType < kX86VarTypeCount);
  const bool isIntType = Utils::inInterval<uint32_t>(aType, _kVarTypeIntStart, _kVarTypeIntEnd);
  return isIntType;
}
//! \internal
//!
//! Get whether the variable type `aType` is a floating-point type.
static ASMJIT_INLINE bool x86ArgIsFp(uint32_t aType) {
  ASMJIT_ASSERT(aType < kX86VarTypeCount);
  const bool isFpType = Utils::inInterval<uint32_t>(aType, _kVarTypeFpStart, _kVarTypeFpEnd);
  return isFpType;
}
//! \internal
//!
//! Map a scalar FP variable type to its XMM counterpart; any other type is
//! returned unchanged.
static ASMJIT_INLINE uint32_t x86ArgTypeToXmmType(uint32_t aType) {
  switch (aType) {
    case kVarTypeFp32: return kX86VarTypeXmmSs;
    case kVarTypeFp64: return kX86VarTypeXmmSd;
    default: return aType;
  }
}
//! Get an architecture depending on the calling convention `callConv`.
//!
//! Returns `kArchX86` for the X86 calling-convention range, `kArchX64` for
//! the X64 range, and `kArchNone` when `callConv` belongs to neither.
static ASMJIT_INLINE uint32_t x86GetArchFromCConv(uint32_t callConv) {
  if (Utils::inInterval<uint32_t>(callConv, _kCallConvX86Start, _kCallConvX86End))
    return kArchX86;
  if (Utils::inInterval<uint32_t>(callConv, _kCallConvX64Start, _kCallConvX64End))
    return kArchX64;
  return kArchNone;
}
// ============================================================================
// [asmjit::X86FuncDecl - SetPrototype]
// ============================================================================
#define R(_Index_) kX86RegIndex##_Index_
//! \internal
//!
//! Initialize `self` from the calling convention `callConv` of `arch`:
//! resets zone sizes and direction defaults, then records which registers
//! pass arguments (and in which order) and which are callee-preserved.
//!
//! Returns `kErrorOk`, or `kErrorInvalidArgument` when `arch`/`callConv` is
//! unknown or not compiled into this build.
static uint32_t X86FuncDecl_initConv(X86FuncDecl* self, uint32_t arch, uint32_t callConv) {
  // Setup defaults.
  self->_argStackSize = 0;
  self->_redZoneSize = 0;
  self->_spillZoneSize = 0;

  self->_callConv = static_cast<uint8_t>(callConv);
  self->_calleePopsStack = false;
  self->_argsDirection = kFuncDirRTL;

  self->_passed.reset();
  self->_preserved.reset();

  // The order arrays are byte arrays, so byte-filling with kInvalidReg is OK.
  ::memset(self->_passedOrderGp, kInvalidReg, ASMJIT_ARRAY_SIZE(self->_passedOrderGp));
  ::memset(self->_passedOrderXyz, kInvalidReg, ASMJIT_ARRAY_SIZE(self->_passedOrderXyz));

  switch (arch) {
    // ------------------------------------------------------------------------
    // [X86 Support]
    // ------------------------------------------------------------------------

#if defined(ASMJIT_BUILD_X86)
    case kArchX86: {
      self->_preserved.set(kX86RegClassGp, Utils::mask(R(Bx), R(Sp), R(Bp), R(Si), R(Di)));

      switch (callConv) {
        case kCallConvX86CDecl:
          break;

        case kCallConvX86StdCall:
          self->_calleePopsStack = true;
          break;

        case kCallConvX86MsThisCall:
          self->_calleePopsStack = true;
          self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx)));
          self->_passedOrderGp[0] = R(Cx);
          break;

        case kCallConvX86MsFastCall:
          self->_calleePopsStack = true;
          // BUG FIX: the mask previously contained ECX twice; MS fastcall
          // passes the first two GP arguments in ECX and EDX (matching the
          // `_passedOrderGp` assignments below).
          self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx), R(Dx)));
          self->_passedOrderGp[0] = R(Cx);
          self->_passedOrderGp[1] = R(Dx);
          break;

        case kCallConvX86BorlandFastCall:
          self->_calleePopsStack = true;
          self->_argsDirection = kFuncDirLTR;
          self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax), R(Dx), R(Cx)));
          self->_passedOrderGp[0] = R(Ax);
          self->_passedOrderGp[1] = R(Dx);
          self->_passedOrderGp[2] = R(Cx);
          break;

        case kCallConvX86GccFastCall:
          self->_calleePopsStack = true;
          self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx), R(Dx)));
          self->_passedOrderGp[0] = R(Cx);
          self->_passedOrderGp[1] = R(Dx);
          break;

        case kCallConvX86GccRegParm1:
          self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax)));
          self->_passedOrderGp[0] = R(Ax);
          break;

        case kCallConvX86GccRegParm2:
          self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax), R(Dx)));
          self->_passedOrderGp[0] = R(Ax);
          self->_passedOrderGp[1] = R(Dx);
          break;

        case kCallConvX86GccRegParm3:
          self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax), R(Dx), R(Cx)));
          self->_passedOrderGp[0] = R(Ax);
          self->_passedOrderGp[1] = R(Dx);
          self->_passedOrderGp[2] = R(Cx);
          break;

        default:
          return kErrorInvalidArgument;
      }
      return kErrorOk;
    }
#endif // ASMJIT_BUILD_X86

    // ------------------------------------------------------------------------
    // [X64 Support]
    // ------------------------------------------------------------------------

#if defined(ASMJIT_BUILD_X64)
    case kArchX64: {
      switch (callConv) {
        case kCallConvX64Win:
          // 32 bytes of spill (shadow) space reserved by the caller.
          self->_spillZoneSize = 32;

          self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx), R(Dx), 8, 9));
          self->_passedOrderGp[0] = R(Cx);
          self->_passedOrderGp[1] = R(Dx);
          self->_passedOrderGp[2] = 8;
          self->_passedOrderGp[3] = 9;

          self->_passed.set(kX86RegClassXyz, Utils::mask(0, 1, 2, 3));
          self->_passedOrderXyz[0] = 0;
          self->_passedOrderXyz[1] = 1;
          self->_passedOrderXyz[2] = 2;
          self->_passedOrderXyz[3] = 3;

          self->_preserved.set(kX86RegClassGp, Utils::mask(R(Bx), R(Sp), R(Bp), R(Si), R(Di), 12, 13, 14, 15));
          self->_preserved.set(kX86RegClassXyz, Utils::mask(6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
          break;

        case kCallConvX64Unix:
          // 128-byte red zone below RSP usable by the callee.
          self->_redZoneSize = 128;

          self->_passed.set(kX86RegClassGp, Utils::mask(R(Di), R(Si), R(Dx), R(Cx), 8, 9));
          self->_passedOrderGp[0] = R(Di);
          self->_passedOrderGp[1] = R(Si);
          self->_passedOrderGp[2] = R(Dx);
          self->_passedOrderGp[3] = R(Cx);
          self->_passedOrderGp[4] = 8;
          self->_passedOrderGp[5] = 9;

          self->_passed.set(kX86RegClassXyz, Utils::mask(0, 1, 2, 3, 4, 5, 6, 7));
          self->_passedOrderXyz[0] = 0;
          self->_passedOrderXyz[1] = 1;
          self->_passedOrderXyz[2] = 2;
          self->_passedOrderXyz[3] = 3;
          self->_passedOrderXyz[4] = 4;
          self->_passedOrderXyz[5] = 5;
          self->_passedOrderXyz[6] = 6;
          self->_passedOrderXyz[7] = 7;

          self->_preserved.set(kX86RegClassGp, Utils::mask(R(Bx), R(Sp), R(Bp), 12, 13, 14, 15));
          break;

        default:
          return kErrorInvalidArgument;
      }
      return kErrorOk;
    }
#endif // ASMJIT_BUILD_X64

    default:
      return kErrorInvalidArgument;
  }
}
#undef R
//! \internal
//!
//! Assign registers / stack offsets to the return value(s) and all `numArgs`
//! arguments of the function described by `self`, according to the calling
//! convention already initialized by `X86FuncDecl_initConv`.
//!
//! On return every argument either has a register index or a positive stack
//! offset, and `self->_argStackSize` holds the total size of stack-passed
//! arguments. Returns `kErrorOk`.
static Error X86FuncDecl_initFunc(X86FuncDecl* self, uint32_t arch,
  uint32_t ret, const uint32_t* args, uint32_t numArgs) {

  ASMJIT_ASSERT(numArgs <= kFuncArgCount);

  uint32_t callConv = self->_callConv;
  uint32_t regSize = (arch == kArchX86) ? 4 : 8;

  int32_t i = 0;
  int32_t gpPos = 0;
  int32_t xmmPos = 0;
  int32_t stackOffset = 0;

  // Map the portable variable-type ids to per-architecture ones.
  const uint8_t* varMapping = nullptr;

#if defined(ASMJIT_BUILD_X86)
  if (arch == kArchX86)
    varMapping = _x86VarMapping;
#endif // ASMJIT_BUILD_X86

#if defined(ASMJIT_BUILD_X64)
  if (arch == kArchX64)
    varMapping = _x64VarMapping;
#endif // ASMJIT_BUILD_X64

  ASMJIT_ASSERT(varMapping != nullptr);
  self->_numArgs = static_cast<uint8_t>(numArgs);
  self->_retCount = 0;

  // Seed all used argument slots; register/stack placement is decided below.
  for (i = 0; i < static_cast<int32_t>(numArgs); i++) {
    FuncInOut& arg = self->getArg(i);
    arg._varType = static_cast<uint8_t>(varMapping[args[i]]);
    arg._regIndex = kInvalidReg;
    arg._stackOffset = kFuncStackInvalid;
  }

  for (; i < kFuncArgCount; i++) {
    self->_args[i].reset();
  }

  self->_rets[0].reset();
  self->_rets[1].reset();
  self->_argStackSize = 0;
  self->_used.reset();

  if (ret != kInvalidVar) {
    ret = varMapping[ret];
    switch (ret) {
      case kVarTypeInt64:
      case kVarTypeUInt64:
        // 64-bit value is returned in EDX:EAX on x86.
#if defined(ASMJIT_BUILD_X86)
        if (arch == kArchX86) {
          self->_retCount = 2;
          self->_rets[0]._varType = kVarTypeUInt32;
          self->_rets[0]._regIndex = kX86RegIndexAx;
          self->_rets[1]._varType = static_cast<uint8_t>(ret - 2);
          self->_rets[1]._regIndex = kX86RegIndexDx;
          // BUG FIX: previously execution fell through into the 32-bit case
          // below, which overwrote the EDX:EAX pair with a single-register
          // return. The pair setup must be final on x86.
          break;
        }
        ASMJIT_FALLTHROUGH;
#endif // ASMJIT_BUILD_X86

      case kVarTypeInt8:
      case kVarTypeUInt8:
      case kVarTypeInt16:
      case kVarTypeUInt16:
      case kVarTypeInt32:
      case kVarTypeUInt32:
        // Integer returns use [E|R]AX.
        self->_retCount = 1;
        self->_rets[0]._varType = static_cast<uint8_t>(ret);
        self->_rets[0]._regIndex = kX86RegIndexAx;
        break;

      case kX86VarTypeMm:
        self->_retCount = 1;
        self->_rets[0]._varType = static_cast<uint8_t>(ret);
        self->_rets[0]._regIndex = 0;
        break;

      case kVarTypeFp32:
        self->_retCount = 1;
        if (arch == kArchX86) {
          self->_rets[0]._varType = kVarTypeFp32;
          self->_rets[0]._regIndex = 0;
        }
        else {
          self->_rets[0]._varType = kX86VarTypeXmmSs;
          self->_rets[0]._regIndex = 0;
        }
        break;

      case kVarTypeFp64:
        self->_retCount = 1;
        if (arch == kArchX86) {
          self->_rets[0]._varType = kVarTypeFp64;
          self->_rets[0]._regIndex = 0;
        }
        else {
          // (Removed a redundant `break` that duplicated the one below.)
          self->_rets[0]._varType = kX86VarTypeXmmSd;
          self->_rets[0]._regIndex = 0;
        }
        break;

      case kX86VarTypeXmm:
      case kX86VarTypeXmmSs:
      case kX86VarTypeXmmSd:
      case kX86VarTypeXmmPs:
      case kX86VarTypeXmmPd:
        self->_retCount = 1;
        self->_rets[0]._varType = static_cast<uint8_t>(ret);
        self->_rets[0]._regIndex = 0;
        break;
    }
  }

  if (self->_numArgs == 0)
    return kErrorOk;

#if defined(ASMJIT_BUILD_X86)
  if (arch == kArchX86) {
    // Register arguments (Integer), always left-to-right.
    for (i = 0; i != static_cast<int32_t>(numArgs); i++) {
      FuncInOut& arg = self->getArg(i);
      uint32_t varType = varMapping[arg.getVarType()];

      if (!x86ArgIsInt(varType) || gpPos >= ASMJIT_ARRAY_SIZE(self->_passedOrderGp))
        continue;

      if (self->_passedOrderGp[gpPos] == kInvalidReg)
        continue;

      arg._regIndex = self->_passedOrderGp[gpPos++];
      self->_used.or_(kX86RegClassGp, Utils::mask(arg.getRegIndex()));
    }

    // Stack arguments - iteration order depends on the args direction.
    int32_t iStart = static_cast<int32_t>(numArgs - 1);
    int32_t iEnd = -1;
    int32_t iStep = -1;

    if (self->_argsDirection == kFuncDirLTR) {
      iStart = 0;
      iEnd = static_cast<int32_t>(numArgs);
      iStep = 1;
    }

    for (i = iStart; i != iEnd; i += iStep) {
      FuncInOut& arg = self->getArg(i);
      uint32_t varType = varMapping[arg.getVarType()];

      if (arg.hasRegIndex())
        continue;

      if (x86ArgIsInt(varType)) {
        stackOffset -= 4;
        arg._stackOffset = static_cast<int16_t>(stackOffset);
      }
      else if (x86ArgIsFp(varType)) {
        int32_t size = static_cast<int32_t>(_x86VarInfo[varType].getSize());
        stackOffset -= size;
        arg._stackOffset = static_cast<int16_t>(stackOffset);
      }
    }
  }
#endif // ASMJIT_BUILD_X86

#if defined(ASMJIT_BUILD_X64)
  if (arch == kArchX64) {
    if (callConv == kCallConvX64Win) {
      int32_t argMax = Utils::iMin<int32_t>(numArgs, 4);

      // Register arguments (GP/XMM), always left-to-right.
      for (i = 0; i != argMax; i++) {
        FuncInOut& arg = self->getArg(i);
        uint32_t varType = varMapping[arg.getVarType()];

        if (x86ArgIsInt(varType) && i < ASMJIT_ARRAY_SIZE(self->_passedOrderGp)) {
          arg._regIndex = self->_passedOrderGp[i];
          self->_used.or_(kX86RegClassGp, Utils::mask(arg.getRegIndex()));
          continue;
        }

        if (x86ArgIsFp(varType) && i < ASMJIT_ARRAY_SIZE(self->_passedOrderXyz)) {
          arg._varType = static_cast<uint8_t>(x86ArgTypeToXmmType(varType));
          arg._regIndex = self->_passedOrderXyz[i];
          self->_used.or_(kX86RegClassXyz, Utils::mask(arg.getRegIndex()));
        }
      }

      // Stack arguments (always right-to-left).
      for (i = numArgs - 1; i != -1; i--) {
        FuncInOut& arg = self->getArg(i);
        uint32_t varType = varMapping[arg.getVarType()];

        if (arg.hasRegIndex())
          continue;

        if (x86ArgIsInt(varType)) {
          stackOffset -= 8; // Always 8 bytes.
          arg._stackOffset = stackOffset;
        }
        else if (x86ArgIsFp(varType)) {
          stackOffset -= 8; // Always 8 bytes (float/double).
          arg._stackOffset = stackOffset;
        }
      }

      // 32 bytes shadow space (X64W calling convention specific).
      stackOffset -= 4 * 8;
    }
    else {
      // Register arguments (Gp), always left-to-right.
      for (i = 0; i != static_cast<int32_t>(numArgs); i++) {
        FuncInOut& arg = self->getArg(i);
        uint32_t varType = varMapping[arg.getVarType()];

        if (!x86ArgIsInt(varType) || gpPos >= ASMJIT_ARRAY_SIZE(self->_passedOrderGp))
          continue;

        if (self->_passedOrderGp[gpPos] == kInvalidReg)
          continue;

        arg._regIndex = self->_passedOrderGp[gpPos++];
        self->_used.or_(kX86RegClassGp, Utils::mask(arg.getRegIndex()));
      }

      // Register arguments (XMM), always left-to-right.
      for (i = 0; i != static_cast<int32_t>(numArgs); i++) {
        FuncInOut& arg = self->getArg(i);
        uint32_t varType = varMapping[arg.getVarType()];

        // BUG FIX: bound `xmmPos` like the GP loop above does with `gpPos`;
        // previously more than 8 FP arguments read past the end of
        // `_passedOrderXyz`. FP args beyond the register set keep an invalid
        // reg index and are placed on the stack by the loop below.
        if (x86ArgIsFp(varType) && xmmPos < ASMJIT_ARRAY_SIZE(self->_passedOrderXyz)) {
          arg._varType = static_cast<uint8_t>(x86ArgTypeToXmmType(varType));
          arg._regIndex = self->_passedOrderXyz[xmmPos++];
          self->_used.or_(kX86RegClassXyz, Utils::mask(arg.getRegIndex()));
        }
      }

      // Stack arguments.
      for (i = numArgs - 1; i != -1; i--) {
        FuncInOut& arg = self->getArg(i);
        uint32_t varType = varMapping[arg.getVarType()];

        if (arg.hasRegIndex())
          continue;

        if (x86ArgIsInt(varType)) {
          stackOffset -= 8;
          arg._stackOffset = static_cast<int16_t>(stackOffset);
        }
        else if (x86ArgIsFp(varType)) {
          int32_t size = static_cast<int32_t>(_x86VarInfo[varType].getSize());
          stackOffset -= size;
          arg._stackOffset = static_cast<int16_t>(stackOffset);
        }
      }
    }
  }
#endif // ASMJIT_BUILD_X64

  // Modify the stack offset, thus in result all parameters would have positive
  // non-zero stack offset.
  for (i = 0; i < static_cast<int32_t>(numArgs); i++) {
    FuncInOut& arg = self->getArg(i);
    if (!arg.hasRegIndex()) {
      arg._stackOffset += static_cast<uint16_t>(static_cast<int32_t>(regSize) - stackOffset);
    }
  }

  self->_argStackSize = static_cast<uint32_t>(-stackOffset);
  return kErrorOk;
}
Error X86FuncDecl::setPrototype(const FuncPrototype& p) {
uint32_t callConv = p.getCallConv();
uint32_t arch = x86GetArchFromCConv(callConv);
// Reject unknown calling conventions and too many arguments up-front.
if (arch == kArchNone)
return kErrorInvalidArgument;
if (p.getNumArgs() > kFuncArgCount)
return kErrorInvalidArgument;
// Validate that the required convention is supported by the current asmjit
// configuration, if only one target is compiled.
#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_BUILD_X64)
if (arch == kArchX64)
return kErrorInvalidState;
#endif // ASMJIT_BUILD_X86 && !ASMJIT_BUILD_X64
#if !defined(ASMJIT_BUILD_X86) && defined(ASMJIT_BUILD_X64)
if (arch == kArchX86)
return kErrorInvalidState;
#endif // !ASMJIT_BUILD_X86 && ASMJIT_BUILD_X64
// First initialize the calling convention, then place return values and
// arguments into registers / stack slots.
ASMJIT_PROPAGATE_ERROR(X86FuncDecl_initConv(this, arch, callConv));
ASMJIT_PROPAGATE_ERROR(X86FuncDecl_initFunc(this, arch, p.getRet(), p.getArgs(), p.getNumArgs()));
return kErrorOk;
}
// ============================================================================
// [asmjit::X86FuncDecl - Reset]
// ============================================================================
void X86FuncDecl::reset() {
uint32_t i;
// Calling-convention related state.
_callConv = kCallConvNone;
_calleePopsStack = false;
_argsDirection = kFuncDirRTL;
_reserved0 = 0;
// Function layout.
_numArgs = 0;
_retCount = 0;
_argStackSize = 0;
_redZoneSize = 0;
_spillZoneSize = 0;
for (i = 0; i < ASMJIT_ARRAY_SIZE(_args); i++)
_args[i].reset();
_rets[0].reset();
_rets[1].reset();
_used.reset();
_passed.reset();
_preserved.reset();
// The order arrays are uint8_t, so byte-filling them with kInvalidReg is OK.
::memset(_passedOrderGp, kInvalidReg, ASMJIT_ARRAY_SIZE(_passedOrderGp));
::memset(_passedOrderXyz, kInvalidReg, ASMJIT_ARRAY_SIZE(_passedOrderXyz));
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER && (ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64)

View File

@@ -1,133 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86COMPILERFUNC_P_H
#define _ASMJIT_X86_X86COMPILERFUNC_P_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/compilerfunc.h"
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::TypeId]
// ============================================================================
#if !defined(ASMJIT_DOCGEN)
// Associate each X86 register/variable C++ type with its variable-type id
// (ASMJIT_TYPE_ID presumably registers the mapping for compile-time lookup;
// the block is hidden from generated documentation).
ASMJIT_TYPE_ID(X86MmReg, kX86VarTypeMm);
ASMJIT_TYPE_ID(X86MmVar, kX86VarTypeMm);
ASMJIT_TYPE_ID(X86XmmReg, kX86VarTypeXmm);
ASMJIT_TYPE_ID(X86XmmVar, kX86VarTypeXmm);
ASMJIT_TYPE_ID(X86YmmReg, kX86VarTypeYmm);
ASMJIT_TYPE_ID(X86YmmVar, kX86VarTypeYmm);
ASMJIT_TYPE_ID(X86ZmmReg, kX86VarTypeZmm);
ASMJIT_TYPE_ID(X86ZmmVar, kX86VarTypeZmm);
#endif // !ASMJIT_DOCGEN
// ============================================================================
// [asmjit::X86FuncDecl]
// ============================================================================
//! X86 function, including calling convention, arguments and their
//! register indices or stack positions.
struct X86FuncDecl : public FuncDecl {
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `X86FuncDecl` instance.
ASMJIT_INLINE X86FuncDecl() { reset(); }
// --------------------------------------------------------------------------
// [Accessors - X86]
// --------------------------------------------------------------------------
//! Get used registers mask for the given register class `rc`.
//!
//! NOTE: The result depends on the function calling convention AND the
//! function prototype. Returned mask contains only registers actually used
//! to pass function arguments.
ASMJIT_INLINE uint32_t getUsed(uint32_t rc) const { return _used.get(rc); }
//! Get passed registers mask for the given register class `rc`.
//!
//! NOTE: The result depends on the function calling convention used; the
//! prototype of the function doesn't affect the mask returned.
ASMJIT_INLINE uint32_t getPassed(uint32_t rc) const { return _passed.get(rc); }
//! Get preserved registers mask for the given register class `rc`.
//!
//! NOTE: The result depends on the function calling convention used; the
//! prototype of the function doesn't affect the mask returned.
ASMJIT_INLINE uint32_t getPreserved(uint32_t rc) const { return _preserved.get(rc); }
//! Get the order of passed registers (GP).
//!
//! NOTE: The result depends on the function calling convention used; the
//! prototype of the function doesn't affect the mask returned.
ASMJIT_INLINE const uint8_t* getPassedOrderGp() const { return _passedOrderGp; }
//! Get the order of passed registers (XMM/YMM/ZMM).
//!
//! NOTE: The result depends on the function calling convention used; the
//! prototype of the function doesn't affect the mask returned.
ASMJIT_INLINE const uint8_t* getPassedOrderXyz() const { return _passedOrderXyz; }
// --------------------------------------------------------------------------
// [SetPrototype]
// --------------------------------------------------------------------------
//! Set function prototype.
//!
//! This will set function calling convention and setup arguments variables.
//!
//! NOTE: This function will allocate variables, it can be called only once.
ASMJIT_API Error setPrototype(const FuncPrototype& p);
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
//! Reset the declaration to its default (empty) state.
ASMJIT_API void reset();
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Used registers.
X86RegMask _used;
//! Passed registers (defined by the calling convention).
X86RegMask _passed;
//! Preserved registers (defined by the calling convention).
X86RegMask _preserved;
//! Order of registers used to pass GP function arguments.
uint8_t _passedOrderGp[8];
//! Order of registers used to pass XMM/YMM/ZMM function arguments.
uint8_t _passedOrderXyz[8];
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_X86_X86COMPILERFUNC_P_H

5044
src/asmjit/x86/x86emitter.h Normal file

File diff suppressed because it is too large Load Diff

505
src/asmjit/x86/x86globals.h Normal file
View File

@@ -0,0 +1,505 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86GLOBALS_H
#define _ASMJIT_X86_X86GLOBALS_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::x86regs::]
// ============================================================================
//! X86 registers.
namespace x86regs {}
// ============================================================================
// [asmjit::x86defs::]
// ============================================================================
//! X86 definitions.
namespace x86defs {
// ============================================================================
// [asmjit::x86defs::SpecialRegFlags]
// ============================================================================
//! Flags describing special registers and/or their parts.
ASMJIT_ENUM(SpecialRegFlags) {
// Each value is a single distinct bit, so flags can be OR-ed into a mask.
kSpecialReg_Flags_CF = 0x00000001U, //!< [R|E]FLAGS - Carry flag.
kSpecialReg_Flags_PF = 0x00000002U, //!< [R|E]FLAGS - Parity flag.
kSpecialReg_Flags_AF = 0x00000004U, //!< [R|E]FLAGS - Adjust flag.
kSpecialReg_Flags_ZF = 0x00000008U, //!< [R|E]FLAGS - Zero flag.
kSpecialReg_Flags_SF = 0x00000010U, //!< [R|E]FLAGS - Sign flag.
kSpecialReg_Flags_TF = 0x00000020U, //!< [R|E]FLAGS - Trap flag.
kSpecialReg_Flags_IF = 0x00000040U, //!< [R|E]FLAGS - Interrupt enable flag.
kSpecialReg_Flags_DF = 0x00000080U, //!< [R|E]FLAGS - Direction flag.
kSpecialReg_Flags_OF = 0x00000100U, //!< [R|E]FLAGS - Overflow flag.
kSpecialReg_Flags_AC = 0x00000200U, //!< [R|E]FLAGS - Alignment check.
kSpecialReg_Flags_Sys = 0x00000400U, //!< [R|E]FLAGS - System flags.
kSpecialReg_X87CW_Exc = 0x00000800U, //!< X87 Control Word - Exception control.
kSpecialReg_X87CW_PC = 0x00001000U, //!< X87 Control Word - Precision control.
kSpecialReg_X87CW_RC = 0x00002000U, //!< X87 Control Word - Rounding control.
kSpecialReg_X87SW_Exc = 0x00004000U, //!< X87 Status Word - Exception flags.
kSpecialReg_X87SW_C0 = 0x00008000U, //!< X87 Status Word - C0 flag.
kSpecialReg_X87SW_C1 = 0x00010000U, //!< X87 Status Word - C1 flag.
kSpecialReg_X87SW_C2 = 0x00020000U, //!< X87 Status Word - C2 flag.
kSpecialReg_X87SW_Top = 0x00040000U, //!< X87 Status Word - Top of the FPU stack.
kSpecialReg_X87SW_C3 = 0x00080000U, //!< X87 Status Word - C3 flag.
kSpecialReg_XCR = 0x00100000U //!< XCR register(s).
};
// ============================================================================
// [asmjit::x86defs::X87SW]
// ============================================================================
//! FPU status word.
ASMJIT_ENUM(X87SW) {
kX87SW_Invalid = 0x0001U, //!< Invalid operation exception (IE).
kX87SW_Denormalized = 0x0002U, //!< Denormalized operand exception (DE).
kX87SW_DivByZero = 0x0004U, //!< Divide-by-zero exception (ZE).
kX87SW_Overflow = 0x0008U, //!< Overflow exception (OE).
kX87SW_Underflow = 0x0010U, //!< Underflow exception (UE).
kX87SW_Precision = 0x0020U, //!< Precision (inexact result) exception (PE).
kX87SW_StackFault = 0x0040U, //!< Stack fault (SF).
kX87SW_Interrupt = 0x0080U, //!< Exception summary / interrupt request (ES).
kX87SW_C0 = 0x0100U, //!< Condition code C0.
kX87SW_C1 = 0x0200U, //!< Condition code C1.
kX87SW_C2 = 0x0400U, //!< Condition code C2.
kX87SW_Top = 0x3800U, //!< Top-of-stack pointer (mask of bits 11-13).
kX87SW_C3 = 0x4000U, //!< Condition code C3.
kX87SW_Busy = 0x8000U //!< FPU busy flag (B).
};
// ============================================================================
// [asmjit::x86defs::X87CW]
// ============================================================================
//! FPU control word.
ASMJIT_ENUM(X87CW) {
// Exception masks (bits 0-5).
kX87CW_EM_Mask = 0x003FU, //!< Mask covering all exception-mask bits.
kX87CW_EM_Invalid = 0x0001U,
kX87CW_EM_Denormal = 0x0002U,
kX87CW_EM_DivByZero = 0x0004U,
kX87CW_EM_Overflow = 0x0008U,
kX87CW_EM_Underflow = 0x0010U,
kX87CW_EM_Inexact = 0x0020U,
// Precision control (bits 8-9).
kX87CW_PC_Mask = 0x0300U,
kX87CW_PC_Float = 0x0000U,
kX87CW_PC_Reserved = 0x0100U,
kX87CW_PC_Double = 0x0200U,
kX87CW_PC_Extended = 0x0300U,
// Rounding control (bits 10-11).
kX87CW_RC_Mask = 0x0C00U,
kX87CW_RC_Nearest = 0x0000U,
kX87CW_RC_Down = 0x0400U,
kX87CW_RC_Up = 0x0800U,
kX87CW_RC_Truncate = 0x0C00U,
// Infinity control (bit 12).
kX87CW_IC_Mask = 0x1000U,
kX87CW_IC_Projective = 0x0000U,
kX87CW_IC_Affine = 0x1000U
};
// ============================================================================
// [asmjit::x86defs::Cond]
// ============================================================================
//! Condition codes.
ASMJIT_ENUM(Cond) {
kCondO = 0x00U, //!< OF==1
kCondNO = 0x01U, //!< OF==0
kCondB = 0x02U, //!< CF==1 (unsigned < )
kCondC = 0x02U, //!< CF==1
kCondNAE = 0x02U, //!< CF==1 (unsigned < )
kCondAE = 0x03U, //!< CF==0 (unsigned >=)
kCondNB = 0x03U, //!< CF==0 (unsigned >=)
kCondNC = 0x03U, //!< CF==0
kCondE = 0x04U, //!< ZF==1 (any_sign ==)
kCondZ = 0x04U, //!< ZF==1 (any_sign ==)
kCondNE = 0x05U, //!< ZF==0 (any_sign !=)
kCondNZ = 0x05U, //!< ZF==0 (any_sign !=)
kCondBE = 0x06U, //!< CF==1 | ZF==1 (unsigned <=)
kCondNA = 0x06U, //!< CF==1 | ZF==1 (unsigned <=)
kCondA = 0x07U, //!< CF==0 & ZF==0 (unsigned > )
kCondNBE = 0x07U, //!< CF==0 & ZF==0 (unsigned > )
kCondS = 0x08U, //!< SF==1 (is negative)
kCondNS = 0x09U, //!< SF==0 (is positive or zero)
kCondP = 0x0AU, //!< PF==1
kCondPE = 0x0AU, //!< PF==1
kCondPO = 0x0BU, //!< PF==0
kCondNP = 0x0BU, //!< PF==0
kCondL = 0x0CU, //!< SF!=OF (signed < )
kCondNGE = 0x0CU, //!< SF!=OF (signed < )
kCondGE = 0x0DU, //!< SF==OF (signed >=)
kCondNL = 0x0DU, //!< SF==OF (signed >=)
kCondLE = 0x0EU, //!< ZF==1 | SF!=OF (signed <=)
kCondNG = 0x0EU, //!< ZF==1 | SF!=OF (signed <=)
kCondG = 0x0FU, //!< ZF==0 & SF==OF (signed > )
kCondNLE = 0x0FU, //!< ZF==0 & SF==OF (signed > )
kCondCount = 0x10U, //!< Count of condition codes (not a valid code).
// Simplified condition codes (aliases of the codes above).
kCondSign = kCondS, //!< Sign.
kCondNotSign = kCondNS, //!< Not Sign.
kCondOverflow = kCondO, //!< Signed overflow.
kCondNotOverflow = kCondNO, //!< Not signed overflow.
kCondEqual = kCondE, //!< Equal `a == b`.
kCondNotEqual = kCondNE, //!< Not Equal `a != b`.
kCondSignedLT = kCondL, //!< Signed `a < b`.
kCondSignedLE = kCondLE, //!< Signed `a <= b`.
kCondSignedGT = kCondG, //!< Signed `a > b`.
kCondSignedGE = kCondGE, //!< Signed `a >= b`.
kCondUnsignedLT = kCondB, //!< Unsigned `a < b`.
kCondUnsignedLE = kCondBE, //!< Unsigned `a <= b`.
kCondUnsignedGT = kCondA, //!< Unsigned `a > b`.
kCondUnsignedGE = kCondAE, //!< Unsigned `a >= b`.
kCondZero = kCondZ, //!< Zero (ZF==1).
kCondNotZero = kCondNZ, //!< Not zero (ZF==0).
kCondNegative = kCondS, //!< Negative (SF==1).
kCondPositive = kCondNS, //!< Positive or zero (SF==0).
kCondParityEven = kCondP, //!< Parity even (PF==1).
kCondParityOdd = kCondPO //!< Parity odd (PF==0).
};
// ============================================================================
// [asmjit::x86defs::CmpPredicate]
// ============================================================================

//! A predicate used by CMP[PD|PS|SD|SS] instructions.
//!
//! The predicate is passed in the instruction's imm8 operand; only the 8
//! values below (0x00-0x07) are encodable by the non-AVX forms.
ASMJIT_ENUM(CmpPredicate) {
  kCmpEQ                = 0x00U,         //!< Equal             (Quiet).
  kCmpLT                = 0x01U,         //!< Less              (Signaling).
  kCmpLE                = 0x02U,         //!< Less/Equal        (Signaling).
  kCmpUNORD             = 0x03U,         //!< Unordered         (Quiet).
  kCmpNEQ               = 0x04U,         //!< Not Equal         (Quiet).
  kCmpNLT               = 0x05U,         //!< Not Less          (Signaling).
  kCmpNLE               = 0x06U,         //!< Not Less/Equal    (Signaling).
  kCmpORD               = 0x07U          //!< Ordered           (Quiet).
};
// ============================================================================
// [asmjit::x86defs::VCmpPredicate]
// ============================================================================

//! A predicate used by VCMP[PD|PS|SD|SS] instructions.
//!
//! The first 8 values are compatible with \ref CmpPredicate; values above
//! 0x07 require the AVX/AVX-512 `vcmp*` encodings (5-bit predicate).
ASMJIT_ENUM(VCmpPredicate) {
  kVCmpEQ_OQ            = 0x00U,         //!< Equal             (Quiet    , Ordered).
  kVCmpLT_OS            = 0x01U,         //!< Less              (Signaling, Ordered).
  kVCmpLE_OS            = 0x02U,         //!< Less/Equal        (Signaling, Ordered).
  kVCmpUNORD_Q          = 0x03U,         //!< Unordered         (Quiet).
  kVCmpNEQ_UQ           = 0x04U,         //!< Not Equal         (Quiet    , Unordered).
  kVCmpNLT_US           = 0x05U,         //!< Not Less          (Signaling, Unordered).
  kVCmpNLE_US           = 0x06U,         //!< Not Less/Equal    (Signaling, Unordered).
  kVCmpORD_Q            = 0x07U,         //!< Ordered           (Quiet).
  kVCmpEQ_UQ            = 0x08U,         //!< Equal             (Quiet    , Unordered).
  kVCmpNGE_US           = 0x09U,         //!< Not Greater/Equal (Signaling, Unordered).
  kVCmpNGT_US           = 0x0AU,         //!< Not Greater       (Signaling, Unordered).
  kVCmpFALSE_OQ         = 0x0BU,         //!< False             (Quiet    , Ordered).
  kVCmpNEQ_OQ           = 0x0CU,         //!< Not Equal         (Quiet    , Ordered).
  kVCmpGE_OS            = 0x0DU,         //!< Greater/Equal     (Signaling, Ordered).
  kVCmpGT_OS            = 0x0EU,         //!< Greater           (Signaling, Ordered).
  kVCmpTRUE_UQ          = 0x0FU,         //!< True              (Quiet    , Unordered).
  kVCmpEQ_OS            = 0x10U,         //!< Equal             (Signaling, Ordered).
  kVCmpLT_OQ            = 0x11U,         //!< Less              (Quiet    , Ordered).
  kVCmpLE_OQ            = 0x12U,         //!< Less/Equal        (Quiet    , Ordered).
  kVCmpUNORD_S          = 0x13U,         //!< Unordered         (Signaling).
  kVCmpNEQ_US           = 0x14U,         //!< Not Equal         (Signaling, Unordered).
  kVCmpNLT_UQ           = 0x15U,         //!< Not Less          (Quiet    , Unordered).
  kVCmpNLE_UQ           = 0x16U,         //!< Not Less/Equal    (Quiet    , Unordered).
  kVCmpORD_S            = 0x17U,         //!< Ordered           (Signaling).
  kVCmpEQ_US            = 0x18U,         //!< Equal             (Signaling, Unordered).
  kVCmpNGE_UQ           = 0x19U,         //!< Not Greater/Equal (Quiet    , Unordered).
  kVCmpNGT_UQ           = 0x1AU,         //!< Not Greater       (Quiet    , Unordered).
  kVCmpFALSE_OS         = 0x1BU,         //!< False             (Signaling, Ordered).
  kVCmpNEQ_OS           = 0x1CU,         //!< Not Equal         (Signaling, Ordered).
  kVCmpGE_OQ            = 0x1DU,         //!< Greater/Equal     (Quiet    , Ordered).
  kVCmpGT_OQ            = 0x1EU,         //!< Greater           (Quiet    , Ordered).
  kVCmpTRUE_US          = 0x1FU          //!< True              (Signaling, Unordered).
};
// ============================================================================
// [asmjit::x86defs::PCmpStrPredicate]
// ============================================================================

//! A predicate used by [V]PCMP[I|E]STR[I|M] instructions.
//!
//! The imm8 control byte combines one constant from each group below
//! (source format, aggregation, polarity, output selection).
ASMJIT_ENUM(PCmpStrPredicate) {
  // Source data format:
  kPCmpStrUB            = 0x00U << 0,    //!< The source data format is unsigned bytes.
  kPCmpStrUW            = 0x01U << 0,    //!< The source data format is unsigned words.
  kPCmpStrSB            = 0x02U << 0,    //!< The source data format is signed bytes.
  kPCmpStrSW            = 0x03U << 0,    //!< The source data format is signed words.

  // Aggregation operation:
  kPCmpStrEqualAny      = 0x00U << 2,    //!< The arithmetic comparison is "equal".
  kPCmpStrRanges        = 0x01U << 2,    //!< The arithmetic comparison is "greater than or equal"
                                         //!< between even indexed elements and "less than or equal"
                                         //!< between odd indexed elements.
  kPCmpStrEqualEach     = 0x02U << 2,    //!< The arithmetic comparison is "equal".
  kPCmpStrEqualOrdered  = 0x03U << 2,    //!< The arithmetic comparison is "equal".

  // Polarity:
  kPCmpStrPosPolarity   = 0x00U << 4,    //!< IntRes2 = IntRes1.
  kPCmpStrNegPolarity   = 0x01U << 4,    //!< IntRes2 = -1 XOR IntRes1.
  kPCmpStrPosMasked     = 0x02U << 4,    //!< IntRes2 = IntRes1.
  kPCmpStrNegMasked     = 0x03U << 4,    //!< IntRes2[i] = second[i] == invalid ? IntRes1[i] : ~IntRes1[i].

  // Output selection (pcmpstri):
  kPCmpStrOutputLSI     = 0x00U << 6,    //!< The index returned to ECX is of the least significant set bit in IntRes2.
  kPCmpStrOutputMSI     = 0x01U << 6,    //!< The index returned to ECX is of the most significant set bit in IntRes2.

  // Output selection (pcmpstrm):
  kPCmpStrBitMask       = 0x00U << 6,    //!< IntRes2 is returned as the mask to the least significant bits of XMM0.
  kPCmpStrIndexMask     = 0x01U << 6     //!< IntRes2 is expanded into a byte/word mask and placed in XMM0.
};
// ============================================================================
// [asmjit::x86defs::VPCmpPredicate]
// ============================================================================

//! A predicate used by VPCMP[U][B|W|D|Q] instructions (AVX-512).
//!
//! Passed in the instruction's imm8 operand (values 0x00-0x07).
ASMJIT_ENUM(VPCmpPredicate) {
  kVPCmpEQ              = 0x00U,         //!< Equal.
  kVPCmpLT              = 0x01U,         //!< Less.
  kVPCmpLE              = 0x02U,         //!< Less/Equal.
  kVPCmpFALSE           = 0x03U,         //!< False (always zero mask).
  kVPCmpNE              = 0x04U,         //!< Not Equal.
  kVPCmpGE              = 0x05U,         //!< Greater/Equal.
  kVPCmpGT              = 0x06U,         //!< Greater.
  kVPCmpTRUE            = 0x07U          //!< True (always ones mask).
};
// ============================================================================
// [asmjit::x86defs::VPComPredicate]
// ============================================================================

//! A predicate used by VPCOM[U][B|W|D|Q] instructions (XOP).
//!
//! NOTE: The value assignment differs from \ref VPCmpPredicate (AVX-512).
ASMJIT_ENUM(VPComPredicate) {
  kVPComLT              = 0x00U,         //!< Less.
  kVPComLE              = 0x01U,         //!< Less/Equal.
  kVPComGT              = 0x02U,         //!< Greater.
  kVPComGE              = 0x03U,         //!< Greater/Equal.
  kVPComEQ              = 0x04U,         //!< Equal.
  kVPComNE              = 0x05U,         //!< Not Equal.
  kVPComFALSE           = 0x06U,         //!< False.
  kVPComTRUE            = 0x07U          //!< True.
};
// ============================================================================
// [asmjit::x86defs::VFPClassPredicate]
// ============================================================================

//! A predicate used by VFPCLASS[PD|PS|SD|SS] instructions (AVX-512).
ASMJIT_ENUM(VFPClassPredicate) {
  kVFPClassQNaN         = 0x00U,         //!< QNaN.
  kVFPClassPZero        = 0x01U,         //!< +0.
  kVFPClassNZero        = 0x02U,         //!< -0.
  kVFPClassPInf         = 0x03U,         //!< +Inf.
  kVFPClassNInf         = 0x04U,         //!< -Inf.
  kVFPClassDenormal     = 0x05U,         //!< Denormal.
  kVFPClassNegative     = 0x06U,         //!< Negative finite.
  kVFPClassSNaN         = 0x07U          //!< SNaN.
};
// ============================================================================
// [asmjit::x86defs::VFixupImmPredicate]
// ============================================================================

//! A predicate used by VFIXUPIMM[PD|PS|SD|SS] instructions (AVX-512).
//!
//! These are single-bit flags that can be OR-ed together into the imm8
//! operand (ZE/IE appear to stand for zero/invalid exception control —
//! verify against the Intel SDM description of VFIXUPIMM).
ASMJIT_ENUM(VFixupImmPredicate) {
  kVFixupImmZEOnZero    = 0x01U,         //!< Bit 0.
  kVFixupImmIEOnZero    = 0x02U,         //!< Bit 1.
  kVFixupImmZEOnOne     = 0x04U,         //!< Bit 2.
  kVFixupImmIEOnOne     = 0x08U,         //!< Bit 3.
  kVFixupImmIEOnSNaN    = 0x10U,         //!< Bit 4.
  kVFixupImmIEOnNInf    = 0x20U,         //!< Bit 5.
  kVFixupImmIEOnNegative= 0x40U,         //!< Bit 6.
  kVFixupImmIEOnPInf    = 0x80U          //!< Bit 7.
};
// ============================================================================
// [asmjit::x86defs::VGetMantPredicate]
// ============================================================================

//! A predicate used by VGETMANT[PD|PS|SD|SS] instructions (AVX-512).
//!
//! Bits 0-1 select the normalization interval; bits 2 and 3 are flags that
//! can be OR-ed on top of the interval selection.
ASMJIT_ENUM(VGetMantPredicate) {
  kVGetMant1To2         = 0x00U,         //!< Normalize mantissa to [1, 2).
  kVGetMant1Div2To2     = 0x01U,         //!< Normalize mantissa to [1/2, 2).
  kVGetMant1Div2To1     = 0x02U,         //!< Normalize mantissa to [1/2, 1).
  kVGetMant3Div4To3Div2 = 0x03U,         //!< Normalize mantissa to [3/4, 3/2).
  kVGetMantNoSign       = 0x04U,         //!< Flag: no sign.
  kVGetMantQNaNIfSign   = 0x08U          //!< Flag: QNaN if the sign bit is set.
};
// ============================================================================
// [asmjit::x86defs::VRangePredicate]
// ============================================================================

//! A predicate used by VRANGE[PD|PS|SD|SS] instructions (AVX-512).
//!
//! Bits 0-1 select the operation, bits 2-3 select the sign of the result;
//! combine one constant from each group.
ASMJIT_ENUM(VRangePredicate) {
  kVRangeSelectMin      = 0x00U,         //!< Select minimum value.
  kVRangeSelectMax      = 0x01U,         //!< Select maximum value.
  kVRangeSelectAbsMin   = 0x02U,         //!< Select minimum absolute value.
  kVRangeSelectAbsMax   = 0x03U,         //!< Select maximum absolute value.
  kVRangeSignSrc1       = 0x00U,         //!< Select sign of SRC1.
  kVRangeSignSrc2       = 0x04U,         //!< Select sign of SRC2.
  kVRangeSign0          = 0x08U,         //!< Set sign to 0.
  kVRangeSign1          = 0x0CU          //!< Set sign to 1.
};
// ============================================================================
// [asmjit::x86defs::VReducePredicate]
// ============================================================================

//! A predicate used by VREDUCE[PD|PS|SD|SS] instructions (AVX-512).
//!
//! Bits 0-2 select the rounding mode, bit 3 suppresses exceptions; the
//! remaining imm8 bits (4-7) hold the fixed-point length and are not
//! covered by these constants.
ASMJIT_ENUM(VReducePredicate) {
  kVReduceRoundCurrent  = 0x00U,         //!< Round to the current mode set.
  kVReduceRoundEven     = 0x04U,         //!< Round to nearest even.
  kVReduceRoundDown     = 0x05U,         //!< Round down.
  kVReduceRoundUp       = 0x06U,         //!< Round up.
  kVReduceRoundTrunc    = 0x07U,         //!< Truncate.
  kVReduceSuppress      = 0x08U          //!< Suppress exceptions (flag).
};
// ============================================================================
// [asmjit::x86defs::TLogPredicate]
// ============================================================================

//! A predicate that can be used to create an immediate for VTERNLOG[D|Q].
//!
//! The immediate is an 8-entry truth table: bit `i` of the immediate is the
//! output for the input combination whose bits are (A, B, C) = bits of `i`.
//! `kTLogA` (0xF0), `kTLogB` (0xCC) and `kTLogC` (0xAA) are the truth tables
//! of the three operands themselves, so arbitrary boolean expressions can
//! be formed by combining them with `&`, `|`, `^` and `^ 0xFF` (NOT).
ASMJIT_ENUM(TLogPredicate) {
  kTLog0                = 0x00U,         //!< Constant false.
  kTLog1                = 0xFFU,         //!< Constant true.
  kTLogA                = 0xF0U,         //!< Truth table of operand A.
  kTLogB                = 0xCCU,         //!< Truth table of operand B.
  kTLogC                = 0xAAU,         //!< Truth table of operand C.
  kTLogNotA             = kTLogA ^ 0xFFU,//!< !A.
  kTLogNotB             = kTLogB ^ 0xFFU,//!< !B.
  kTLogNotC             = kTLogC ^ 0xFFU,//!< !C.

  kTLogAB               = kTLogA & kTLogB,          //!< A & B.
  kTLogAC               = kTLogA & kTLogC,          //!< A & C.
  kTLogBC               = kTLogB & kTLogC,          //!< B & C.
  kTLogNotAB            = kTLogAB ^ 0xFFU,          //!< !(A & B).
  kTLogNotAC            = kTLogAC ^ 0xFFU,          //!< !(A & C).
  kTLogNotBC            = kTLogBC ^ 0xFFU,          //!< !(B & C).

  kTLogABC              = kTLogA & kTLogB & kTLogC, //!< A & B & C.
  kTLogNotABC           = kTLogABC ^ 0xFFU          //!< !(A & B & C).
};
// ============================================================================
// [asmjit::x86defs::RoundPredicate]
// ============================================================================

//! A predicate used by ROUND[PD|PS|SD|SS] instructions.
//!
//! Bits 0-2 select the rounding mode; \ref kRoundInexact is a flag that can
//! be OR-ed with any of the modes.
ASMJIT_ENUM(RoundPredicate) {
  kRoundNearest         = 0x00U,         //!< Round to nearest (even).
  kRoundDown            = 0x01U,         //!< Round to down toward -INF (floor),
  kRoundUp              = 0x02U,         //!< Round to up toward +INF (ceil).
  kRoundTrunc           = 0x03U,         //!< Round toward zero (truncate).
  kRoundCurrent         = 0x04U,         //!< Round to the current rounding mode set (ignores other RC bits).
  kRoundInexact         = 0x08U          //!< Avoids inexact exception, if set.
};
} // x86defs namespace
// ============================================================================
// [asmjit::x86::]
// ============================================================================
//! X86 constants, registers, and utilities.
namespace x86 {
// Include all x86 specific namespaces here.
using namespace x86defs;
using namespace x86regs;
//! Pack a shuffle constant to be used by SSE/AVX/AVX-512 instructions (2 values).
//!
//! \param a Position of the first  component [0, 1].
//! \param b Position of the second component [0, 1].
//!
//! Shuffle constants can be used to encode an immediate for these instructions:
//!   - `shufpd`
static ASMJIT_INLINE int shufImm(uint32_t a, uint32_t b) noexcept {
  ASMJIT_ASSERT(a <= 1 && b <= 1);
  // Each selector occupies a single bit - `a` in bit 1, `b` in bit 0.
  uint32_t packed = (a << 1) | b;
  return static_cast<int>(packed);
}
//! Pack a shuffle constant to be used by SSE/AVX/AVX-512 instructions (4 values).
//!
//! \param a Position of the first  component [0, 3].
//! \param b Position of the second component [0, 3].
//! \param c Position of the third  component [0, 3].
//! \param d Position of the fourth component [0, 3].
//!
//! Shuffle constants can be used to encode an immediate for these instructions:
//!   - `pshufw()`
//!   - `pshufd()`
//!   - `pshuflw()`
//!   - `pshufhw()`
//!   - `shufps()`
static ASMJIT_INLINE int shufImm(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept {
  ASMJIT_ASSERT(a <= 3 && b <= 3 && c <= 3 && d <= 3);
  // Each selector occupies two bits, `a` being the most significant pair.
  uint32_t packed = (a << 6) | (b << 4) | (c << 2) | d;
  return static_cast<int>(packed);
}
//! Create an immediate that can be used by VTERNLOG[D|Q] instructions.
//!
//! Each `bXYZ` argument (0 or 1) is the desired output for the input
//! combination A=X, B=Y, C=Z; together they form the 8-bit truth table.
static ASMJIT_INLINE int tlogImm(
  uint32_t b000, uint32_t b001, uint32_t b010, uint32_t b011,
  uint32_t b100, uint32_t b101, uint32_t b110, uint32_t b111) noexcept {

  ASMJIT_ASSERT(b000 <= 1 && b001 <= 1 && b010 <= 1 && b011 <= 1 &&
                b100 <= 1 && b101 <= 1 && b110 <= 1 && b111 <= 1);

  // Low nibble (combinations 0-3) and high nibble (combinations 4-7).
  uint32_t lo = (b000 << 0) | (b001 << 1) | (b010 << 2) | (b011 << 3);
  uint32_t hi = (b100 << 4) | (b101 << 5) | (b110 << 6) | (b111 << 7);
  return static_cast<int>(lo | hi);
}
//! Clamp `x` to a valid VTERNLOG[D|Q] immediate (low 8 bits).
static ASMJIT_INLINE int tlogVal(int x) noexcept { return x & 0xFF; }

//! Negate an immediate that can be used by VTERNLOG[D|Q] instructions.
static ASMJIT_INLINE int tlogNot(int x) noexcept { return x ^ 0xFF; }

//! Create an if/else logic that can be used by VTERNLOG[D|Q] instructions.
//!
//! Selects bits of `a` where `cond` is set, bits of `b` where it is clear.
static ASMJIT_INLINE int tlogIf(int cond, int a, int b) noexcept {
  int onTrue  = cond & a;
  int onFalse = tlogNot(cond) & b;
  return onTrue | onFalse;
}
} // x86 namespace
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_X86_X86GLOBALS_H

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,79 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86INTERNAL_P_H
#define _ASMJIT_X86_X86INTERNAL_P_H
#include "../asmjit_build.h"
// [Dependencies]
#include "../base/func.h"
#include "../x86/x86emitter.h"
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::X86Internal]
// ============================================================================

//! \internal
//!
//! X86 utilities used at multiple places, not part of public API, not exported.
//!
//! All members are static; the struct is used purely as a namespace for
//! X86/X64-specific function-call and prolog/epilog machinery.
struct X86Internal {
  //! Initialize `CallConv` to X86/X64 specific calling convention.
  static Error initCallConv(CallConv& cc, uint32_t ccId) noexcept;

  //! Initialize `FuncDetail` to X86/X64 specific function signature.
  static Error initFuncDetail(FuncDetail& func, const FuncSignature& sign, uint32_t gpSize) noexcept;

  //! Initialize `FuncFrameLayout` from X86/X64 specific function detail and frame information.
  static Error initFrameLayout(FuncFrameLayout& layout, const FuncDetail& func, const FuncFrameInfo& ffi) noexcept;

  //! Update `ffi` with the requirements implied by the argument assignment
  //! described by `args`.
  //! NOTE(review): contract inferred from the signature - confirm against
  //! the implementation in x86internal.cpp.
  static Error argsToFrameInfo(const FuncArgsMapper& args, FuncFrameInfo& ffi) noexcept;

  //! Emit function prolog.
  static Error emitProlog(X86Emitter* emitter, const FuncFrameLayout& layout);

  //! Emit function epilog.
  static Error emitEpilog(X86Emitter* emitter, const FuncFrameLayout& layout);

  //! Emit a pure move operation between two registers or the same type or
  //! between a register and its home slot. This function does not handle
  //! register conversion.
  static Error emitRegMove(X86Emitter* emitter,
    const Operand_& dst_,
    const Operand_& src_, uint32_t typeId, bool avxEnabled, const char* comment = nullptr);

  //! Emit move from a function argument (either register or stack) to a register.
  //!
  //! This function can handle the necessary conversion from one argument to
  //! another, and from one register type to another, if it's possible. Any
  //! attempt of conversion that requires third register of a different kind
  //! (for example conversion from K to MMX) will fail.
  static Error emitArgMove(X86Emitter* emitter,
    const X86Reg& dst_, uint32_t dstTypeId,
    const Operand_& src_, uint32_t srcTypeId, bool avxEnabled, const char* comment = nullptr);

  //! Move function arguments (described by `args`) into their assigned
  //! registers according to `layout`.
  //! NOTE(review): contract inferred from the signature - confirm against
  //! the implementation in x86internal.cpp.
  static Error allocArgs(X86Emitter* emitter, const FuncFrameLayout& layout, const FuncArgsMapper& args);
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_X86_X86INTERNAL_P_H

View File

@@ -0,0 +1,691 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_LOGGING)
// [Dependencies]
#include "../base/misc_p.h"
#include "../x86/x86inst.h"
#include "../x86/x86logging_p.h"
#include "../x86/x86operand.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
#include "../base/codecompiler.h"
#endif // !ASMJIT_DISABLE_COMPILER
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86Logging - Constants]
// ============================================================================
//! \internal
//!
//! Describes how to render one register type; all indexes point into
//! `x86RegFormatStrings`.
struct X86RegFormatInfo {
  uint8_t count;                         //!< Number of registers of this type.
  uint8_t formatIndex;                   //!< Offset of the printf-style format (e.g. "xmm%u").
  uint8_t specialIndex;                  //!< Offset of the table of 4-byte-padded special names.
  uint8_t specialCount;                  //!< Number of special names (0 if the type has none).
};
//! \internal
//!
//! Packed register-name data. The first section holds NUL-separated
//! printf-style formats; the `#N` comments give each string's byte offset,
//! which `X86RegFormatInfo_T` references. The second section (from #96)
//! holds special register names padded to exactly 4 bytes each, so a name
//! is addressed as `base + id * 4`.
static const char x86RegFormatStrings[] =
  "r%ub"  "\0"                           // #0
  "r%uh"  "\0"                           // #5
  "r%uw"  "\0"                           // #10
  "r%ud"  "\0"                           // #15
  "r%u"   "\0"                           // #20
  "xmm%u" "\0"                           // #24
  "ymm%u" "\0"                           // #30
  "zmm%u" "\0"                           // #36
  "rip%u" "\0"                           // #42
  "seg%u" "\0"                           // #48
  "fp%u"  "\0"                           // #54
  "mm%u"  "\0"                           // #59
  "k%u"   "\0"                           // #64
  "bnd%u" "\0"                           // #68
  "cr%u"  "\0"                           // #74
  "dr%u"  "\0"                           // #79

  "rip\0"                                // #84
  "\0\0\0\0"                             // #88 (padding)
  "\0\0\0\0"                             // #92 (padding)

  // 4-byte-padded special names (one row per register type, 4 bytes per name).
  "al\0\0" "cl\0\0" "dl\0\0" "bl\0\0" "spl\0" "bpl\0" "sil\0" "dil\0" // #96
  "ah\0\0" "ch\0\0" "dh\0\0" "bh\0\0" "n/a\0" "n/a\0" "n/a\0" "n/a\0" // #128
  "eax\0" "ecx\0" "edx\0" "ebx\0" "esp\0" "ebp\0" "esi\0" "edi\0"     // #160
  "rax\0" "rcx\0" "rdx\0" "rbx\0" "rsp\0" "rbp\0" "rsi\0" "rdi\0"     // #192
  "n/a\0" "es\0\0" "cs\0\0" "ss\0\0" "ds\0\0" "fs\0\0" "gs\0\0" "n/a\0"; // #224
//! \internal
//!
//! Compile-time mapping from a register type `X` to offsets/counts inside
//! `x86RegFormatStrings` (see the `#N` comments there).
template<uint32_t X>
struct X86RegFormatInfo_T {
  enum {
    // Offset of the printf-style format string for this register type.
    kFormatIndex  = X == X86Reg::kRegGpbLo ? 0  :
                    X == X86Reg::kRegGpbHi ? 5  :
                    X == X86Reg::kRegGpw   ? 10 :
                    X == X86Reg::kRegGpd   ? 15 :
                    X == X86Reg::kRegGpq   ? 20 :
                    X == X86Reg::kRegXmm   ? 24 :
                    X == X86Reg::kRegYmm   ? 30 :
                    X == X86Reg::kRegZmm   ? 36 :
                    X == X86Reg::kRegRip   ? 42 :
                    X == X86Reg::kRegSeg   ? 48 :
                    X == X86Reg::kRegFp    ? 54 :
                    X == X86Reg::kRegMm    ? 59 :
                    X == X86Reg::kRegK     ? 64 :
                    X == X86Reg::kRegBnd   ? 68 :
                    X == X86Reg::kRegCr    ? 74 :
                    X == X86Reg::kRegDr    ? 79 : 0,

    // Offset of the special-name table. NOTE: Gpw intentionally uses 161,
    // one byte INTO the 32-bit name row at #160 - "eax"+1 yields "ax",
    // "ecx"+1 yields "cx", etc. (names are padded to 4 bytes and
    // formatRegister() advances by `rId * 4`).
    kSpecialIndex = X == X86Reg::kRegGpbLo ? 96  :
                    X == X86Reg::kRegGpbHi ? 128 :
                    X == X86Reg::kRegGpw   ? 161 :
                    X == X86Reg::kRegGpd   ? 160 :
                    X == X86Reg::kRegGpq   ? 192 :
                    X == X86Reg::kRegRip   ? 84  :
                    X == X86Reg::kRegSeg   ? 224 : 0,

    // Number of special names available for this register type.
    kSpecialCount = X == X86Reg::kRegGpbLo ? 8 :
                    X == X86Reg::kRegGpbHi ? 4 :
                    X == X86Reg::kRegGpw   ? 8 :
                    X == X86Reg::kRegGpd   ? 8 :
                    X == X86Reg::kRegGpq   ? 8 :
                    X == X86Reg::kRegRip   ? 1 :
                    X == X86Reg::kRegSeg   ? 7 : 0
  };
};
//! \internal
//!
//! Builds one `X86RegFormatInfo` entry for register type `TYPE` from the
//! compile-time tables above.
#define ASMJIT_X86_REG_FORMAT(TYPE) { \
  X86RegTraits<TYPE>::kCount,          \
  X86RegFormatInfo_T<TYPE>::kFormatIndex,  \
  X86RegFormatInfo_T<TYPE>::kSpecialIndex, \
  X86RegFormatInfo_T<TYPE>::kSpecialCount  \
}

// Format info indexed by register type, built from two 16-entry expansions.
static const X86RegFormatInfo x86RegFormatInfo[] = {
  ASMJIT_TABLE_16(ASMJIT_X86_REG_FORMAT, 0 ),
  ASMJIT_TABLE_16(ASMJIT_X86_REG_FORMAT, 16)
};
//! \internal
//!
//! Returns the assembly size-keyword (including a trailing space) for a
//! memory operand of `size` bytes, or an empty string for unknown sizes.
static const char* x86GetAddressSizeString(uint32_t size) noexcept {
  struct SizeName { uint32_t size; const char* name; };
  static const SizeName table[] = {
    {  1, "byte "  },
    {  2, "word "  },
    {  4, "dword " },
    {  8, "qword " },
    { 10, "tword " },
    { 16, "oword " },
    { 32, "yword " },
    { 64, "zword " }
  };

  for (const SizeName& entry : table)
    if (entry.size == size)
      return entry.name;
  return "";
}
// ============================================================================
// [asmjit::X86Logging - Format Operand]
// ============================================================================
//! Formats a single operand (register, memory, immediate, or label) into `sb`.
//!
//! Memory operands are rendered as `size [seg:base+index*scale+disp]`;
//! unrecognized operands produce "<None>".
ASMJIT_FAVOR_SIZE Error X86Logging::formatOperand(
  StringBuilder& sb,
  uint32_t logOptions,
  const CodeEmitter* emitter,
  uint32_t archType,
  const Operand_& op) noexcept {

  if (op.isReg())
    return formatRegister(sb, logOptions, emitter, archType, op.as<Reg>().getType(), op.as<Reg>().getId());

  if (op.isMem()) {
    const X86Mem& m = op.as<X86Mem>();
    // Size keyword prefix ("dword ", etc.), empty for unknown sizes.
    ASMJIT_PROPAGATE(sb.appendString(x86GetAddressSizeString(m.getSize())));

    // Segment override prefix (#224 is the segment-name row in the string table).
    uint32_t seg = m.getSegmentId();
    if (seg != X86Seg::kIdNone && seg < X86Seg::kIdCount)
      ASMJIT_PROPAGATE(sb.appendFormat("%s:", x86RegFormatStrings + 224 + seg * 4));

    ASMJIT_PROPAGATE(sb.appendChar('['));
    if (m.isAbs())
      ASMJIT_PROPAGATE(sb.appendString("abs "));

    if (m.hasBase()) {
      if (m.hasBaseLabel()) {
        ASMJIT_PROPAGATE(Logging::formatLabel(sb, logOptions, emitter, m.getBaseId()));
      }
      else {
        // '$' marks an argument-home slot, '&' a register-home slot.
        if (m.isArgHome()) ASMJIT_PROPAGATE(sb.appendString("$"));
        if (m.isRegHome()) ASMJIT_PROPAGATE(sb.appendString("&"));
        ASMJIT_PROPAGATE(formatRegister(sb, logOptions, emitter, archType, m.getBaseType(), m.getBaseId()));
      }
    }

    if (m.hasIndex()) {
      ASMJIT_PROPAGATE(sb.appendChar('+'));
      ASMJIT_PROPAGATE(formatRegister(sb, logOptions, emitter, archType, m.getIndexType(), m.getIndexId()));
      if (m.hasShift())
        ASMJIT_PROPAGATE(sb.appendFormat("*%u", 1 << m.getShift()));
    }

    uint64_t off = static_cast<uint64_t>(m.getOffset());
    if (off) {
      uint32_t base = 10;
      char prefix = '+';

      // Print negative displacements as '-' plus the magnitude (two's complement).
      if (static_cast<int64_t>(off) < 0) {
        off = ~off + 1;
        prefix = '-';
      }

      ASMJIT_PROPAGATE(sb.appendChar(prefix));
      // Optionally switch to hex, but keep single digits decimal.
      if ((logOptions & Logger::kOptionHexDisplacement) != 0 && off > 9) {
        ASMJIT_PROPAGATE(sb.appendString("0x", 2));
        base = 16;
      }
      ASMJIT_PROPAGATE(sb.appendUInt(off, base));
    }

    return sb.appendChar(']');
  }

  if (op.isImm()) {
    const Imm& i = op.as<Imm>();
    int64_t val = i.getInt64();

    if ((logOptions & Logger::kOptionHexImmediate) != 0 && static_cast<uint64_t>(val) > 9)
      return sb.appendUInt(static_cast<uint64_t>(val), 16);
    else
      return sb.appendInt(val, 10);
  }

  if (op.isLabel()) {
    return Logging::formatLabel(sb, logOptions, emitter, op.getId());
  }

  return sb.appendString("<None>");
}
// ============================================================================
// [asmjit::X86Logging - Format Immediate (Extension)]
// ============================================================================
//! \internal
//!
//! Describes how to decode and print one bit-field of an imm8 operand.
struct ImmBits {
  enum Mode {
    kModeLookup = 0x0,                   //!< `text` is a packed name list indexed by the field value.
    kModeFormat = 0x1                    //!< `text` is an snprintf format string taking the field value.
  };

  uint8_t mask;                          //!< Mask selecting the field inside the immediate.
  uint8_t shift;                         //!< Right-shift applied after masking.
  uint8_t mode;                          //!< How to interpret `text`, see `Mode`.
  char text[45];                         //!< Lookup table or format string (see `mode`).
};
//! \internal
//!
//! Appends the immediate decomposed as `<v0|v1|...>` - `count` fields of
//! `bits` bits each, least-significant field first.
ASMJIT_FAVOR_SIZE static Error X86Logging_formatImmShuf(StringBuilder& sb, uint32_t u8, uint32_t bits, uint32_t count) noexcept {
  const uint32_t fieldMask = (1 << bits) - 1;

  ASMJIT_PROPAGATE(sb.appendChar('<'));
  for (uint32_t index = 0; index < count; index++) {
    if (index != 0)
      ASMJIT_PROPAGATE(sb.appendChar('|'));
    ASMJIT_PROPAGATE(sb.appendUInt(u8 & fieldMask));
    u8 >>= bits;
  }
  return sb.appendChar('>');
}
//! \internal
//!
//! Appends the immediate decomposed according to a table of `ImmBits`
//! descriptors; fields that decode to an empty string are skipped, and the
//! surrounding `<...>` is emitted only if at least one field was printed.
ASMJIT_FAVOR_SIZE static Error X86Logging_formatImmBits(StringBuilder& sb, uint32_t u8, const ImmBits* bits, uint32_t count) noexcept {
  uint32_t n = 0;                        // Number of fields printed so far.
  char buf[64];                          // Scratch for kModeFormat output.

  for (uint32_t i = 0; i < count; i++) {
    const ImmBits& spec = bits[i];

    uint32_t value = (u8 & static_cast<uint32_t>(spec.mask)) >> spec.shift;
    const char* str = nullptr;

    switch (spec.mode) {
      case ImmBits::kModeLookup:
        str = Utils::findPackedString(spec.text, value);
        break;

      case ImmBits::kModeFormat:
        snprintf(buf, sizeof(buf), spec.text, static_cast<unsigned int>(value));
        str = buf;
        break;

      default:
        return DebugUtils::errored(kErrorInvalidState);
    }

    // Empty decode means "nothing to say about this field".
    if (!str[0])
      continue;

    // Open with '<' on the first printed field, separate the rest with '|'.
    ASMJIT_PROPAGATE(sb.appendChar(++n == 1 ? '<' : '|'));
    ASMJIT_PROPAGATE(sb.appendString(str));
  }

  return n ? sb.appendChar('>') : static_cast<Error>(kErrorOk);
}
//! \internal
//!
//! Appends the immediate decomposed as `<name0|name1|...>`, where each
//! `bits`-wide field selects a name from the packed string `text`. The
//! lookup base advances by `advance` for every field, which lets one table
//! serve several field positions.
ASMJIT_FAVOR_SIZE static Error X86Logging_formatImmText(StringBuilder& sb, uint32_t u8, uint32_t bits, uint32_t advance, const char* text, uint32_t count = 1) noexcept {
  const uint32_t fieldMask = (1 << bits) - 1;
  uint32_t lookupBase = 0;

  ASMJIT_PROPAGATE(sb.appendChar('<'));
  for (uint32_t index = 0; index < count; index++) {
    if (index != 0)
      ASMJIT_PROPAGATE(sb.appendChar('|'));

    uint32_t stringIndex = (u8 & fieldMask) + lookupBase;
    ASMJIT_PROPAGATE(sb.appendString(Utils::findPackedString(text, stringIndex)));

    u8 >>= bits;
    lookupBase += advance;
  }
  return sb.appendChar('>');
}
//! \internal
//!
//! Appends a human-readable decomposition of an instruction's imm8 operand
//! for instructions where the immediate encodes predicates, shuffle lane
//! selectors, or bit-flags (e.g. `vcmppd`, `vshufps`, `vgetmantpd`, ...).
//!
//! Returns `kErrorOk` without appending anything for instructions whose
//! immediate has no decomposition implemented.
ASMJIT_FAVOR_SIZE static Error X86Logging_formatImmExtended(
  StringBuilder& sb,
  uint32_t logOptions,
  uint32_t instId,
  uint32_t vecSize,
  const Imm& imm) noexcept {

  // Predicate names for [V]CMP[PD|PS|SD|SS] (indexed by the 5-bit predicate).
  static const char vcmpx[] =
    "eq_oq\0" "lt_os\0"  "le_os\0"  "unord_q\0"  "neq_uq\0" "nlt_us\0" "nle_us\0" "ord_q\0"
    "eq_uq\0" "nge_us\0" "ngt_us\0" "false_oq\0" "neq_oq\0" "ge_os\0"  "gt_os\0"  "true_uq\0"
    "eq_os\0" "lt_oq\0"  "le_oq\0"  "unord_s\0"  "neq_us\0" "nlt_uq\0" "nle_uq\0" "ord_s\0"
    "eq_us\0" "nge_uq\0" "ngt_uq\0" "false_os\0" "neq_os\0" "ge_oq\0"  "gt_oq\0"  "true_us\0";

  // Predicate names for VPCMP[U][B|W|D|Q] (AVX-512) and VPCOM[U][B|W|D|Q]
  // (XOP) - note the two instruction families assign values differently.
  static const char vpcmpx[] = "eq\0" "lt\0" "le\0" "false\0" "neq\0" "ge\0" "gt\0" "true\0";
  static const char vpcomx[] = "lt\0" "le\0" "gt\0" "ge\0" "eq\0" "neq\0" "false\0" "true\0";

  // Source-lane names for [V]SHUFPD / [V]SHUFPS ('a'/'b' = first/second source).
  static const char vshufpd[] = "a0\0a1\0b0\0b1\0a2\0a3\0b2\0b3\0a4\0a5\0b4\0b5\0a6\0a7\0b6\0b7\0";
  static const char vshufps[] = "a0\0a1\0a2\0a3\0a0\0a1\0a2\0a3\0b0\0b1\0b2\0b3\0b0\0b1\0b2\0b3\0";

  static const ImmBits vfpclassxx[] = {
    { 0x07, 0, ImmBits::kModeLookup, "qnan\0" "+0\0" "-0\0" "+inf\0" "-inf\0" "denormal\0" "-finite\0" "snan\0" }
  };

  static const ImmBits vgetmantxx[] = {
    // FIX: the "[1/2, 1)" interval previously rendered as "1/2, 1)" - the
    // opening bracket was missing (all sibling entries are bracketed).
    { 0x03, 0, ImmBits::kModeLookup, "[1, 2)\0" "[1/2, 2)\0" "[1/2, 1)\0" "[3/4, 3/2)\0" },
    { 0x04, 2, ImmBits::kModeLookup, "\0" "no-sign\0" },
    { 0x08, 3, ImmBits::kModeLookup, "\0" "qnan-if-sign\0" }
  };

  static const ImmBits vmpsadbw[] = {
    { 0x04, 2, ImmBits::kModeLookup, "blk1[0]\0" "blk1[1]\0" },
    { 0x03, 0, ImmBits::kModeLookup, "blk2[0]\0" "blk2[1]\0" "blk2[2]\0" "blk2[3]\0" },
    { 0x40, 6, ImmBits::kModeLookup, "blk1[4]\0" "blk1[5]\0" },
    { 0x30, 4, ImmBits::kModeLookup, "blk2[4]\0" "blk2[5]\0" "blk2[6]\0" "blk2[7]\0" }
  };

  static const ImmBits vpclmulqdq[] = {
    { 0x01, 0, ImmBits::kModeLookup, "lq\0" "hq\0" },
    { 0x10, 4, ImmBits::kModeLookup, "lq\0" "hq\0" }
  };

  static const ImmBits vperm2x128[] = {
    { 0x0B, 0, ImmBits::kModeLookup, "a0\0" "a1\0" "b0\0" "b1\0" "\0" "\0" "\0" "\0" "0\0" "0\0" "0\0" "0\0" },
    { 0xB0, 4, ImmBits::kModeLookup, "a0\0" "a1\0" "b0\0" "b1\0" "\0" "\0" "\0" "\0" "0\0" "0\0" "0\0" "0\0" }
  };

  static const ImmBits vrangexx[] = {
    { 0x03, 0, ImmBits::kModeLookup, "min\0" "max\0" "min-abs\0" "max-abs\0" },
    { 0x0C, 2, ImmBits::kModeLookup, "sign=src1\0" "sign=src2\0" "sign=0\0" "sign=1\0" }
  };

  static const ImmBits vreducexx_vrndscalexx[] = {
    { 0x07, 0, ImmBits::kModeLookup, "\0" "\0" "\0" "\0" "round\0" "floor\0" "ceil\0" "truncate\0" },
    { 0x08, 3, ImmBits::kModeLookup, "\0" "suppress\0" },
    { 0xF0, 4, ImmBits::kModeFormat, "len=%d" }
  };

  static const ImmBits vroundxx[] = {
    { 0x07, 0, ImmBits::kModeLookup, "round\0" "floor\0" "ceil\0" "truncate\0" "\0" "\0" "\0" "\0" },
    { 0x08, 3, ImmBits::kModeLookup, "\0" "inexact\0" }
  };

  uint32_t u8 = imm.getUInt8();
  switch (instId) {
    case X86Inst::kIdVblendpd:
    case X86Inst::kIdBlendpd:
      return X86Logging_formatImmShuf(sb, u8, 1, vecSize / 8);

    case X86Inst::kIdVblendps:
    case X86Inst::kIdBlendps:
      return X86Logging_formatImmShuf(sb, u8, 1, vecSize / 4);

    case X86Inst::kIdVcmppd:
    case X86Inst::kIdVcmpps:
    case X86Inst::kIdVcmpsd:
    case X86Inst::kIdVcmpss:
      return X86Logging_formatImmText(sb, u8, 5, 0, vcmpx);

    case X86Inst::kIdCmppd:
    case X86Inst::kIdCmpps:
    case X86Inst::kIdCmpsd:
    case X86Inst::kIdCmpss:
      // SSE forms only encode a 3-bit predicate; first 8 vcmpx names apply.
      return X86Logging_formatImmText(sb, u8, 3, 0, vcmpx);

    case X86Inst::kIdVdbpsadbw:
      return X86Logging_formatImmShuf(sb, u8, 2, 4);

    case X86Inst::kIdVdppd:
    case X86Inst::kIdVdpps:
    case X86Inst::kIdDppd:
    case X86Inst::kIdDpps:
      return X86Logging_formatImmShuf(sb, u8, 1, 8);

    case X86Inst::kIdVmpsadbw:
    case X86Inst::kIdMpsadbw:
      return X86Logging_formatImmBits(sb, u8, vmpsadbw, std::min<uint32_t>(vecSize / 8, 4));

    case X86Inst::kIdVpblendw:
    case X86Inst::kIdPblendw:
      return X86Logging_formatImmShuf(sb, u8, 1, 8);

    case X86Inst::kIdVpblendd:
      return X86Logging_formatImmShuf(sb, u8, 1, std::min<uint32_t>(vecSize / 4, 8));

    case X86Inst::kIdVpclmulqdq:
    case X86Inst::kIdPclmulqdq:
      return X86Logging_formatImmBits(sb, u8, vpclmulqdq, ASMJIT_ARRAY_SIZE(vpclmulqdq));

    case X86Inst::kIdVroundpd:
    case X86Inst::kIdVroundps:
    case X86Inst::kIdVroundsd:
    case X86Inst::kIdVroundss:
    case X86Inst::kIdRoundpd:
    case X86Inst::kIdRoundps:
    case X86Inst::kIdRoundsd:
    case X86Inst::kIdRoundss:
      return X86Logging_formatImmBits(sb, u8, vroundxx, ASMJIT_ARRAY_SIZE(vroundxx));

    case X86Inst::kIdVshufpd:
    case X86Inst::kIdShufpd:
      return X86Logging_formatImmText(sb, u8, 1, 2, vshufpd, std::min<uint32_t>(vecSize / 8, 8));

    case X86Inst::kIdVshufps:
    case X86Inst::kIdShufps:
      return X86Logging_formatImmText(sb, u8, 2, 4, vshufps, 4);

    case X86Inst::kIdVcvtps2ph:
      // Only the rounding-mode field is meaningful here.
      return X86Logging_formatImmBits(sb, u8, vroundxx, 1);

    case X86Inst::kIdVperm2f128:
    case X86Inst::kIdVperm2i128:
      return X86Logging_formatImmBits(sb, u8, vperm2x128, ASMJIT_ARRAY_SIZE(vperm2x128));

    case X86Inst::kIdVpermilpd:
      return X86Logging_formatImmShuf(sb, u8, 1, vecSize / 8);

    case X86Inst::kIdVpermilps:
      return X86Logging_formatImmShuf(sb, u8, 2, 4);

    case X86Inst::kIdVpshufd:
    case X86Inst::kIdPshufd:
      return X86Logging_formatImmShuf(sb, u8, 2, 4);

    case X86Inst::kIdVpshufhw:
    case X86Inst::kIdVpshuflw:
    case X86Inst::kIdPshufhw:
    case X86Inst::kIdPshuflw:
    case X86Inst::kIdPshufw:
      return X86Logging_formatImmShuf(sb, u8, 2, 4);

    // TODO: VFIXUPIMM's imm8 indexes a fixup table; not decomposed here.
    case X86Inst::kIdVfixupimmpd:
    case X86Inst::kIdVfixupimmps:
    case X86Inst::kIdVfixupimmsd:
    case X86Inst::kIdVfixupimmss:
      return kErrorOk;

    case X86Inst::kIdVfpclasspd:
    case X86Inst::kIdVfpclassps:
    case X86Inst::kIdVfpclasssd:
    case X86Inst::kIdVfpclassss:
      return X86Logging_formatImmBits(sb, u8, vfpclassxx, ASMJIT_ARRAY_SIZE(vfpclassxx));

    case X86Inst::kIdVgetmantpd:
    case X86Inst::kIdVgetmantps:
    case X86Inst::kIdVgetmantsd:
    case X86Inst::kIdVgetmantss:
      return X86Logging_formatImmBits(sb, u8, vgetmantxx, ASMJIT_ARRAY_SIZE(vgetmantxx));

    case X86Inst::kIdVpcmpb:
    case X86Inst::kIdVpcmpd:
    case X86Inst::kIdVpcmpq:
    case X86Inst::kIdVpcmpw:
    case X86Inst::kIdVpcmpub:
    case X86Inst::kIdVpcmpud:
    case X86Inst::kIdVpcmpuq:
    case X86Inst::kIdVpcmpuw:
      return X86Logging_formatImmText(sb, u8, 2, 4, vpcmpx, 4);

    case X86Inst::kIdVpcomb:
    case X86Inst::kIdVpcomd:
    case X86Inst::kIdVpcomq:
    case X86Inst::kIdVpcomw:
    case X86Inst::kIdVpcomub:
    case X86Inst::kIdVpcomud:
    case X86Inst::kIdVpcomuq:
    case X86Inst::kIdVpcomuw:
      return X86Logging_formatImmText(sb, u8, 2, 4, vpcomx, 4);

    case X86Inst::kIdVpermq:
    case X86Inst::kIdVpermpd:
      return X86Logging_formatImmShuf(sb, u8, 2, 4);

    case X86Inst::kIdVpternlogd:
    case X86Inst::kIdVpternlogq:
      return X86Logging_formatImmShuf(sb, u8, 1, 8);

    case X86Inst::kIdVrangepd:
    case X86Inst::kIdVrangeps:
    case X86Inst::kIdVrangesd:
    case X86Inst::kIdVrangess:
      return X86Logging_formatImmBits(sb, u8, vrangexx, ASMJIT_ARRAY_SIZE(vrangexx));

    case X86Inst::kIdVreducepd:
    case X86Inst::kIdVreduceps:
    case X86Inst::kIdVreducesd:
    case X86Inst::kIdVreducess:
    case X86Inst::kIdVrndscalepd:
    case X86Inst::kIdVrndscaleps:
    case X86Inst::kIdVrndscalesd:
    case X86Inst::kIdVrndscaless:
      return X86Logging_formatImmBits(sb, u8, vreducexx_vrndscalexx, ASMJIT_ARRAY_SIZE(vreducexx_vrndscalexx));

    case X86Inst::kIdVshuff32x4:
    case X86Inst::kIdVshuff64x2:
    case X86Inst::kIdVshufi32x4:
    case X86Inst::kIdVshufi64x2: {
      // Lane-selector width depends on the number of 128-bit lanes.
      uint32_t count = std::max<uint32_t>(vecSize / 16, 2);
      uint32_t bits = count <= 2 ? 1 : 2;
      return X86Logging_formatImmShuf(sb, u8, bits, count);
    }

    default:
      return kErrorOk;
  }
}
// ============================================================================
// [asmjit::X86Logging - Format Register]
// ============================================================================
//! Formats a register into `sb`.
//!
//! Packed (virtual) ids resolve to the virtual register's name via the
//! CodeCompiler when available; physical registers use the static name
//! tables, falling back to a `PhysReg<...>`/`VirtReg<...>` diagnostic form.
ASMJIT_FAVOR_SIZE Error X86Logging::formatRegister(
  StringBuilder& sb,
  uint32_t logOptions,
  const CodeEmitter* emitter,
  uint32_t archType,
  uint32_t rType,
  uint32_t rId) noexcept {

  ASMJIT_UNUSED(logOptions);
  ASMJIT_UNUSED(archType);

  if (Operand::isPackedId(rId)) {
    // NOTE(review): this path uses CodeCompiler unconditionally while its
    // header is only included when !ASMJIT_DISABLE_COMPILER - verify this
    // translation unit still builds with the Compiler disabled.
    if (emitter && emitter->getType() == CodeEmitter::kTypeCompiler) {
      const CodeCompiler* cc = static_cast<const CodeCompiler*>(emitter);
      if (cc->isVirtRegValid(rId)) {
        VirtReg* vReg = cc->getVirtRegById(rId);
        ASMJIT_ASSERT(vReg != nullptr);

        // Prefer the user-assigned name, otherwise print "v<unpacked-id>".
        const char* name = vReg->getName();
        if (name && name[0] != '\0')
          return sb.appendString(name);
        else
          return sb.appendFormat("v%u", static_cast<unsigned int>(Operand::unpackId(rId)));
      }
    }
    return sb.appendFormat("VirtReg<Type=%u Id=%u>", rType, rId);
  }
  else {
    if (rType < ASMJIT_ARRAY_SIZE(x86RegFormatInfo)) {
      const X86RegFormatInfo& rfi = x86RegFormatInfo[rType];

      // Special names are padded to 4 bytes each, hence the `rId * 4` stride.
      if (rId < rfi.specialCount)
        return sb.appendString(x86RegFormatStrings + rfi.specialIndex + rId * 4);

      if (rId < rfi.count)
        return sb.appendFormat(x86RegFormatStrings + rfi.formatIndex, static_cast<unsigned int>(rId));
    }
    return sb.appendFormat("PhysReg<Type=%u Id=%u>", rType, rId);
  }
}
// ============================================================================
// [asmjit::X86Logging - Format Instruction]
// ============================================================================
//! Format a whole instruction - optional prefixes (short/long, lock, rep/repnz,
//! rex, vex3), the mnemonic, all operands, and AVX-512 decorators ({k}, {z},
//! {1tox}) - into `sb`. Returns the first error reported by the string builder.
ASMJIT_FAVOR_SIZE Error X86Logging::formatInstruction(
  StringBuilder& sb,
  uint32_t logOptions,
  const CodeEmitter* emitter,
  uint32_t archType,
  uint32_t instId,
  uint32_t options,
  const Operand_& opExtra,
  const Operand_* opArray, uint32_t opCount) noexcept {

  // Set when `opExtra` was consumed by the rep/repnz prefix so the AVX-512
  // {k} handling below doesn't print it a second time.
  bool opExtraDone = false;

  // Format instruction options and instruction mnemonic.
  if (instId < X86Inst::_kIdCount) {
    const X86Inst& instInfo = X86Inst::getInst(instId);

    // SHORT/LONG forms.
    if (options & X86Inst::kOptionShortForm) ASMJIT_PROPAGATE(sb.appendString("short "));
    if (options & X86Inst::kOptionLongForm) ASMJIT_PROPAGATE(sb.appendString("long "));

    // LOCK option.
    if (options & X86Inst::kOptionLock) ASMJIT_PROPAGATE(sb.appendString("lock "));

    // REP options.
    if (options & (X86Inst::kOptionRep | X86Inst::kOptionRepnz)) {
      const char* rep = "repnz ";
      if ((options & (X86Inst::kOptionRep | X86Inst::kOptionRepnz)) == X86Inst::kOptionRep)
        rep = instInfo.hasFlag(X86Inst::kInstFlagRepnz) ? "repz " : "rep ";

      // FIX: this append previously ignored a possible error, unlike every
      // other append in this function.
      ASMJIT_PROPAGATE(sb.appendString(rep));

      if (!opExtra.isNone()) {
        ASMJIT_PROPAGATE(sb.appendChar('{'));
        ASMJIT_PROPAGATE(formatOperand(sb, logOptions, emitter, archType, opExtra));
        ASMJIT_PROPAGATE(sb.appendString("} "));
        opExtraDone = true;
      }
    }

    // REX options.
    if (options & X86Inst::kOptionRex) {
      const uint32_t kRXBWMask = X86Inst::kOptionOpCodeR |
                                 X86Inst::kOptionOpCodeX |
                                 X86Inst::kOptionOpCodeB |
                                 X86Inst::kOptionOpCodeW ;
      if (options & kRXBWMask) {
        // FIX: these appends previously dropped their error codes.
        ASMJIT_PROPAGATE(sb.appendString("rex."));
        if (options & X86Inst::kOptionOpCodeR) ASMJIT_PROPAGATE(sb.appendChar('r'));
        if (options & X86Inst::kOptionOpCodeX) ASMJIT_PROPAGATE(sb.appendChar('x'));
        if (options & X86Inst::kOptionOpCodeB) ASMJIT_PROPAGATE(sb.appendChar('b'));
        if (options & X86Inst::kOptionOpCodeW) ASMJIT_PROPAGATE(sb.appendChar('w'));
        ASMJIT_PROPAGATE(sb.appendChar(' '));
      }
      else {
        ASMJIT_PROPAGATE(sb.appendString("rex "));
      }
    }

    // VEX options.
    if (options & X86Inst::kOptionVex3)
      ASMJIT_PROPAGATE(sb.appendString("vex3 "));

    ASMJIT_PROPAGATE(sb.appendString(instInfo.getName()));
  }
  else {
    ASMJIT_PROPAGATE(sb.appendFormat("<unknown id=#%u>", static_cast<unsigned int>(instId)));
  }

  for (uint32_t i = 0; i < opCount; i++) {
    const Operand_& op = opArray[i];
    if (op.isNone()) break;

    ASMJIT_PROPAGATE(sb.appendString(i == 0 ? " " : ", "));
    ASMJIT_PROPAGATE(formatOperand(sb, logOptions, emitter, archType, op));

    if (op.isImm() && (logOptions & Logger::kOptionImmExtended)) {
      // The extended-immediate formatter needs the widest vector register in
      // the operand list to decode per-lane shuffle/predicate bits correctly.
      uint32_t vecSize = 16;
      for (uint32_t j = 0; j < opCount; j++)
        if (opArray[j].isReg())
          vecSize = std::max<uint32_t>(vecSize, opArray[j].getSize());
      ASMJIT_PROPAGATE(X86Logging_formatImmExtended(sb, logOptions, instId, vecSize, op.as<Imm>()));
    }

    // Support AVX-512 {k}{z} - printed right after the first (destination) operand.
    if (i == 0) {
      const uint32_t kExtMsk =
        X86Inst::kOptionOpExtra |
        X86Inst::kOptionRep    |
        X86Inst::kOptionRepnz  ;

      if ((options & kExtMsk) == X86Inst::kOptionOpExtra) {
        ASMJIT_PROPAGATE(sb.appendString(" {"));
        ASMJIT_PROPAGATE(formatOperand(sb, logOptions, emitter, archType, opExtra));
        ASMJIT_PROPAGATE(sb.appendChar('}'));

        if (options & X86Inst::kOptionKZ)
          ASMJIT_PROPAGATE(sb.appendString("{z}"));
      }
      else if (options & X86Inst::kOptionKZ) {
        ASMJIT_PROPAGATE(sb.appendString(" {z}"));
      }
    }

    // Support AVX-512 {1tox}.
    if (op.isMem() && (options & X86Inst::kOption1ToX))
      ASMJIT_PROPAGATE(sb.appendString(" {1tox}"));
  }

  return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_LOGGING

View File

@@ -0,0 +1,66 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86LOGGING_P_H
#define _ASMJIT_X86_X86LOGGING_P_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_LOGGING)
// [Dependencies]
#include "../base/logging.h"
#include "../x86/x86globals.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::X86Logging]
// ============================================================================
//! Formatting helpers used by X86 loggers to turn registers, operands, and
//! whole instructions into text appended to a `StringBuilder`.
struct X86Logging {
  //! Format a single register (by `regType` and `regId`) into `sb`.
  //!
  //! Packed (virtual) register ids are resolved through `emitter` when it is
  //! a CodeCompiler so a user-assigned name can be printed; otherwise a
  //! generic `PhysReg<...>` / `VirtReg<...>` form is used.
  static Error formatRegister(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    uint32_t regType,
    uint32_t regId) noexcept;

  //! Format a single operand `op` into `sb`.
  static Error formatOperand(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    const Operand_& op) noexcept;

  //! Format a whole instruction - prefixes, mnemonic, operands, and AVX-512
  //! decorators - into `sb`. `opExtra` carries the extra operand used by
  //! rep/repnz and AVX-512 {k} masking; `opArray`/`opCount` are the regular
  //! operands.
  static Error formatInstruction(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    uint32_t instId,
    uint32_t options,
    const Operand_& opExtra,
    const Operand_* opArray, uint32_t opCount) noexcept;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_LOGGING
#endif // _ASMJIT_X86_X86LOGGING_P_H

388
src/asmjit/x86/x86misc.h Normal file
View File

@@ -0,0 +1,388 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86MISC_H
#define _ASMJIT_X86_X86MISC_H
// [Dependencies]
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::X86RegCount]
// ============================================================================
//! \internal
//!
//! X86/X64 registers count.
//!
//! Since the number of registers changed across CPU generations `X86RegCount`
//! class is used by `X86Assembler` and `X86Compiler` to provide a way to get
//! number of available registers dynamically. 32-bit mode offers always only
//! 8 registers of all classes, however, 64-bit mode offers 16 GP registers and
//! 16 XMM/YMM/ZMM registers. AVX512 instruction set doubles the number of SIMD
//! registers (XMM/YMM/ZMM) to 32, this mode has to be explicitly enabled to
//! take effect as it changes some assumptions.
//!
//! `X86RegCount` is also used extensively by X86Compiler's register allocator
//! and data structures. FP registers were omitted as they are never mapped to
//! variables, thus, not needed to be managed.
//!
//! NOTE: At the moment `X86RegCount` can fit into 32-bits, having 8-bits for
//! each register kind except FP. This can change in the future after a new
//! instruction set, which adds more registers, is introduced.
struct X86RegCount {
  // --------------------------------------------------------------------------
  // [Zero]
  // --------------------------------------------------------------------------

  //! Reset all counters to zero.
  ASMJIT_INLINE void reset() noexcept { _packed = 0; }

  // --------------------------------------------------------------------------
  // [Get]
  // --------------------------------------------------------------------------

  //! Get register count by a register `kind` (one byte of `_packed`).
  ASMJIT_INLINE uint32_t get(uint32_t kind) const noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    uint32_t shift = Utils::byteShiftOfDWordStruct(kind);
    return (_packed >> shift) & static_cast<uint32_t>(0xFF);
  }

  //! Get Gp count.
  ASMJIT_INLINE uint32_t getGp() const noexcept { return get(X86Reg::kKindGp); }
  //! Get Mm count.
  ASMJIT_INLINE uint32_t getMm() const noexcept { return get(X86Reg::kKindMm); }
  //! Get K count.
  ASMJIT_INLINE uint32_t getK() const noexcept { return get(X86Reg::kKindK); }
  //! Get XMM/YMM/ZMM count.
  ASMJIT_INLINE uint32_t getVec() const noexcept { return get(X86Reg::kKindVec); }

  // --------------------------------------------------------------------------
  // [Set]
  // --------------------------------------------------------------------------

  //! Set register count by a register `kind`.
  ASMJIT_INLINE void set(uint32_t kind, uint32_t n) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);
    ASMJIT_ASSERT(n <= 0xFF);

    uint32_t shift = Utils::byteShiftOfDWordStruct(kind);
    // FIX: cast `0xFF` to uint32_t BEFORE shifting. The previous form
    // `static_cast<uint32_t>(0xFF << shift)` shifted the signed int literal,
    // which for shift == 24 moves bits into the sign bit - undefined behavior.
    _packed = (_packed & ~(static_cast<uint32_t>(0xFF) << shift)) + (n << shift);
  }

  //! Set Gp count.
  ASMJIT_INLINE void setGp(uint32_t n) noexcept { set(X86Reg::kKindGp, n); }
  //! Set Mm count.
  ASMJIT_INLINE void setMm(uint32_t n) noexcept { set(X86Reg::kKindMm, n); }
  //! Set K count.
  ASMJIT_INLINE void setK(uint32_t n) noexcept { set(X86Reg::kKindK, n); }
  //! Set XMM/YMM/ZMM count.
  ASMJIT_INLINE void setVec(uint32_t n) noexcept { set(X86Reg::kKindVec, n); }

  // --------------------------------------------------------------------------
  // [Add]
  // --------------------------------------------------------------------------

  //! Add `n` to the register count of a register `kind`.
  ASMJIT_INLINE void add(uint32_t kind, uint32_t n = 1) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);
    // Guard against overflowing the 8-bit per-kind counter.
    ASMJIT_ASSERT(0xFF - static_cast<uint32_t>(_regs[kind]) >= n);

    uint32_t shift = Utils::byteShiftOfDWordStruct(kind);
    _packed += n << shift;
  }

  //! Add GP count.
  ASMJIT_INLINE void addGp(uint32_t n) noexcept { add(X86Reg::kKindGp, n); }
  //! Add MMX count.
  ASMJIT_INLINE void addMm(uint32_t n) noexcept { add(X86Reg::kKindMm, n); }
  //! Add K count.
  ASMJIT_INLINE void addK(uint32_t n) noexcept { add(X86Reg::kKindK, n); }
  //! Add XMM/YMM/ZMM count.
  ASMJIT_INLINE void addVec(uint32_t n) noexcept { add(X86Reg::kKindVec, n); }

  // --------------------------------------------------------------------------
  // [Misc]
  // --------------------------------------------------------------------------

  //! Build register indexes based on the given `count` of registers, i.e.
  //! turn per-kind counts into per-kind starting offsets (prefix sums).
  ASMJIT_INLINE void indexFromRegCount(const X86RegCount& count) noexcept {
    uint32_t x = static_cast<uint32_t>(count._regs[0]);
    uint32_t y = static_cast<uint32_t>(count._regs[1]) + x;
    uint32_t z = static_cast<uint32_t>(count._regs[2]) + y;

    ASMJIT_ASSERT(y <= 0xFF);
    ASMJIT_ASSERT(z <= 0xFF);
    _packed = Utils::pack32_4x8(0, x, y, z);
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  union {
    struct {
      //! Count of GP registers.
      uint8_t _gp;
      //! Count of XMM|YMM|ZMM registers.
      uint8_t _vec;
      //! Count of MMX registers.
      uint8_t _mm;
      //! Count of K registers.
      uint8_t _k;
    };

    uint8_t _regs[4];
    uint32_t _packed;
  };
};
// ============================================================================
// [asmjit::X86RegMask]
// ============================================================================
//! \internal
//!
//! X86/X64 registers mask.
struct X86RegMask {
  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------

  //! Clear the masks of all register kinds at once.
  ASMJIT_INLINE void reset() noexcept {
    _packed.reset();
  }

  // --------------------------------------------------------------------------
  // [IsEmpty / Has]
  // --------------------------------------------------------------------------

  //! Get whether no register of any kind is set in any mask.
  ASMJIT_INLINE bool isEmpty() const noexcept {
    return _packed.isZero();
  }

  //! Get whether `mask` intersects the mask of register `kind`.
  ASMJIT_INLINE bool has(uint32_t kind, uint32_t mask = 0xFFFFFFFFU) const noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    if (kind == X86Reg::kKindGp ) return (static_cast<uint32_t>(_gp ) & mask) != 0;
    if (kind == X86Reg::kKindVec) return (static_cast<uint32_t>(_vec) & mask) != 0;
    if (kind == X86Reg::kKindMm ) return (static_cast<uint32_t>(_mm ) & mask) != 0;
    if (kind == X86Reg::kKindK  ) return (static_cast<uint32_t>(_k  ) & mask) != 0;

    return false;
  }

  ASMJIT_INLINE bool hasGp(uint32_t mask = 0xFFFFFFFFU) const noexcept { return has(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE bool hasVec(uint32_t mask = 0xFFFFFFFFU) const noexcept { return has(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE bool hasMm(uint32_t mask = 0xFFFFFFFFU) const noexcept { return has(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE bool hasK(uint32_t mask = 0xFFFFFFFFU) const noexcept { return has(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [Get]
  // --------------------------------------------------------------------------

  //! Get the whole mask of register `kind` (zero-extended to 32 bits).
  ASMJIT_INLINE uint32_t get(uint32_t kind) const noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    if (kind == X86Reg::kKindGp ) return _gp;
    if (kind == X86Reg::kKindVec) return _vec;
    if (kind == X86Reg::kKindMm ) return _mm;
    if (kind == X86Reg::kKindK  ) return _k;

    return 0;
  }

  ASMJIT_INLINE uint32_t getGp() const noexcept { return get(X86Reg::kKindGp); }
  ASMJIT_INLINE uint32_t getVec() const noexcept { return get(X86Reg::kKindVec); }
  ASMJIT_INLINE uint32_t getMm() const noexcept { return get(X86Reg::kKindMm); }
  ASMJIT_INLINE uint32_t getK() const noexcept { return get(X86Reg::kKindK); }

  // --------------------------------------------------------------------------
  // [Zero]
  // --------------------------------------------------------------------------

  //! Clear the mask of a single register `kind`.
  ASMJIT_INLINE void zero(uint32_t kind) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    if      (kind == X86Reg::kKindGp ) _gp  = 0;
    else if (kind == X86Reg::kKindVec) _vec = 0;
    else if (kind == X86Reg::kKindMm ) _mm  = 0;
    else if (kind == X86Reg::kKindK  ) _k   = 0;
  }

  ASMJIT_INLINE void zeroGp() noexcept { zero(X86Reg::kKindGp); }
  ASMJIT_INLINE void zeroVec() noexcept { zero(X86Reg::kKindVec); }
  ASMJIT_INLINE void zeroMm() noexcept { zero(X86Reg::kKindMm); }
  ASMJIT_INLINE void zeroK() noexcept { zero(X86Reg::kKindK); }

  // --------------------------------------------------------------------------
  // [Set]
  // --------------------------------------------------------------------------

  //! Copy all masks from `other`.
  ASMJIT_INLINE void set(const X86RegMask& other) noexcept {
    _packed = other._packed;
  }

  //! Overwrite the mask of register `kind` with `mask` (truncated to the
  //! kind's width).
  ASMJIT_INLINE void set(uint32_t kind, uint32_t mask) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    if      (kind == X86Reg::kKindGp ) _gp  = static_cast<uint16_t>(mask);
    else if (kind == X86Reg::kKindMm ) _mm  = static_cast<uint8_t >(mask);
    else if (kind == X86Reg::kKindK  ) _k   = static_cast<uint8_t >(mask);
    else if (kind == X86Reg::kKindVec) _vec = static_cast<uint32_t>(mask);
  }

  ASMJIT_INLINE void setGp(uint32_t mask) noexcept { set(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE void setVec(uint32_t mask) noexcept { set(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE void setMm(uint32_t mask) noexcept { set(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE void setK(uint32_t mask) noexcept { set(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [And]
  // --------------------------------------------------------------------------

  //! Intersect all masks with `other` (bitwise AND).
  ASMJIT_INLINE void and_(const X86RegMask& other) noexcept {
    _packed.and_(other._packed);
  }

  //! Intersect the mask of register `kind` with `mask`.
  ASMJIT_INLINE void and_(uint32_t kind, uint32_t mask) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    if      (kind == X86Reg::kKindGp ) _gp  &= static_cast<uint16_t>(mask);
    else if (kind == X86Reg::kKindMm ) _mm  &= static_cast<uint8_t >(mask);
    else if (kind == X86Reg::kKindK  ) _k   &= static_cast<uint8_t >(mask);
    else if (kind == X86Reg::kKindVec) _vec &= static_cast<uint32_t>(mask);
  }

  ASMJIT_INLINE void andGp(uint32_t mask) noexcept { and_(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE void andVec(uint32_t mask) noexcept { and_(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE void andMm(uint32_t mask) noexcept { and_(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE void andK(uint32_t mask) noexcept { and_(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [AndNot]
  // --------------------------------------------------------------------------

  //! Clear in all masks every bit set in `other` (bitwise AND-NOT).
  ASMJIT_INLINE void andNot(const X86RegMask& other) noexcept {
    _packed.andNot(other._packed);
  }

  //! Clear in the mask of register `kind` every bit set in `mask`.
  ASMJIT_INLINE void andNot(uint32_t kind, uint32_t mask) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    if      (kind == X86Reg::kKindGp ) _gp  &= ~static_cast<uint16_t>(mask);
    else if (kind == X86Reg::kKindMm ) _mm  &= ~static_cast<uint8_t >(mask);
    else if (kind == X86Reg::kKindK  ) _k   &= ~static_cast<uint8_t >(mask);
    else if (kind == X86Reg::kKindVec) _vec &= ~static_cast<uint32_t>(mask);
  }

  ASMJIT_INLINE void andNotGp(uint32_t mask) noexcept { andNot(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE void andNotVec(uint32_t mask) noexcept { andNot(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE void andNotMm(uint32_t mask) noexcept { andNot(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE void andNotK(uint32_t mask) noexcept { andNot(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [Or]
  // --------------------------------------------------------------------------

  //! Merge all masks with `other` (bitwise OR).
  ASMJIT_INLINE void or_(const X86RegMask& other) noexcept {
    _packed.or_(other._packed);
  }

  //! Merge the mask of register `kind` with `mask`.
  ASMJIT_INLINE void or_(uint32_t kind, uint32_t mask) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    if      (kind == X86Reg::kKindGp ) _gp  |= static_cast<uint16_t>(mask);
    else if (kind == X86Reg::kKindMm ) _mm  |= static_cast<uint8_t >(mask);
    else if (kind == X86Reg::kKindK  ) _k   |= static_cast<uint8_t >(mask);
    else if (kind == X86Reg::kKindVec) _vec |= static_cast<uint32_t>(mask);
  }

  ASMJIT_INLINE void orGp(uint32_t mask) noexcept { or_(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE void orVec(uint32_t mask) noexcept { or_(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE void orMm(uint32_t mask) noexcept { or_(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE void orK(uint32_t mask) noexcept { or_(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [Xor]
  // --------------------------------------------------------------------------

  //! Toggle in all masks every bit set in `other` (bitwise XOR).
  ASMJIT_INLINE void xor_(const X86RegMask& other) noexcept {
    _packed.xor_(other._packed);
  }

  //! Toggle in the mask of register `kind` every bit set in `mask`.
  ASMJIT_INLINE void xor_(uint32_t kind, uint32_t mask) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    if      (kind == X86Reg::kKindGp ) _gp  ^= static_cast<uint16_t>(mask);
    else if (kind == X86Reg::kKindMm ) _mm  ^= static_cast<uint8_t >(mask);
    else if (kind == X86Reg::kKindK  ) _k   ^= static_cast<uint8_t >(mask);
    else if (kind == X86Reg::kKindVec) _vec ^= static_cast<uint32_t>(mask);
  }

  ASMJIT_INLINE void xorGp(uint32_t mask) noexcept { xor_(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE void xorVec(uint32_t mask) noexcept { xor_(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE void xorMm(uint32_t mask) noexcept { xor_(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE void xorK(uint32_t mask) noexcept { xor_(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  union {
    struct {
      //! GP registers mask (16 bits).
      uint16_t _gp;
      //! MMX registers mask (8 bits).
      uint8_t _mm;
      //! K registers mask (8 bits).
      uint8_t _k;
      //! XMM|YMM|ZMM registers mask (32 bits).
      uint32_t _vec;
    };

    //! Packed masks.
    UInt64 _packed;
  };
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_X86_X86MISC_H

View File

@@ -8,78 +8,169 @@
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)
#include "../asmjit_build.h"
#if defined(ASMJIT_BUILD_X86)
// [Dependencies]
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
namespace x86 {
// ============================================================================
// [asmjit::X86Mem - abs[]]
// [asmjit::X86Operand - Test]
// ============================================================================
X86Mem ptr_abs(Ptr p, int32_t disp, uint32_t size) noexcept {
X86Mem m(NoInit);
#if defined(ASMJIT_TEST)
UNIT(x86_operand) {
Label L;
m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, 0, kInvalidValue);
m._vmem.index = kInvalidValue;
m._vmem.displacement = static_cast<int32_t>((intptr_t)(p + disp));
INFO("Checking basic properties of built-in X86 registers");
EXPECT(x86::gpb(X86Gp::kIdAx) == x86::al);
EXPECT(x86::gpb(X86Gp::kIdBx) == x86::bl);
EXPECT(x86::gpb(X86Gp::kIdCx) == x86::cl);
EXPECT(x86::gpb(X86Gp::kIdDx) == x86::dl);
return m;
EXPECT(x86::gpb_lo(X86Gp::kIdAx) == x86::al);
EXPECT(x86::gpb_lo(X86Gp::kIdBx) == x86::bl);
EXPECT(x86::gpb_lo(X86Gp::kIdCx) == x86::cl);
EXPECT(x86::gpb_lo(X86Gp::kIdDx) == x86::dl);
EXPECT(x86::gpb_hi(X86Gp::kIdAx) == x86::ah);
EXPECT(x86::gpb_hi(X86Gp::kIdBx) == x86::bh);
EXPECT(x86::gpb_hi(X86Gp::kIdCx) == x86::ch);
EXPECT(x86::gpb_hi(X86Gp::kIdDx) == x86::dh);
EXPECT(x86::gpw(X86Gp::kIdAx) == x86::ax);
EXPECT(x86::gpw(X86Gp::kIdBx) == x86::bx);
EXPECT(x86::gpw(X86Gp::kIdCx) == x86::cx);
EXPECT(x86::gpw(X86Gp::kIdDx) == x86::dx);
EXPECT(x86::gpd(X86Gp::kIdAx) == x86::eax);
EXPECT(x86::gpd(X86Gp::kIdBx) == x86::ebx);
EXPECT(x86::gpd(X86Gp::kIdCx) == x86::ecx);
EXPECT(x86::gpd(X86Gp::kIdDx) == x86::edx);
EXPECT(x86::gpq(X86Gp::kIdAx) == x86::rax);
EXPECT(x86::gpq(X86Gp::kIdBx) == x86::rbx);
EXPECT(x86::gpq(X86Gp::kIdCx) == x86::rcx);
EXPECT(x86::gpq(X86Gp::kIdDx) == x86::rdx);
EXPECT(x86::gpb(X86Gp::kIdAx) != x86::dl);
EXPECT(x86::gpw(X86Gp::kIdBx) != x86::cx);
EXPECT(x86::gpd(X86Gp::kIdCx) != x86::ebx);
EXPECT(x86::gpq(X86Gp::kIdDx) != x86::rax);
INFO("Checking if x86::reg(...) matches built-in IDs");
EXPECT(x86::fp(5) == x86::fp5);
EXPECT(x86::mm(5) == x86::mm5);
EXPECT(x86::k(5) == x86::k5);
EXPECT(x86::cr(5) == x86::cr5);
EXPECT(x86::dr(5) == x86::dr5);
EXPECT(x86::xmm(5) == x86::xmm5);
EXPECT(x86::ymm(5) == x86::ymm5);
EXPECT(x86::zmm(5) == x86::zmm5);
INFO("Checking GP register properties");
EXPECT(X86Gp().isReg() == false);
EXPECT(x86::eax.isReg() == true);
EXPECT(x86::eax.getId() == 0);
EXPECT(x86::eax.getSize() == 4);
EXPECT(x86::eax.getType() == X86Reg::kRegGpd);
EXPECT(x86::eax.getKind() == X86Reg::kKindGp);
INFO("Checking FP register properties");
EXPECT(X86Fp().isReg() == false);
EXPECT(x86::fp1.isReg() == true);
EXPECT(x86::fp1.getId() == 1);
EXPECT(x86::fp1.getSize() == 10);
EXPECT(x86::fp1.getType() == X86Reg::kRegFp);
EXPECT(x86::fp1.getKind() == X86Reg::kKindFp);
INFO("Checking MM register properties");
EXPECT(X86Mm().isReg() == false);
EXPECT(x86::mm2.isReg() == true);
EXPECT(x86::mm2.getId() == 2);
EXPECT(x86::mm2.getSize() == 8);
EXPECT(x86::mm2.getType() == X86Reg::kRegMm);
EXPECT(x86::mm2.getKind() == X86Reg::kKindMm);
INFO("Checking K register properties");
EXPECT(X86KReg().isReg() == false);
EXPECT(x86::k3.isReg() == true);
EXPECT(x86::k3.getId() == 3);
EXPECT(x86::k3.getSize() == 0);
EXPECT(x86::k3.getType() == X86Reg::kRegK);
EXPECT(x86::k3.getKind() == X86Reg::kKindK);
INFO("Checking XMM register properties");
EXPECT(X86Xmm().isReg() == false);
EXPECT(x86::xmm4.isReg() == true);
EXPECT(x86::xmm4.getId() == 4);
EXPECT(x86::xmm4.getSize() == 16);
EXPECT(x86::xmm4.getType() == X86Reg::kRegXmm);
EXPECT(x86::xmm4.getKind() == X86Reg::kKindVec);
EXPECT(x86::xmm4.isVec());
INFO("Checking YMM register properties");
EXPECT(X86Ymm().isReg() == false);
EXPECT(x86::ymm5.isReg() == true);
EXPECT(x86::ymm5.getId() == 5);
EXPECT(x86::ymm5.getSize() == 32);
EXPECT(x86::ymm5.getType() == X86Reg::kRegYmm);
EXPECT(x86::ymm5.getKind() == X86Reg::kKindVec);
EXPECT(x86::ymm5.isVec());
INFO("Checking ZMM register properties");
EXPECT(X86Zmm().isReg() == false);
EXPECT(x86::zmm6.isReg() == true);
EXPECT(x86::zmm6.getId() == 6);
EXPECT(x86::zmm6.getSize() == 64);
EXPECT(x86::zmm6.getType() == X86Reg::kRegZmm);
EXPECT(x86::zmm6.getKind() == X86Reg::kKindVec);
EXPECT(x86::zmm6.isVec());
INFO("Checking XYZ register properties");
EXPECT(X86Vec().isReg() == false);
// Converts a XYZ register to a type of the passed register, but keeps the ID.
EXPECT(x86::xmm4.cloneAs(x86::ymm10) == x86::ymm4);
EXPECT(x86::xmm4.cloneAs(x86::zmm11) == x86::zmm4);
EXPECT(x86::ymm5.cloneAs(x86::xmm12) == x86::xmm5);
EXPECT(x86::ymm5.cloneAs(x86::zmm13) == x86::zmm5);
EXPECT(x86::zmm6.cloneAs(x86::xmm14) == x86::xmm6);
EXPECT(x86::zmm6.cloneAs(x86::ymm15) == x86::ymm6);
INFO("Checking if default constructed regs behave as expected");
EXPECT(X86Reg().isValid() == false);
EXPECT(X86Gp().isValid() == false);
EXPECT(X86Fp().isValid() == false);
EXPECT(X86Mm().isValid() == false);
EXPECT(X86Xmm().isValid() == false);
EXPECT(X86Ymm().isValid() == false);
EXPECT(X86Zmm().isValid() == false);
EXPECT(X86KReg().isValid() == false);
INFO("Checking X86Mem operand");
X86Mem m;
EXPECT(m == X86Mem(),
"Two default constructed X86Mem operands must be equal");
X86Mem mL = x86::ptr(L);
EXPECT(mL.hasBase() == true,
"Memory constructed from Label must hasBase()");
EXPECT(mL.hasBaseReg() == false,
"Memory constructed from Label must not report hasBaseReg()");
EXPECT(mL.hasBaseLabel() == true,
"Memory constructed from Label must report hasBaseLabel()");
}
#endif // ASMJIT_TEST
X86Mem ptr_abs(Ptr p, const X86Reg& index, uint32_t shift, int32_t disp, uint32_t size) noexcept {
X86Mem m(NoInit);
uint32_t flags = shift << kX86MemShiftIndex;
if (index.isGp())
flags |= X86Mem::_getGpdFlags(index);
else if (index.isXmm())
flags |= kX86MemVSibXmm << kX86MemVSibIndex;
else if (index.isYmm())
flags |= kX86MemVSibYmm << kX86MemVSibIndex;
m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue);
m._vmem.index = index.getRegIndex();
m._vmem.displacement = static_cast<int32_t>((intptr_t)(p + disp));
return m;
}
#if !defined(ASMJIT_DISABLE_COMPILER)
X86Mem ptr_abs(Ptr p, const X86Var& index, uint32_t shift, int32_t disp, uint32_t size) noexcept {
X86Mem m(NoInit);
uint32_t flags = shift << kX86MemShiftIndex;
const Var& index_ = reinterpret_cast<const Var&>(index);
uint32_t indexRegType = index_.getRegType();
if (indexRegType <= kX86RegTypeGpq)
flags |= X86Mem::_getGpdFlags(reinterpret_cast<const Var&>(index));
else if (indexRegType == kX86RegTypeXmm)
flags |= kX86MemVSibXmm << kX86MemVSibIndex;
else if (indexRegType == kX86RegTypeYmm)
flags |= kX86MemVSibYmm << kX86MemVSibIndex;
m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue);
m._vmem.index = index_.getId();
m._vmem.displacement = static_cast<int32_t>((intptr_t)(p + disp));
return m;
}
#endif // !ASMJIT_DISABLE_COMPILER
} // x86 namespace
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64
#endif // ASMJIT_BUILD_X86

File diff suppressed because it is too large Load Diff

View File

@@ -6,79 +6,117 @@
// [Export]
#define ASMJIT_EXPORTS
#define ASMJIT_EXPORTS_X86_REGS
#define ASMJIT_EXPORTS_X86_OPERAND
// [Guard]
#include "../build.h"
#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)
#include "../asmjit_build.h"
#if defined(ASMJIT_BUILD_X86)
// [Dependencies]
#include "../base/misc_p.h"
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../apibegin.h"
#include "../asmjit_apibegin.h"
namespace asmjit {
#define REG(type, index, size) {{{ \
Operand::kTypeReg, size, { ((type) << 8) + index }, kInvalidValue, {{ kInvalidVar, 0 }} \
// ============================================================================
// [asmjit::X86OpData]
// ============================================================================
// Register Operand {
// uint32_t signature;
// uint32_t id;
// uint32_t reserved8_4;
// uint32_t reserved12_4;
// }
#define ASMJIT_X86_REG_01(TYPE, ID) \
{{{ \
uint32_t(X86RegTraits<TYPE>::kSignature), \
uint32_t(ID), \
uint32_t(0), \
uint32_t(0) \
}}}
#define REG_LIST_04(type, start, size) \
REG(type, start + 0, size), \
REG(type, start + 1, size), \
REG(type, start + 2, size), \
REG(type, start + 3, size)
#define ASMJIT_X86_REG_04(TYPE, ID) \
ASMJIT_X86_REG_01(TYPE, ID + 0 ), \
ASMJIT_X86_REG_01(TYPE, ID + 1 ), \
ASMJIT_X86_REG_01(TYPE, ID + 2 ), \
ASMJIT_X86_REG_01(TYPE, ID + 3 )
#define REG_LIST_08(type, start, size) \
REG_LIST_04(type, start + 0, size), \
REG_LIST_04(type, start + 4, size)
#define ASMJIT_X86_REG_07(TYPE, ID) \
ASMJIT_X86_REG_04(TYPE, ID + 0 ), \
ASMJIT_X86_REG_01(TYPE, ID + 4 ), \
ASMJIT_X86_REG_01(TYPE, ID + 5 ), \
ASMJIT_X86_REG_01(TYPE, ID + 6 )
#define REG_LIST_16(type, start, size) \
REG_LIST_08(type, start + 0, size), \
REG_LIST_08(type, start + 8, size)
#define ASMJIT_X86_REG_08(TYPE, ID) \
ASMJIT_X86_REG_04(TYPE, ID + 0 ), \
ASMJIT_X86_REG_04(TYPE, ID + 4 )
#define REG_LIST_32(type, start, size) \
REG_LIST_16(type, start + 0, size), \
REG_LIST_16(type, start + 16, size)
#define ASMJIT_X86_REG_16(TYPE, ID) \
ASMJIT_X86_REG_08(TYPE, ID + 0 ), \
ASMJIT_X86_REG_08(TYPE, ID + 8 )
const X86RegData x86RegData = {
{ REG_LIST_16(kX86RegTypeGpd , 0, 4) },
{ REG_LIST_16(kX86RegTypeGpq , 0, 8) },
{ REG_LIST_16(kX86RegTypeGpbLo, 0, 1) },
{ REG_LIST_04(kX86RegTypeGpbHi, 0, 1) },
{ REG_LIST_16(kX86RegTypeGpw , 0, 2) },
{ REG_LIST_32(kX86RegTypeXmm , 0, 16) },
{ REG_LIST_32(kX86RegTypeYmm , 0, 32) },
{ REG_LIST_32(kX86RegTypeZmm , 0, 64) },
{ REG_LIST_08(kX86RegTypeK , 0, 8) },
{ REG_LIST_08(kX86RegTypeFp , 0, 10) },
{ REG_LIST_08(kX86RegTypeMm , 0, 8) },
#define ASMJIT_X86_REG_32(TYPE, ID) \
ASMJIT_X86_REG_16(TYPE, ID + 0 ), \
ASMJIT_X86_REG_16(TYPE, ID + 16)
const X86OpData x86OpData = {
// --------------------------------------------------------------------------
// [ArchRegs]
// --------------------------------------------------------------------------
{
REG(kX86RegTypeSeg, 0, 2), // Default.
REG(kX86RegTypeSeg, 1, 2), // ES.
REG(kX86RegTypeSeg, 2, 2), // CS.
REG(kX86RegTypeSeg, 3, 2), // SS.
REG(kX86RegTypeSeg, 4, 2), // DS.
REG(kX86RegTypeSeg, 5, 2), // FS.
REG(kX86RegTypeSeg, 6, 2) // GS.
{
#define ASMJIT_X86_REG_SIGNATURE(TYPE) { X86RegTraits<TYPE>::kSignature }
ASMJIT_TABLE_16(ASMJIT_X86_REG_SIGNATURE, 0),
ASMJIT_TABLE_16(ASMJIT_X86_REG_SIGNATURE, 16)
#undef ASMJIT_X86_REG_SIGNATURE
},
// RegCount[]
{ ASMJIT_TABLE_T_32(X86RegTraits, kCount, 0) },
// RegTypeToTypeId[]
{ ASMJIT_TABLE_T_32(X86RegTraits, kTypeId, 0) }
},
REG(kInvalidReg, kInvalidReg, 0), // NoGp.
REG(kX86RegTypeRip, 0, 0), // RIP.
// --------------------------------------------------------------------------
// [Registers]
// --------------------------------------------------------------------------
{ ASMJIT_X86_REG_01(X86Reg::kRegRip , 0) },
{ ASMJIT_X86_REG_07(X86Reg::kRegSeg , 0) },
{ ASMJIT_X86_REG_16(X86Reg::kRegGpbLo, 0) },
{ ASMJIT_X86_REG_04(X86Reg::kRegGpbHi, 0) },
{ ASMJIT_X86_REG_16(X86Reg::kRegGpw , 0) },
{ ASMJIT_X86_REG_16(X86Reg::kRegGpd , 0) },
{ ASMJIT_X86_REG_16(X86Reg::kRegGpq , 0) },
{ ASMJIT_X86_REG_08(X86Reg::kRegFp , 0) },
{ ASMJIT_X86_REG_08(X86Reg::kRegMm , 0) },
{ ASMJIT_X86_REG_08(X86Reg::kRegK , 0) },
{ ASMJIT_X86_REG_32(X86Reg::kRegXmm , 0) },
{ ASMJIT_X86_REG_32(X86Reg::kRegYmm , 0) },
{ ASMJIT_X86_REG_32(X86Reg::kRegZmm , 0) },
{ ASMJIT_X86_REG_04(X86Reg::kRegBnd , 0) },
{ ASMJIT_X86_REG_16(X86Reg::kRegCr , 0) },
{ ASMJIT_X86_REG_16(X86Reg::kRegDr , 0) }
};
#undef REG_LIST_32
#undef REG_LIST_16
#undef REG_LIST_08
#undef REG_LIST_04
#undef REG
#undef ASMJIT_X86_REG_32
#undef ASMJIT_X86_REG_16
#undef ASMJIT_X86_REG_08
#undef ASMJIT_X86_REG_04
#undef ASMJIT_X86_REG_01
#undef ASMJIT_X86_REG_SIGNATURE
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
#include "../asmjit_apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64
#endif // ASMJIT_BUILD_X86

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,705 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86REGALLOC_P_H
#define _ASMJIT_X86_X86REGALLOC_P_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/codecompiler.h"
#include "../base/regalloc_p.h"
#include "../base/utils.h"
#include "../x86/x86assembler.h"
#include "../x86/x86compiler.h"
#include "../x86/x86misc.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::X86RAData]
// ============================================================================
struct X86RAData : public RAData {
  ASMJIT_INLINE X86RAData(uint32_t tiedTotal) noexcept : RAData(tiedTotal) {
    tiedIndex.reset();
    tiedCount.reset();
    inRegs.reset();
    outRegs.reset();
    clobberedRegs.reset();
  }
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get the array of all `TiedReg`s.
  ASMJIT_INLINE TiedReg* getTiedArray() const noexcept {
    return const_cast<TiedReg*>(tiedArray);
  }
  //! Get the `TiedReg` sub-array that belongs to register `kind`.
  ASMJIT_INLINE TiedReg* getTiedArrayByKind(uint32_t kind) const noexcept {
    return const_cast<TiedReg*>(tiedArray) + tiedIndex.get(kind);
  }
  //! Get the index of the first `TiedReg` of register `kind`.
  ASMJIT_INLINE uint32_t getTiedStart(uint32_t kind) const noexcept {
    return tiedIndex.get(kind);
  }
  //! Get how many `TiedReg`s of register `kind` are stored.
  ASMJIT_INLINE uint32_t getTiedCountByKind(uint32_t kind) const noexcept {
    return tiedCount.get(kind);
  }
  //! Get the `TiedReg` stored at the global `index`.
  ASMJIT_INLINE TiedReg* getTiedAt(uint32_t index) const noexcept {
    ASMJIT_ASSERT(index < tiedTotal);
    return getTiedArray() + index;
  }
  //! Get the `TiedReg` stored at `index` relative to register `kind`.
  ASMJIT_INLINE TiedReg* getTiedAtByKind(uint32_t kind, uint32_t index) const noexcept {
    ASMJIT_ASSERT(index < tiedCount._regs[kind]);
    return getTiedArrayByKind(kind) + index;
  }
  //! Store `tied` at the global `index`.
  ASMJIT_INLINE void setTiedAt(uint32_t index, TiedReg& tied) noexcept {
    ASMJIT_ASSERT(index < tiedTotal);
    tiedArray[index] = tied;
  }
  // --------------------------------------------------------------------------
  // [Utils]
  // --------------------------------------------------------------------------
  //! Linear search for the `TiedReg` that links `vreg`, null if not found.
  ASMJIT_INLINE TiedReg* findTied(VirtReg* vreg) const noexcept {
    TiedReg* array = getTiedArray();
    for (uint32_t i = 0, count = tiedTotal; i < count; i++) {
      if (array[i].vreg == vreg)
        return &array[i];
    }
    return nullptr;
  }
  //! Like `findTied()`, but searches only `TiedReg`s of register `kind`.
  ASMJIT_INLINE TiedReg* findTiedByKind(uint32_t kind, VirtReg* vreg) const noexcept {
    TiedReg* array = getTiedArrayByKind(kind);
    for (uint32_t i = 0, count = getTiedCountByKind(kind); i < count; i++) {
      if (array[i].vreg == vreg)
        return &array[i];
    }
    return nullptr;
  }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  //! Physical registers forced on input.
  //!
  //! If more than one register is set the variable's content has to be
  //! duplicated into all of them (the same variable was used by two or more
  //! operands). Duplicates are forgotten after register allocation finishes
  //! and are marked as non-assigned.
  X86RegMask inRegs;
  //! Physical registers forced on output.
  //!
  //! Each variable can only have a single special register on output;
  //! `X86RAData` accumulates the registers of all `TiedReg`s.
  X86RegMask outRegs;
  //! Registers clobbered (by a function call).
  X86RegMask clobberedRegs;
  //! Start index of `TiedReg`s per register kind.
  X86RegCount tiedIndex;
  //! Number of `TiedReg`s per register kind.
  X86RegCount tiedCount;
  //! Linked registers (allocated to hold `tiedTotal` items, see `newRAData`).
  TiedReg tiedArray[1];
};
// ============================================================================
// [asmjit::X86StateCell]
// ============================================================================
//! X86/X64 state-cell.
//! X86/X64 state-cell.
union X86StateCell {
  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------
  //! Zero the whole cell.
  ASMJIT_INLINE void reset() noexcept { _packed = 0; }
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get the stored state.
  ASMJIT_INLINE uint32_t getState() const noexcept { return _state; }
  //! Set the stored state (only the low 2 bits are kept by the bit-field).
  ASMJIT_INLINE void setState(uint32_t state) noexcept { _state = static_cast<uint8_t>(state); }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  //! All bits of the cell viewed as a single byte.
  uint8_t _packed;
  struct {
    uint8_t _state : 2;
    uint8_t _unused : 6;
  };
};
// ============================================================================
// [asmjit::X86RAState]
// ============================================================================
//! X86/X64 state.
//! X86/X64 state.
struct X86RAState : RAState {
  enum {
    //! Base index of GP registers.
    kGpIndex = 0,
    //! Count of GP registers.
    kGpCount = 16,
    //! Base index of MMX registers.
    kMmIndex = kGpIndex + kGpCount,
    //! Count of Mm registers.
    kMmCount = 8,
    //! Base index of XMM registers.
    kXmmIndex = kMmIndex + kMmCount,
    //! Count of XMM registers.
    kXmmCount = 16,
    //! Count of all registers in `X86RAState`.
    kAllCount = kXmmIndex + kXmmCount
  };
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get all allocated registers as a single flat list.
  ASMJIT_INLINE VirtReg** getList() {
    return _list;
  }
  //! Get the register list of the given register `kind` (null if unsupported).
  ASMJIT_INLINE VirtReg** getListByKind(uint32_t kind) {
    if (kind == X86Reg::kKindGp ) return _listGp;
    if (kind == X86Reg::kKindMm ) return _listMm;
    if (kind == X86Reg::kKindVec) return _listXmm;
    return nullptr;
  }
  // --------------------------------------------------------------------------
  // [Clear]
  // --------------------------------------------------------------------------
  //! Zero the whole state including `numCells` trailing `X86StateCell`s.
  ASMJIT_INLINE void reset(size_t numCells) {
    size_t size = kAllCount * sizeof(VirtReg*) +
                  2         * sizeof(X86RegMask) +
                  numCells  * sizeof(X86StateCell);
    ::memset(this, 0, size);
  }
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  union {
    //! List of all allocated variables in one array.
    VirtReg* _list[kAllCount];
    struct {
      //! Allocated GP registers.
      VirtReg* _listGp[kGpCount];
      //! Allocated MMX registers.
      VirtReg* _listMm[kMmCount];
      //! Allocated XMM registers.
      VirtReg* _listXmm[kXmmCount];
    };
  };
  //! Occupied registers (mask).
  X86RegMask _occupied;
  //! Modified registers (mask).
  X86RegMask _modified;
  //! Variables data, the length is stored in `X86RAPass`.
  X86StateCell _cells[1];
};
// ============================================================================
// [asmjit::X86RAPass]
// ============================================================================
#if defined(ASMJIT_DEBUG)
# define ASMJIT_X86_CHECK_STATE _checkState();
#else
# define ASMJIT_X86_CHECK_STATE
#endif // ASMJIT_DEBUG
//! \internal
//!
//! X86 register allocator pipeline.
//!
//! Takes care of generating function prologs and epilogs, and also performs
//! register allocation.
class X86RAPass : public RAPass {
public:
  ASMJIT_NONCOPYABLE(X86RAPass)
  typedef RAPass Base;
  //! Kind of register operation, used by the `emit...()` helpers below.
  enum RegOp {
    kRegOpMove,
    kRegOpLoad,
    kRegOpSave
  };
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  X86RAPass() noexcept;
  virtual ~X86RAPass() noexcept;
  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------
  virtual Error process(Zone* zone) noexcept override;
  virtual Error prepare(CCFunc* func) noexcept override;
  // --------------------------------------------------------------------------
  // [ArchInfo]
  // --------------------------------------------------------------------------
  //! Get the native GP register size (size of `_zsp`, i.e. 4 or 8 bytes).
  ASMJIT_INLINE uint32_t getGpSize() const noexcept { return _zsp.getSize(); }
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get compiler as `X86Compiler`.
  ASMJIT_INLINE X86Compiler* cc() const noexcept { return static_cast<X86Compiler*>(_cb); }
  //! Get clobbered registers (global).
  ASMJIT_INLINE uint32_t getClobberedRegs(uint32_t kind) noexcept { return _clobberedRegs.get(kind); }
  // --------------------------------------------------------------------------
  // [Helpers]
  // --------------------------------------------------------------------------
  //! Allocate `X86RAData` from `_zone` with room for `tiedTotal` `TiedReg`s
  //! (the struct already contains one, the over-allocation covers the rest).
  ASMJIT_INLINE X86RAData* newRAData(uint32_t tiedTotal) noexcept {
    return new(_zone->alloc(sizeof(X86RAData) + tiedTotal * sizeof(TiedReg))) X86RAData(tiedTotal);
  }
  // --------------------------------------------------------------------------
  // [Emit]
  // --------------------------------------------------------------------------
  // Tiny wrappers that call `X86Internal::emit...()`.
  // `reason` is a human-readable tag attached to the emitted code (logging).
  Error emitMove(VirtReg* vreg, uint32_t dstId, uint32_t srcId, const char* reason);
  Error emitLoad(VirtReg* vreg, uint32_t id, const char* reason);
  Error emitSave(VirtReg* vreg, uint32_t id, const char* reason);
  Error emitSwapGp(VirtReg* aVReg, VirtReg* bVReg, uint32_t aId, uint32_t bId, const char* reason) noexcept;
  Error emitImmToReg(uint32_t dstTypeId, uint32_t dstPhysId, const Imm* src) noexcept;
  Error emitImmToStack(uint32_t dstTypeId, const X86Mem* dst, const Imm* src) noexcept;
  Error emitRegToStack(uint32_t dstTypeId, const X86Mem* dst, uint32_t srcTypeId, uint32_t srcPhysId) noexcept;
  // --------------------------------------------------------------------------
  // [Register Management]
  // --------------------------------------------------------------------------
  // Debug-build sanity check of `_x86State` (see ASMJIT_X86_CHECK_STATE).
  void _checkState();
  // --------------------------------------------------------------------------
  // [Attach / Detach]
  // --------------------------------------------------------------------------
  //! Attach.
  //!
  //! Attach a register to the 'VirtReg', changing 'VirtReg' members to show
  //! that the variable is currently alive and linking variable with the
  //! current 'X86RAState'.
  template<int C>
  ASMJIT_INLINE void attach(VirtReg* vreg, uint32_t physId, bool modified) {
    ASMJIT_ASSERT(vreg->getKind() == C);
    ASMJIT_ASSERT(physId != Globals::kInvalidRegId);
    // Prevent Esp allocation if C==Gp.
    ASMJIT_ASSERT(C != X86Reg::kKindGp || physId != X86Gp::kIdSp);
    uint32_t regMask = Utils::mask(physId);
    vreg->setState(VirtReg::kStateReg);
    vreg->setModified(modified);
    vreg->setPhysId(physId);
    vreg->addHomeId(physId);
    _x86State.getListByKind(C)[physId] = vreg;
    _x86State._occupied.or_(C, regMask);
    // Shift the bool into the register's bit position of the modified mask.
    _x86State._modified.or_(C, static_cast<uint32_t>(modified) << physId);
    ASMJIT_X86_CHECK_STATE
  }
  //! Detach.
  //!
  //! The opposite of 'Attach'. Detach resets the members in 'VirtReg'
  //! (physId, state and changed flags) and unlinks the variable with the
  //! current 'X86RAState'.
  template<int C>
  ASMJIT_INLINE void detach(VirtReg* vreg, uint32_t physId, uint32_t vState) {
    ASMJIT_ASSERT(vreg->getKind() == C);
    ASMJIT_ASSERT(vreg->getPhysId() == physId);
    ASMJIT_ASSERT(vState != VirtReg::kStateReg);
    uint32_t regMask = Utils::mask(physId);
    vreg->setState(vState);
    vreg->resetPhysId();
    vreg->setModified(false);
    _x86State.getListByKind(C)[physId] = nullptr;
    _x86State._occupied.andNot(C, regMask);
    _x86State._modified.andNot(C, regMask);
    ASMJIT_X86_CHECK_STATE
  }
  // --------------------------------------------------------------------------
  // [Rebase]
  // --------------------------------------------------------------------------
  //! Rebase.
  //!
  //! Change the register of the 'VirtReg' changing also the current 'X86RAState'.
  //! Rebase is nearly identical to 'Detach' and 'Attach' sequence, but doesn't
  //! change the `VirtReg`s modified flag.
  template<int C>
  ASMJIT_INLINE void rebase(VirtReg* vreg, uint32_t newPhysId, uint32_t oldPhysId) {
    ASMJIT_ASSERT(vreg->getKind() == C);
    uint32_t newRegMask = Utils::mask(newPhysId);
    uint32_t oldRegMask = Utils::mask(oldPhysId);
    // XOR of both masks - both bits get flipped by the updates below.
    uint32_t bothRegMask = newRegMask ^ oldRegMask;
    vreg->setPhysId(newPhysId);
    _x86State.getListByKind(C)[oldPhysId] = nullptr;
    _x86State.getListByKind(C)[newPhysId] = vreg;
    _x86State._occupied.xor_(C, bothRegMask);
    // `-isModified()` is all-ones when modified, so the XOR only applies then.
    _x86State._modified.xor_(C, bothRegMask & -static_cast<int32_t>(vreg->isModified()));
    ASMJIT_X86_CHECK_STATE
  }
  // --------------------------------------------------------------------------
  // [Load / Save]
  // --------------------------------------------------------------------------
  //! Load.
  //!
  //! Load variable from its memory slot to a register, emitting 'Load'
  //! instruction and changing the variable state to allocated.
  template<int C>
  ASMJIT_INLINE void load(VirtReg* vreg, uint32_t physId) {
    // Can be only called if variable is not allocated.
    ASMJIT_ASSERT(vreg->getKind() == C);
    ASMJIT_ASSERT(vreg->getState() != VirtReg::kStateReg);
    ASMJIT_ASSERT(vreg->getPhysId() == Globals::kInvalidRegId);
    emitLoad(vreg, physId, "Load");
    attach<C>(vreg, physId, false);
    ASMJIT_X86_CHECK_STATE
  }
  //! Save.
  //!
  //! Save the variable into its home location, but keep it as allocated.
  template<int C>
  ASMJIT_INLINE void save(VirtReg* vreg) {
    ASMJIT_ASSERT(vreg->getKind() == C);
    ASMJIT_ASSERT(vreg->getState() == VirtReg::kStateReg);
    ASMJIT_ASSERT(vreg->getPhysId() != Globals::kInvalidRegId);
    uint32_t physId = vreg->getPhysId();
    uint32_t regMask = Utils::mask(physId);
    emitSave(vreg, physId, "Save");
    vreg->setModified(false);
    _x86State._modified.andNot(C, regMask);
    ASMJIT_X86_CHECK_STATE
  }
  // --------------------------------------------------------------------------
  // [Move / Swap]
  // --------------------------------------------------------------------------
  //! Move a register.
  //!
  //! Move register from one index to another, emitting 'Move' if needed. This
  //! function does nothing if register is already at the given index.
  template<int C>
  ASMJIT_INLINE void move(VirtReg* vreg, uint32_t newPhysId) {
    ASMJIT_ASSERT(vreg->getKind() == C);
    ASMJIT_ASSERT(vreg->getState() == VirtReg::kStateReg);
    ASMJIT_ASSERT(vreg->getPhysId() != Globals::kInvalidRegId);
    uint32_t oldPhysId = vreg->getPhysId();
    if (newPhysId != oldPhysId) {
      emitMove(vreg, newPhysId, oldPhysId, "Move");
      rebase<C>(vreg, newPhysId, oldPhysId);
    }
    ASMJIT_X86_CHECK_STATE
  }
  //! Swap two registers
  //!
  //! It's only possible to swap Gp registers.
  ASMJIT_INLINE void swapGp(VirtReg* aVReg, VirtReg* bVReg) {
    ASMJIT_ASSERT(aVReg != bVReg);
    ASMJIT_ASSERT(aVReg->getKind() == X86Reg::kKindGp);
    ASMJIT_ASSERT(aVReg->getState() == VirtReg::kStateReg);
    ASMJIT_ASSERT(aVReg->getPhysId() != Globals::kInvalidRegId);
    ASMJIT_ASSERT(bVReg->getKind() == X86Reg::kKindGp);
    ASMJIT_ASSERT(bVReg->getState() == VirtReg::kStateReg);
    ASMJIT_ASSERT(bVReg->getPhysId() != Globals::kInvalidRegId);
    uint32_t aIndex = aVReg->getPhysId();
    uint32_t bIndex = bVReg->getPhysId();
    emitSwapGp(aVReg, bVReg, aIndex, bIndex, "Swap");
    aVReg->setPhysId(bIndex);
    bVReg->setPhysId(aIndex);
    _x86State.getListByKind(X86Reg::kKindGp)[aIndex] = bVReg;
    _x86State.getListByKind(X86Reg::kKindGp)[bIndex] = aVReg;
    // If exactly one of the two was modified, both modified bits must flip.
    uint32_t m = aVReg->isModified() ^ bVReg->isModified();
    _x86State._modified.xor_(X86Reg::kKindGp, (m << aIndex) | (m << bIndex));
    ASMJIT_X86_CHECK_STATE
  }
  // --------------------------------------------------------------------------
  // [Alloc / Spill]
  // --------------------------------------------------------------------------
  //! Alloc.
  //!
  //! Allocate `vreg` into the physical register `physId`, loading or moving
  //! its current content as needed; no-op if it's already there.
  template<int C>
  ASMJIT_INLINE void alloc(VirtReg* vreg, uint32_t physId) {
    ASMJIT_ASSERT(vreg->getKind() == C);
    ASMJIT_ASSERT(physId != Globals::kInvalidRegId);
    uint32_t oldPhysId = vreg->getPhysId();
    uint32_t oldState = vreg->getState();
    uint32_t regMask = Utils::mask(physId);
    ASMJIT_ASSERT(_x86State.getListByKind(C)[physId] == nullptr || physId == oldPhysId);
    if (oldState != VirtReg::kStateReg) {
      if (oldState == VirtReg::kStateMem)
        emitLoad(vreg, physId, "Alloc");
      vreg->setModified(false);
    }
    else if (oldPhysId != physId) {
      emitMove(vreg, physId, oldPhysId, "Alloc");
      _x86State.getListByKind(C)[oldPhysId] = nullptr;
      // Flip both the old and the new bit in the masks below.
      regMask ^= Utils::mask(oldPhysId);
    }
    else {
      // Already allocated in `physId` - nothing to do.
      ASMJIT_X86_CHECK_STATE
      return;
    }
    vreg->setState(VirtReg::kStateReg);
    vreg->setPhysId(physId);
    vreg->addHomeId(physId);
    _x86State.getListByKind(C)[physId] = vreg;
    _x86State._occupied.xor_(C, regMask);
    _x86State._modified.xor_(C, regMask & -static_cast<int32_t>(vreg->isModified()));
    ASMJIT_X86_CHECK_STATE
  }
  //! Spill.
  //!
  //! Spill variable/register, saves the content to the memory-home if modified.
  template<int C>
  ASMJIT_INLINE void spill(VirtReg* vreg) {
    ASMJIT_ASSERT(vreg->getKind() == C);
    if (vreg->getState() != VirtReg::kStateReg) {
      // Not in a register - nothing to spill.
      ASMJIT_X86_CHECK_STATE
      return;
    }
    uint32_t physId = vreg->getPhysId();
    ASMJIT_ASSERT(physId != Globals::kInvalidRegId);
    ASMJIT_ASSERT(_x86State.getListByKind(C)[physId] == vreg);
    if (vreg->isModified())
      emitSave(vreg, physId, "Spill");
    detach<C>(vreg, physId, VirtReg::kStateMem);
    ASMJIT_X86_CHECK_STATE
  }
  // --------------------------------------------------------------------------
  // [Modify]
  // --------------------------------------------------------------------------
  //! Mark the (allocated) `vreg` as modified so it gets saved when spilled.
  template<int C>
  ASMJIT_INLINE void modify(VirtReg* vreg) {
    ASMJIT_ASSERT(vreg->getKind() == C);
    uint32_t physId = vreg->getPhysId();
    uint32_t regMask = Utils::mask(physId);
    vreg->setModified(true);
    _x86State._modified.or_(C, regMask);
    ASMJIT_X86_CHECK_STATE
  }
  // --------------------------------------------------------------------------
  // [Unuse]
  // --------------------------------------------------------------------------
  //! Unuse.
  //!
  //! Unuse variable, it will be detached if it's allocated then its state
  //! will be changed to VirtReg::kStateNone.
  template<int C>
  ASMJIT_INLINE void unuse(VirtReg* vreg, uint32_t vState = VirtReg::kStateNone) {
    ASMJIT_ASSERT(vreg->getKind() == C);
    ASMJIT_ASSERT(vState != VirtReg::kStateReg);
    uint32_t physId = vreg->getPhysId();
    if (physId != Globals::kInvalidRegId)
      detach<C>(vreg, physId, vState);
    else
      vreg->setState(vState);
    ASMJIT_X86_CHECK_STATE
  }
  // --------------------------------------------------------------------------
  // [State]
  // --------------------------------------------------------------------------
  //! Get state as `X86RAState`.
  ASMJIT_INLINE X86RAState* getState() const { return const_cast<X86RAState*>(&_x86State); }
  virtual void loadState(RAState* src) override;
  virtual RAState* saveState() override;
  virtual void switchState(RAState* src) override;
  virtual void intersectStates(RAState* a, RAState* b) override;
  // --------------------------------------------------------------------------
  // [Memory]
  // --------------------------------------------------------------------------
  //! Get a memory operand addressing `vreg`s home slot (ensures the cell
  //! exists via `getVarCell()`, whose return value is intentionally dropped).
  ASMJIT_INLINE X86Mem getVarMem(VirtReg* vreg) {
    (void)getVarCell(vreg);
    return X86Mem(Init,
      cc()->_nativeGpReg.getType(), vreg->getId(),
      Reg::kRegNone, kInvalidValue,
      0, 0, Mem::kSignatureMemRegHomeFlag);
  }
  // --------------------------------------------------------------------------
  // [Fetch]
  // --------------------------------------------------------------------------
  virtual Error fetch() override;
  // --------------------------------------------------------------------------
  // [Annotate]
  // --------------------------------------------------------------------------
  virtual Error annotate() override;
  // --------------------------------------------------------------------------
  // [Translate]
  // --------------------------------------------------------------------------
  virtual Error translate() override;
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  //! Count of X86/X64 registers.
  X86RegCount _regCount;
  //! X86/X64 stack-pointer (esp or rsp).
  X86Gp _zsp;
  //! X86/X64 frame-pointer (ebp or rbp).
  X86Gp _zbp;
  //! X86/X64 specific compiler state, linked to `_state`.
  X86RAState _x86State;
  //! Clobbered registers (for the whole function).
  X86RegMask _clobberedRegs;
  //! Global allocable registers mask.
  uint32_t _gaRegs[Globals::kMaxVRegKinds];
  //! Whether AVX code generation is enabled - presumably initialized during
  //! `prepare()`; confirm in the implementation file.
  bool _avxEnabled;
  //! Function variables base pointer (register).
  uint8_t _varBaseRegId;
  //! Function variables base offset.
  int32_t _varBaseOffset;
  //! Temporary string builder used for logging.
  StringBuilderTmp<256> _stringBuilder;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_X86_X86REGALLOC_P_H

View File

@@ -0,0 +1,100 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_DISABLE_BUILDER)
// [Dependencies]
#include "../x86/x86inst.h"
#include "../x86/x86operand.h"
#include "../x86/x86ssetoavxpass_p.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86SseToAvxPass]
// ============================================================================
class X86SseToAvxPass : public CBPass {
  ASMJIT_NONCOPYABLE(X86SseToAvxPass)
public:
  X86SseToAvxPass() noexcept : CBPass("SseToAvx") {}
  virtual Error process(Zone* zone) noexcept override;
  //! Bits that `probeRegs()` can report.
  enum ProbeMask {
    kProbeMmx = 1U << X86Reg::kRegMm, //!< Instruction uses MMX registers.
    kProbeXmm = 1U << X86Reg::kRegXmm //!< Instruction uses XMM registers.
  };
  //! Scan `opCount` operands and return a mask of register types they use.
  static ASMJIT_INLINE uint32_t probeRegs(const Operand* opArray, uint32_t opCount) noexcept {
    uint32_t mask = 0;
    const Operand* opEnd = opArray + opCount;
    while (opArray != opEnd) {
      const Operand& op = *opArray++;
      if (op.isReg())
        mask |= Utils::mask(static_cast<const Reg&>(op).getType());
    }
    return mask;
  }
};
// Walks the CodeBuilder node list and (eventually) rewrites SSE instructions
// into their AVX forms. Currently it only classifies nodes; the actual
// translation is still TODO.
//
// BUG FIX: the original `while (node_)` loop advanced `node_` at the end of
// the body, so every `continue` below re-processed the same node forever
// (infinite loop on the first invalid/non-SSE/non-XMM instruction). A `for`
// loop advances on `continue` as well.
Error X86SseToAvxPass::process(Zone* zone) noexcept {
  ASMJIT_UNUSED(zone);
  for (CBNode* node_ = cb()->getFirstNode(); node_; node_ = node_->getNext()) {
    if (node_->getType() != CBNode::kNodeInst) continue;
    CBInst* node = static_cast<CBInst*>(node_);
    uint32_t instId = node->getInstId();
    // Skip invalid and high-level instructions; we don't care here.
    if (!X86Inst::isDefinedId(instId)) continue;
    // Skip non-SSE instructions.
    const X86Inst& instData = X86Inst::getInst(instId);
    if (!instData.isSseFamily()) continue;
    // Skip instructions that don't use XMM registers.
    uint32_t regs = probeRegs(node->getOpArray(), node->getOpCount());
    if (!(regs & kProbeXmm)) continue;
    if (!(regs & kProbeMmx)) {
      // This is the common case.
      const X86Inst::SseData& sseData = instData.getSseData();
      ASMJIT_UNUSED(sseData);
      // TODO: Wait for some fixes in CBInst first.
    }
    else {
      // If this instruction uses MMX register it means that it's a conversion
      // between MMX and XMM (and vice versa), this cannot be directly translated
      // to AVX as there is no such AVX instruction that works with MMX registers.
      // TODO: Needs a mem-slot to be able to do this.
    }
  }
  return kErrorOk;
}
//! Append a new `X86SseToAvxPass` instance to `cb`s pass list.
Error X86SseToAvxPassInit::add(CodeBuilder* cb) noexcept { return cb->addPassT<X86SseToAvxPass>(); }
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86 && !ASMJIT_DISABLE_BUILDER

View File

@@ -0,0 +1,38 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86SSETOAVXPASS_P_H
#define _ASMJIT_X86_X86SSETOAVXPASS_P_H
#include "../asmjit_build.h"
// BUG FIX: the guard previously tested `ASMJIT_DISABLE_BUIILDER` (typo with a
// doubled 'I'), so defining ASMJIT_DISABLE_BUILDER never disabled this header.
// Use the correct macro, matching x86ssetoavxpass.cpp.
#if !defined(ASMJIT_DISABLE_BUILDER)
// [Dependencies]
#include "../base/codebuilder.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
//! \internal
//!
//! Registers the SSE-to-AVX translation pass with a CodeBuilder.
struct X86SseToAvxPassInit {
  //! Add a new `X86SseToAvxPass` instance to `cb`s pass list.
  static Error add(CodeBuilder* cb) noexcept;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_BUILDER
#endif // _ASMJIT_X86_X86SSETOAVXPASS_P_H

View File

@@ -1,77 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// This file is used to test opcodes generated by AsmJit. Output can be
// disassembled in your IDE or by your favourite disassembler. Instructions
// are grouped by category and then sorted alphabetically.
// [Dependencies]
#include "../asmjit/asmjit.h"
#include "./asmjit_test_opcode.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
typedef void (*VoidFunc)(void);
// One opcode-dump configuration: target architecture plus REX options
// forwarded to asmgen::opcode().
struct OpcodeDumpInfo {
  uint32_t arch;  // Target architecture id (asmjit::kArch...).
  bool useRex1;   // Presumably selects a REX-prefix variant in asmgen::opcode() - TODO confirm.
  bool useRex2;   // Presumably selects a second REX-prefix variant - TODO confirm.
};
// Translate an asmjit architecture id into a printable name.
static const char* archIdToString(uint32_t archId) {
  if (archId == asmjit::kArchNone ) return "None";
  if (archId == asmjit::kArchX86  ) return "X86";
  if (archId == asmjit::kArchX64  ) return "X64";
  if (archId == asmjit::kArchArm32) return "ARM32";
  if (archId == asmjit::kArchArm64) return "ARM64";
  return "<unknown>";
}
// Generates every opcode test case for each enabled architecture/REX
// combination, logs the machine code in binary form, and executes the
// generated function only when the target matches the host architecture.
int main(int argc, char* argv[]) {
  asmjit::FileLogger logger(stdout);
  logger.addOptions(asmjit::Logger::kOptionBinaryForm);
  OpcodeDumpInfo infoList[] = {
# if defined(ASMJIT_BUILD_X86)
    { asmjit::kArchX86, false, false },
# endif // ASMJIT_BUILD_X86
# if defined(ASMJIT_BUILD_X64)
    { asmjit::kArchX64, false, false },
    { asmjit::kArchX64, false, true },
    { asmjit::kArchX64, true , false },
    { asmjit::kArchX64, true , true }
# endif // ASMJIT_BUILD_X64
  };
  // FIX: use `size_t` for the index - ASMJIT_ARRAY_SIZE() yields an unsigned
  // value, so the original `int i` caused a signed/unsigned comparison.
  for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(infoList); i++) {
    const OpcodeDumpInfo& info = infoList[i];
    printf("Opcodes [ARCH=%s REX1=%s REX2=%s]\n",
      archIdToString(info.arch),
      info.useRex1 ? "true" : "false",
      info.useRex2 ? "true" : "false");
    asmjit::JitRuntime runtime;
    asmjit::X86Assembler a(&runtime, info.arch);
    a.setLogger(&logger);
    asmgen::opcode(a, info.useRex1, info.useRex2);
    VoidFunc p = asmjit_cast<VoidFunc>(a.make());
    // Only run if disassembly makes sense.
    if (info.arch == asmjit::kArchHost)
      p();
    runtime.release((void*)p);
  }
  return 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,249 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Dependencies]
#include "../asmjit/asmjit.h"
// ============================================================================
// [DumpCpu]
// ============================================================================
// Maps an asmjit CPU feature id to a human-readable name (see dumpCpuFeatures).
struct DumpCpuFeature {
  uint32_t feature;  // asmjit::CpuInfo feature id.
  const char* name;  // Printable feature name.
};
// Print the name of every feature in `data[0..count)` that `cpu` reports.
static void dumpCpuFeatures(const asmjit::CpuInfo& cpu, const DumpCpuFeature* data, size_t count) {
  const DumpCpuFeature* end = data + count;
  while (data != end) {
    if (cpu.hasFeature(data->feature))
      INFO(" %s", data->name);
    data++;
  }
}
// Prints host CPU identification followed by the feature flags the host
// reports, for whichever architecture this binary was compiled for.
static void dumpCpu(void) {
  const asmjit::CpuInfo& cpu = asmjit::CpuInfo::getHost();
  INFO("Host CPU:");
  INFO("  Vendor string          : %s", cpu.getVendorString());
  INFO("  Brand string           : %s", cpu.getBrandString());
  INFO("  Family                 : %u", cpu.getFamily());
  INFO("  Model                  : %u", cpu.getModel());
  INFO("  Stepping               : %u", cpu.getStepping());
  INFO("  HW-Threads Count       : %u", cpu.getHwThreadsCount());
  INFO("");
  // --------------------------------------------------------------------------
  // [ARM / ARM64]
  // --------------------------------------------------------------------------
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
  // Feature-id -> name table for ARM targets.
  static const DumpCpuFeature armFeaturesList[] = {
    { asmjit::CpuInfo::kArmFeatureV6              , "ARMv6"                 },
    { asmjit::CpuInfo::kArmFeatureV7              , "ARMv7"                 },
    { asmjit::CpuInfo::kArmFeatureV8              , "ARMv8"                 },
    { asmjit::CpuInfo::kArmFeatureTHUMB           , "THUMB"                 },
    { asmjit::CpuInfo::kArmFeatureTHUMB2          , "THUMBv2"               },
    { asmjit::CpuInfo::kArmFeatureVFP2            , "VFPv2"                 },
    { asmjit::CpuInfo::kArmFeatureVFP3            , "VFPv3"                 },
    { asmjit::CpuInfo::kArmFeatureVFP4            , "VFPv4"                 },
    { asmjit::CpuInfo::kArmFeatureVFP_D32         , "VFP D32"               },
    { asmjit::CpuInfo::kArmFeatureNEON            , "NEON"                  },
    { asmjit::CpuInfo::kArmFeatureDSP             , "DSP"                   },
    { asmjit::CpuInfo::kArmFeatureIDIV            , "IDIV"                  },
    { asmjit::CpuInfo::kArmFeatureAES             , "AES"                   },
    { asmjit::CpuInfo::kArmFeatureCRC32           , "CRC32"                 },
    { asmjit::CpuInfo::kArmFeatureSHA1            , "SHA1"                  },
    { asmjit::CpuInfo::kArmFeatureSHA256          , "SHA256"                },
    { asmjit::CpuInfo::kArmFeatureAtomics64       , "64-bit atomics"        }
  };
  INFO("ARM Features:");
  dumpCpuFeatures(cpu, armFeaturesList, ASMJIT_ARRAY_SIZE(armFeaturesList));
  INFO("");
#endif
  // --------------------------------------------------------------------------
  // [X86 / X64]
  // --------------------------------------------------------------------------
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
  // Feature-id -> name table for X86/X64 targets.
  static const DumpCpuFeature x86FeaturesList[] = {
    { asmjit::CpuInfo::kX86FeatureNX              , "NX (Non-Execute Bit)"  },
    { asmjit::CpuInfo::kX86FeatureMT              , "MT (Multi-Threading)"  },
    { asmjit::CpuInfo::kX86FeatureRDTSC           , "RDTSC"                 },
    { asmjit::CpuInfo::kX86FeatureRDTSCP          , "RDTSCP"                },
    { asmjit::CpuInfo::kX86FeatureCMOV            , "CMOV"                  },
    { asmjit::CpuInfo::kX86FeatureCMPXCHG8B       , "CMPXCHG8B"             },
    { asmjit::CpuInfo::kX86FeatureCMPXCHG16B      , "CMPXCHG16B"            },
    { asmjit::CpuInfo::kX86FeatureCLFLUSH         , "CLFLUSH"               },
    { asmjit::CpuInfo::kX86FeatureCLFLUSH_OPT     , "CLFLUSH (Opt)"         },
    { asmjit::CpuInfo::kX86FeatureCLWB            , "CLWB"                  },
    { asmjit::CpuInfo::kX86FeaturePCOMMIT         , "PCOMMIT"               },
    { asmjit::CpuInfo::kX86FeaturePREFETCH        , "PREFETCH"              },
    { asmjit::CpuInfo::kX86FeaturePREFETCHWT1     , "PREFETCHWT1"           },
    { asmjit::CpuInfo::kX86FeatureLAHF_SAHF       , "LAHF/SAHF"             },
    { asmjit::CpuInfo::kX86FeatureFXSR            , "FXSR"                  },
    { asmjit::CpuInfo::kX86FeatureFXSR_OPT        , "FXSR (Opt)"            },
    { asmjit::CpuInfo::kX86FeatureMMX             , "MMX"                   },
    { asmjit::CpuInfo::kX86FeatureMMX2            , "MMX2"                  },
    { asmjit::CpuInfo::kX86Feature3DNOW           , "3DNOW"                 },
    { asmjit::CpuInfo::kX86Feature3DNOW2          , "3DNOW2"                },
    { asmjit::CpuInfo::kX86FeatureSSE             , "SSE"                   },
    { asmjit::CpuInfo::kX86FeatureSSE2            , "SSE2"                  },
    { asmjit::CpuInfo::kX86FeatureSSE3            , "SSE3"                  },
    { asmjit::CpuInfo::kX86FeatureSSSE3           , "SSSE3"                 },
    { asmjit::CpuInfo::kX86FeatureSSE4A           , "SSE4A"                 },
    { asmjit::CpuInfo::kX86FeatureSSE4_1          , "SSE4.1"                },
    { asmjit::CpuInfo::kX86FeatureSSE4_2          , "SSE4.2"                },
    { asmjit::CpuInfo::kX86FeatureMSSE            , "Misaligned SSE"        },
    { asmjit::CpuInfo::kX86FeatureMONITOR         , "MONITOR/MWAIT"         },
    { asmjit::CpuInfo::kX86FeatureMOVBE           , "MOVBE"                 },
    { asmjit::CpuInfo::kX86FeaturePOPCNT          , "POPCNT"                },
    { asmjit::CpuInfo::kX86FeatureLZCNT           , "LZCNT"                 },
    { asmjit::CpuInfo::kX86FeatureAESNI           , "AESNI"                 },
    { asmjit::CpuInfo::kX86FeaturePCLMULQDQ       , "PCLMULQDQ"             },
    { asmjit::CpuInfo::kX86FeatureRDRAND          , "RDRAND"                },
    { asmjit::CpuInfo::kX86FeatureRDSEED          , "RDSEED"                },
    { asmjit::CpuInfo::kX86FeatureSMAP            , "SMAP"                  },
    { asmjit::CpuInfo::kX86FeatureSMEP            , "SMEP"                  },
    { asmjit::CpuInfo::kX86FeatureSHA             , "SHA"                   },
    { asmjit::CpuInfo::kX86FeatureXSAVE           , "XSAVE"                 },
    { asmjit::CpuInfo::kX86FeatureXSAVE_OS        , "XSAVE (OS)"            },
    { asmjit::CpuInfo::kX86FeatureAVX             , "AVX"                   },
    { asmjit::CpuInfo::kX86FeatureAVX2            , "AVX2"                  },
    { asmjit::CpuInfo::kX86FeatureF16C            , "F16C"                  },
    { asmjit::CpuInfo::kX86FeatureFMA3            , "FMA3"                  },
    { asmjit::CpuInfo::kX86FeatureFMA4            , "FMA4"                  },
    { asmjit::CpuInfo::kX86FeatureXOP             , "XOP"                   },
    { asmjit::CpuInfo::kX86FeatureBMI             , "BMI"                   },
    { asmjit::CpuInfo::kX86FeatureBMI2            , "BMI2"                  },
    { asmjit::CpuInfo::kX86FeatureADX             , "ADX"                   },
    { asmjit::CpuInfo::kX86FeatureTBM             , "TBM"                   },
    { asmjit::CpuInfo::kX86FeatureMPX             , "MPX"                   },
    { asmjit::CpuInfo::kX86FeatureHLE             , "HLE"                   },
    { asmjit::CpuInfo::kX86FeatureRTM             , "RTM"                   },
    { asmjit::CpuInfo::kX86FeatureERMS            , "ERMS"                  },
    { asmjit::CpuInfo::kX86FeatureFSGSBASE        , "FS/GS Base"            },
    { asmjit::CpuInfo::kX86FeatureAVX512F         , "AVX512F"               },
    { asmjit::CpuInfo::kX86FeatureAVX512CD        , "AVX512CD"              },
    { asmjit::CpuInfo::kX86FeatureAVX512PF        , "AVX512PF"              },
    { asmjit::CpuInfo::kX86FeatureAVX512ER        , "AVX512ER"              },
    { asmjit::CpuInfo::kX86FeatureAVX512DQ        , "AVX512DQ"              },
    { asmjit::CpuInfo::kX86FeatureAVX512BW        , "AVX512BW"              },
    { asmjit::CpuInfo::kX86FeatureAVX512VL        , "AVX512VL"              },
    { asmjit::CpuInfo::kX86FeatureAVX512IFMA      , "AVX512IFMA"            },
    { asmjit::CpuInfo::kX86FeatureAVX512VBMI      , "AVX512VBMI"            }
  };
  INFO("X86 Specific:");
  INFO("  Processor Type         : %u", cpu.getX86ProcessorType());
  INFO("  Brand Index            : %u", cpu.getX86BrandIndex());
  INFO("  CL Flush Cache Line    : %u", cpu.getX86FlushCacheLineSize());
  INFO("  Max logical Processors : %u", cpu.getX86MaxLogicalProcessors());
  INFO("");
  INFO("X86 Features:");
  dumpCpuFeatures(cpu, x86FeaturesList, ASMJIT_ARRAY_SIZE(x86FeaturesList));
  INFO("");
#endif
}
// ============================================================================
// [DumpSizeOf]
// ============================================================================

// Prints one table row: the stringified type name (left-aligned) and its
// size in bytes.
#define DUMP_TYPE(_Type_) \
INFO("  %-27s: %u", #_Type_, static_cast<uint32_t>(sizeof(_Type_)))

// Dumps sizeof() of fundamental C/C++ types and of the main AsmJit classes.
// Diagnostic output only - unexpected sizes when building with a new
// compiler/ABI usually indicate packing or configuration differences.
static void dumpSizeOf(void) {
INFO("SizeOf Types:");
DUMP_TYPE(int8_t);
DUMP_TYPE(int16_t);
DUMP_TYPE(int32_t);
DUMP_TYPE(int64_t);
DUMP_TYPE(int);
DUMP_TYPE(long);
DUMP_TYPE(size_t);
DUMP_TYPE(intptr_t);
DUMP_TYPE(float);
DUMP_TYPE(double);
DUMP_TYPE(void*);
DUMP_TYPE(asmjit::Ptr);
DUMP_TYPE(asmjit::SignedPtr);
INFO("");

INFO("SizeOf Base:");
DUMP_TYPE(asmjit::Assembler);
DUMP_TYPE(asmjit::ConstPool);
DUMP_TYPE(asmjit::LabelData);
DUMP_TYPE(asmjit::RelocData);
DUMP_TYPE(asmjit::Runtime);
DUMP_TYPE(asmjit::Zone);
INFO("");

INFO("SizeOf Operand:");
DUMP_TYPE(asmjit::Operand);
DUMP_TYPE(asmjit::Reg);
DUMP_TYPE(asmjit::Var);
DUMP_TYPE(asmjit::BaseMem);
DUMP_TYPE(asmjit::Imm);
DUMP_TYPE(asmjit::Label);
INFO("");

// Compiler infrastructure (HL* node types) is only compiled in when the
// Compiler hasn't been disabled at build time.
#if !defined(ASMJIT_DISABLE_COMPILER)
INFO("SizeOf Compiler:");
DUMP_TYPE(asmjit::Compiler);
DUMP_TYPE(asmjit::HLNode);
DUMP_TYPE(asmjit::HLInst);
DUMP_TYPE(asmjit::HLJump);
DUMP_TYPE(asmjit::HLData);
DUMP_TYPE(asmjit::HLAlign);
DUMP_TYPE(asmjit::HLLabel);
DUMP_TYPE(asmjit::HLComment);
DUMP_TYPE(asmjit::HLSentinel);
DUMP_TYPE(asmjit::HLFunc);
DUMP_TYPE(asmjit::HLCall);
DUMP_TYPE(asmjit::FuncDecl);
DUMP_TYPE(asmjit::FuncInOut);
DUMP_TYPE(asmjit::FuncPrototype);
INFO("");
#endif // !ASMJIT_DISABLE_COMPILER

// --------------------------------------------------------------------------
// [X86/X64]
// --------------------------------------------------------------------------

#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)
INFO("SizeOf X86/X64:");
DUMP_TYPE(asmjit::X86Assembler);
DUMP_TYPE(asmjit::X86InstInfo);
DUMP_TYPE(asmjit::X86InstExtendedInfo);
#if !defined(ASMJIT_DISABLE_COMPILER)
DUMP_TYPE(asmjit::X86Compiler);
DUMP_TYPE(asmjit::X86CallNode);
DUMP_TYPE(asmjit::X86FuncNode);
DUMP_TYPE(asmjit::X86FuncDecl);
#endif // !ASMJIT_DISABLE_COMPILER
INFO("");
#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64
}
// DUMP_TYPE is local to this report; undefine to avoid leaking the macro.
#undef DUMP_TYPE
// ============================================================================
// [Main]
// ============================================================================

// Callback handed to BrokenAPI::run(); executed once before any unit test.
// Prints CPU information first, then the sizeof() report.
static void onBeforeRun(void) {
dumpCpu();
dumpSizeOf();
}
// Entry point - prints a banner and delegates to the BrokenAPI test driver,
// which calls onBeforeRun() once and then executes all registered tests.
// Returns the driver's result as the process exit code.
int main(int argc, const char* argv[]) {
  INFO("AsmJit Unit-Test\n\n");
  const int testResult = BrokenAPI::run(argc, argv, onBeforeRun);
  return testResult;
}

View File

@@ -1,178 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.

// [Guard]
#ifndef _TEST_GENBLEND_H
#define _TEST_GENBLEND_H

// [Dependencies]
#include "../asmjit/asmjit.h"

namespace asmgen {

// Generate a typical alpha blend function using SSE2 instruction set. Used
// for benchmarking and also in test86. The generated code should be stable
// and fully functional.
//
// The emitted function has the prototype (see FuncBuilder3 below):
//   void blend(void* dst, const void* src, size_t n);
// and blends `n` 32-bit pixels from `src` over `dst`. The source is added
// without being scaled by its own alpha, so the pixels are presumably
// premultiplied ARGB32 - TODO confirm against callers.
static void blend(asmjit::X86Compiler& c) {
using namespace asmjit;
using namespace asmjit::x86;

// Virtual registers; physical registers are assigned by the allocator.
X86GpVar dst = c.newIntPtr("dst");
X86GpVar src = c.newIntPtr("src");
X86GpVar i = c.newIntPtr("i");
X86GpVar j = c.newIntPtr("j");
X86GpVar t = c.newIntPtr("t");

X86XmmVar x0 = c.newXmm("x0");
X86XmmVar x1 = c.newXmm("x1");
X86XmmVar y0 = c.newXmm("y0");
X86XmmVar a0 = c.newXmm("a0");
X86XmmVar a1 = c.newXmm("a1");

// Constants loaded once from the L_Data block emitted at the end.
X86XmmVar cZero = c.newXmm("cZero");
X86XmmVar cMul255A = c.newXmm("cMul255A");
X86XmmVar cMul255M = c.newXmm("cMul255M");

Label L_SmallLoop = c.newLabel();
Label L_SmallEnd = c.newLabel();
Label L_LargeLoop = c.newLabel();
Label L_LargeEnd = c.newLabel();
Label L_Data = c.newLabel();

c.addFunc(FuncBuilder3<Void, void*, const void*, size_t>(c.getRuntime()->getCdeclConv()));
c.setArg(0, dst);
c.setArg(1, src);
c.setArg(2, i);

c.alloc(dst);
c.alloc(src);
c.alloc(i);

// How many pixels have to be processed to make the loop aligned.
// j = ((-dst) & 15) >> 2, i.e. pixels until `dst` is 16-byte aligned.
c.lea(t, ptr(L_Data));
c.xor_(j, j);
c.xorps(cZero, cZero);
c.sub(j, dst);
c.movaps(cMul255A, ptr(t, 0)); // 0x0080 in every 16-bit lane (see L_Data).
c.and_(j, 15);
c.movaps(cMul255M, ptr(t, 16)); // 0x0101 in every 16-bit lane (see L_Data).
c.shr(j, 2);
c.jz(L_SmallEnd);

// j = min(i, j).
c.cmp(j, i);
c.cmovg(j, i);

// i -= j.
c.sub(i, j);

// Small loop - one pixel per iteration. Used both to reach alignment and,
// via the jump at L_LargeEnd, for the 1-3 trailing pixels.
c.bind(L_SmallLoop);

c.pcmpeqb(a0, a0); // a0 = all ones.
c.movd(y0, ptr(src));

c.pxor(a0, y0); // a0 = ~src.
c.movd(x0, ptr(dst));

c.psrlw(a0, 8); // Keep the high byte of each word (inverted G/A bytes).
c.punpcklbw(x0, cZero); // Widen dst bytes to 16-bit words.

c.pshuflw(a0, a0, X86Util::shuffle(1, 1, 1, 1)); // Broadcast inverted alpha.
c.punpcklbw(y0, cZero); // Widen src bytes to 16-bit words.

c.pmullw(x0, a0); // dst * (255 - alpha) per channel.
// (x + 0x80) * 0x0101 >> 16 approximates x / 255 (rounding divide trick).
c.paddsw(x0, cMul255A);
c.pmulhuw(x0, cMul255M);

c.paddw(x0, y0); // + src (premultiplied "over").
c.packuswb(x0, x0); // Narrow words back to bytes with saturation.

c.movd(ptr(dst), x0);

c.add(dst, 4);
c.add(src, 4);

c.dec(j);
c.jnz(L_SmallLoop);

// Second section, prepare for an aligned loop.
c.bind(L_SmallEnd);
c.test(i, i);
c.mov(j, i);
c.jz(c.getFunc()->getExitLabel()); // Done if no pixels remain.

c.and_(j, 3); // j = tail pixels (i % 4).
c.shr(i, 2); // i = 4-pixel iterations.
c.jz(L_LargeEnd);

// Aligned loop - 4 pixels (16 bytes) per iteration; `dst` is accessed with
// movaps (aligned), `src` with movups (possibly unaligned).
c.bind(L_LargeLoop);

c.movups(y0, ptr(src));
c.pcmpeqb(a0, a0);
c.movaps(x0, ptr(dst));

c.xorps(a0, y0); // a0 = ~src.
c.movaps(x1, x0);

c.psrlw(a0, 8); // Inverted G/A bytes per word.
c.punpcklbw(x0, cZero); // Low two dst pixels -> words.

c.movaps(a1, a0);
c.punpcklwd(a0, a0); // Duplicate words for the low two pixels.

c.punpckhbw(x1, cZero); // High two dst pixels -> words.
c.punpckhwd(a1, a1); // Duplicate words for the high two pixels.

// Broadcast each pixel's inverted alpha across its four channel words.
c.pshufd(a0, a0, X86Util::shuffle(3, 3, 1, 1));
c.pshufd(a1, a1, X86Util::shuffle(3, 3, 1, 1));

c.pmullw(x0, a0);
c.pmullw(x1, a1);

// Same (x + 0x80) * 0x0101 >> 16 divide-by-255 approximation as above.
c.paddsw(x0, cMul255A);
c.paddsw(x1, cMul255A);

c.pmulhuw(x0, cMul255M);
c.pmulhuw(x1, cMul255M);

c.add(src, 16);
c.packuswb(x0, x1);

// NOTE(review): unlike the small loop, `src` is added AFTER packuswb, so
// paddw operates on packed byte data and a per-byte overflow could carry
// into the neighbouring byte - verify this ordering is intentional.
c.paddw(x0, y0);
c.movaps(ptr(dst), x0);

c.add(dst, 16);

c.dec(i);
c.jnz(L_LargeLoop);

c.bind(L_LargeEnd);
c.test(j, j);
c.jnz(L_SmallLoop); // Handle the remaining 1-3 tail pixels.

c.endFunc();

// Data - two 16-byte constants referenced via L_Data at function entry.
c.align(kAlignData, 16);
c.bind(L_Data);
c.dxmm(Vec128::fromSW(0x0080)); // cMul255A - rounding bias.
c.dxmm(Vec128::fromSW(0x0101)); // cMul255M - multiplier (x * 257 >> 16).
}

} // asmgen namespace

// [Guard]
#endif // _TEST_GENBLEND_H

Some files were not shown because too many files have changed in this diff. Show More