diff --git a/.travis.yml b/.travis.yml index b0cade9..b9928b3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,15 +21,26 @@ matrix: compiler: gcc before_install: - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo add-apt-repository ppa:kubuntu-ppa/backports -y; fi; - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get -qq update; fi; - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get -qq install cmake gcc-multilib g++-multilib valgrind; fi; + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then + CMAKE_PACKAGE="cmake-3.3.2-Linux-x86_64" + && wget https://www.cmake.org/files/v3.3/${CMAKE_PACKAGE}.tar.gz --no-check-certificate + && sudo apt-get -qq update + && sudo apt-get -qq install gcc-multilib g++-multilib valgrind + && tar -xzf ${CMAKE_PACKAGE}.tar.gz + && sudo cp -fR ${CMAKE_PACKAGE}/* /usr + ; + else + brew update + && brew unlink cmake + && brew install -v cmake + ; + fi before_script: - mkdir build - cd build - cmake --version - - cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE="$BUILD_TYPE" -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 + - cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE="$BUILD_TYPE" -DASMJIT_BUILD_TEST=1 - cd .. script: diff --git a/BREAKING.md b/BREAKING.md new file mode 100644 index 0000000..fa0b953 --- /dev/null +++ b/BREAKING.md @@ -0,0 +1,66 @@ +2015-12-07 +---------- + +Compiler now attaches to Assembler. This change was required to create resource sharing where Assembler is the central part and Compiler is a "high-level" part that serializes to it. It's an incremental work to implement sections and to allow code generators to create executables and libraries. + +Also, Compiler has no longer Logger interface, it uses Assembler's one after it's attached to it. + +``` +JitRuntime runtime; +X86Compiler c(&runtime); + +// ... code generation ... + +void* p = c.make(); +``` + +to + +``` +JitRuntime runtime; +X86Assembler a(&runtime); +X86Compiler c(&a); + +// ... code generation ... 
+ +c.finalize(); +void* p = a.make(); +``` + +All nodes were prefixed with HL, except for platform-specific nodes, change: + +``` +Node -> HLNode +FuncNode -> HLFunc +X86FuncNode -> X86Func +X86CallNode -> X86Call +``` + +`FuncConv` renamed to `CallConv` and is now part of a function prototype, change: + +``` +compiler.addFunc(kFuncConvHost, FuncBuilder0()); +``` + +to + +``` +compiler.addFunc(FuncBuilder0(kCallConvHost)); +``` + +Operand constructors that accept Assembler or Compiler are deprecated. Variables can now be created by using handy shortcuts like newInt32(), newIntPtr(), newXmmPd(), etc... Change: + +``` +X86Compiler c(...); +Label L(c); +X86GpVar x(c, kVarTypeIntPtr, "x"); +``` + +to + +``` +X86Compiler c(...); +Label L = c.newLabel(); +X86GpVar x = c.newIntPtr("x"); +``` + diff --git a/CMakeLists.txt b/CMakeLists.txt index 77c2726..467bb24 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,264 +1,202 @@ -# ============================================================================= -# [AsmJit - CMakeLists.txt] -# ============================================================================= - -CMake_Minimum_Required(VERSION 2.8.12) +cmake_minimum_required(VERSION 3.1) # ============================================================================= # [AsmJit - Configuration] # ============================================================================= -# Whether not to build anything (default FALSE). -# Set(ASMJIT_EMBED FALSE) +# Embedding mode, asmjit will not create any targets (default FALSE). +# set(ASMJIT_EMBED FALSE) -# Whether to build static library (default FALSE). -# Set(ASMJIT_STATIC FALSE) +# Whether to build a static library (default FALSE). +# set(ASMJIT_STATIC FALSE) -# Whether to build tests (default FALSE). -# Set(ASMJIT_BUILD_TEST FALSE) - -# Whether to build samples (default FALSE). -# Set(ASMJIT_BUILD_SAMPLES FALSE) +# Whether to build tests and samples (default FALSE). 
+# set(ASMJIT_BUILD_TEST FALSE) # ============================================================================= -# [AsmJit - Build] +# [AsmJit - Build / Embed] # ============================================================================= -If(ASMJIT_EMBED) - Set(ASMJIT_STATIC TRUE) -EndIf() +# Do not create a project if this CMakeLists.txt is included from another +# project. This makes it easy to embed or create a static library. +if(NOT CMAKE_PROJECT_NAME OR "${CMAKE_PROJECT_NAME}" MATCHES "^asmjit$") + project(asmjit C CXX) + set(ASMJIT_SIGNATURE "Standalone") +else() + set(ASMJIT_SIGNATURE "Included") +endif() -If(NOT CMAKE_PROJECT_NAME OR CMAKE_PROJECT_NAME MATCHES "^asmjit$") - Project(asmjit C CXX) - Set(ASMJIT_PROJECT_STR "Project") -Else() - # Do not create a project if this CMakeLists.txt is included by a different - # project. This allows easy static library build including debugger support. - Set(ASMJIT_PROJECT_STR "Include") -EndIf() +if(ASMJIT_EMBED) + set(ASMJIT_SIGNATURE "${ASMJIT_SIGNATURE} | Mode=Embed") + set(ASMJIT_STATIC TRUE) # Implies ASMJIT_STATIC. 
+elseif(ASMJIT_STATIC) + set(ASMJIT_SIGNATURE "${ASMJIT_SIGNATURE} | Mode=Static") +else() + set(ASMJIT_SIGNATURE "${ASMJIT_SIGNATURE} | Mode=Shared") +endif() -If(ASMJIT_STATIC) - Set(ASMJIT_PROJECT_STR "${ASMJIT_PROJECT_STR}|Static") -Else() - Set(ASMJIT_PROJECT_STR "${ASMJIT_PROJECT_STR}|Shared") -EndIf() +if(ASMJIT_BUILD_TEST) + set(ASMJIT_SIGNATURE "${ASMJIT_SIGNATURE} | Test=On") +else() + set(ASMJIT_SIGNATURE "${ASMJIT_SIGNATURE} | Test=Off") +endif() -Message("") -Message("== ====================================================") -Message("== [AsmJit ${ASMJIT_PROJECT_STR}]") -Message("== ====================================================") -Message("") +if(NOT ASMJIT_DIR) + set(ASMJIT_DIR ${CMAKE_CURRENT_LIST_DIR}) +endif() + +message("-- [asmjit] ${ASMJIT_SIGNATURE}") +message("-- [asmjit] ASMJIT_DIR=${ASMJIT_DIR}") # ============================================================================= -# [AsmJit - Directories] +# [AsmJit - Flags / Deps] # ============================================================================= -If(NOT ASMJIT_DIR) - Set(ASMJIT_DIR ${CMAKE_CURRENT_LIST_DIR}) - Message("-- Initializing ASMJIT_DIR=${ASMJIT_DIR}") -Else() - Message("-- Using Custom ASMJIT_DIR=${ASMJIT_DIR}") -EndIf() +set(ASMJIT_SOURCE_DIR "${ASMJIT_DIR}/src") # Asmjit source directory. +set(ASMJIT_INCLUDE_DIR "${ASMJIT_SOURCE_DIR}") # Asmjit include directory. -Set(ASMJIT_SRC_DIR "${ASMJIT_DIR}/src") -Set(ASMJIT_INC_DIR "${ASMJIT_SRC_DIR}") +set(ASMJIT_DEPS) # Asmjit dependencies (list of libraries) for the linker. +set(ASMJIT_LIBS) # Asmjit dependencies with asmjit included, for consumers. -Include_Directories(${ASMJIT_SRC_DIR}) +# Internal, never use. +set(ASMJIT_D "-D") # Used to define a C/C++ preprocessor parameter (-D or /D). +set(ASMJIT_PRIVATE_CFLAGS) # Compiler flags independent of build type. +set(ASMJIT_PRIVATE_LFLAGS "") # Linker flags used by the library and tests. 
-# ============================================================================= -# [AsmJit - Flags/Deps] -# ============================================================================= +set(ASMJIT_PRIVATE_CFLAGS_DBG) # Compiler flags used only by debug build. +set(ASMJIT_PRIVATE_CFLAGS_REL) # Compiler flags used only by release build. -Set(ASMJIT_DEPS) -Set(ASMJIT_LFLAGS) +if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") + set(ASMJIT_D "/D") + set(ASMJIT_PRIVATE_LFLAGS "/OPT:REF /OPT:ICF") -Set(ASMJIT_CFLAGS) -Set(ASMJIT_CFLAGS_DBG) -Set(ASMJIT_CFLAGS_REL) + list(APPEND ASMJIT_PRIVATE_CFLAGS /GF) + list(APPEND ASMJIT_PRIVATE_CFLAGS_DBG /GS /GR-) + list(APPEND ASMJIT_PRIVATE_CFLAGS_REL /Oi /Oy /GS- /GR-) + if(NOT MSVC60 AND NOT MSVC70 AND NOT MSVC71) + list(APPEND ASMJIT_PRIVATE_CFLAGS /MP) # Enable multi-process compilation. + endif() +endif() -Set(ASMJIT_DEFINE "-D") +if("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(GNU|Clang)$") + list(APPEND ASMJIT_PRIVATE_CFLAGS -fno-exceptions) + list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -fmerge-all-constants) +endif() -# MSVC. -If("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") - Message("-- Using MSVC") +if(ASMJIT_EMBED) + list(APPEND ASMJIT_PRIVATE_CFLAGS "${ASMJIT_D}ASMJIT_EMBED") +elseif(ASMJIT_STATIC) + list(APPEND ASMJIT_PRIVATE_CFLAGS "${ASMJIT_D}ASMJIT_STATIC") +endif() - Set(ASMJIT_DEFINE "/D") - Set(ASMJIT_LFLAGS "/OPT:REF /OPT:ICF") - Set(ASMJIT_CFLAGS /GF) - Set(ASMJIT_CFLAGS_DBG /DASMJIT_DEBUG /GS /GR-) - Set(ASMJIT_CFLAGS_REL /DASMJIT_RELEASE /Oi /Oy /GS- /GR-) +if(WIN32) + list(APPEND ASMJIT_PRIVATE_CFLAGS "${ASMJIT_D}_UNICODE") +else() + list(APPEND ASMJIT_DEPS pthread) +endif() - # Enable multi-process compilation. 
- If(NOT MSVC60 AND NOT MSVC70 AND NOT MSVC71) - List(APPEND ASMJIT_CFLAGS /MP) - EndIf() -EndIf() +if("${CMAKE_SYSTEM_NAME}" MATCHES "Linux") + list(APPEND ASMJIT_DEPS rt) +endif() -# GCC -If("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") - Message("-- Using GCC") +set(ASMJIT_LIBS ${ASMJIT_DEPS}) +if(NOT ASMJIT_EMBED) + list(INSERT ASMJIT_LIBS 0 asmjit) +endif() - Set(ASMJIT_CFLAGS - -fno-exceptions) - Set(ASMJIT_CFLAGS_DBG - -DASMJIT_DEBUG -O0) - Set(ASMJIT_CFLAGS_REL - -DASMJIT_RELEASE -O2 - -finline-functions - -fomit-frame-pointer - -fmerge-all-constants - -fno-keep-static-consts) -EndIf() +set(ASMJIT_PRIVATE_CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_DBG}) +set(ASMJIT_PRIVATE_CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_REL}) -# Clang. -If("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") - Message("-- Using Clang") - - Set(ASMJIT_CFLAGS - -fno-exceptions) - Set(ASMJIT_CFLAGS_DBG - -DASMJIT_DEBUG -O0) - Set(ASMJIT_CFLAGS_REL - -DASMJIT_RELEASE -O2 - -fomit-frame-pointer - -fmerge-all-constants) -EndIf() - -# Use Unicode by default on Windows target. -If(WIN32) - List(APPEND ASMJIT_CFLAGS "${ASMJIT_DEFINE}_UNICODE") -EndIf() - -# Static library. -If(ASMJIT_STATIC) - List(APPEND ASMJIT_CFLAGS "${ASMJIT_DEFINE}ASMJIT_STATIC") -EndIf() - -# Dependencies - pthread (Unix). -If(NOT WIN32) - List(APPEND ASMJIT_DEPS pthread) -EndIf() - -# Dependencies - librt (Linux). 
-IF(${CMAKE_SYSTEM_NAME} MATCHES "Linux") - LIST(APPEND ASMJIT_DEPS rt) -ENDIF() - -Set(ASMJIT_CFLAGS_DBG ${ASMJIT_CFLAGS} ${ASMJIT_CFLAGS_DBG}) -Set(ASMJIT_CFLAGS_REL ${ASMJIT_CFLAGS} ${ASMJIT_CFLAGS_REL}) +message("-- [asmjit] ASMJIT_DEPS=${ASMJIT_DEPS}") +message("-- [asmjit] ASMJIT_LIBS=${ASMJIT_LIBS}") # ============================================================================= # [AsmJit - Macros] # ============================================================================= -Macro(AsmJit_AddSource in_dst in_path) - Set(__list "") - Set(__path "${ASMJIT_SRC_DIR}/${in_path}") +macro(asmjit_add_source _out_dst _src_dir) + set(_src_path "${ASMJIT_SOURCE_DIR}/${_src_dir}") + set(_src_list) - ForEach(__name ${ARGN}) - Set(__file "${__path}/${__name}") - Set(__cflags ${ASMJIT_CFLAGS}) + foreach(_arg ${ARGN}) + set(_src_file "${_src_path}/${_arg}") + list(APPEND _src_list ${_src_file}) + endforeach() - If(__name MATCHES "\\.cpp|\\.h") - If("${__cflags}") - Set_Source_Files_Properties(${__name} PROPERTIES COMPILE_FLAGS ${__cflags}) - EndIf() - List(APPEND __list ${__file}) - EndIf() - EndForEach() + list(APPEND "${_out_dst}" ${_src_list}) + source_group(${_src_dir} FILES ${_src_list}) +endmacro() - List(APPEND "${in_dst}" ${__list}) - Source_Group(${in_path} FILES ${__list}) -EndMacro() +macro(asmjit_add_library _target _src _deps _cflags _cflags_dbg _cflags_rel) + if(NOT ASMJIT_STATIC) + add_library(${_target} SHARED ${_src}) + else() + add_library(${_target} STATIC ${_src}) + endif() -Macro(AsmJit_AddLibrary in_name in_src in_deps in_cflags in_cflags_dbg in_cflags_rel) - If(NOT ASMJIT_STATIC) - Set(__type "SHARED") - Else() - Set(__type "STATIC") - EndIf() + target_link_libraries(${_target} ${_deps}) + set_target_properties(${_target} PROPERTIES LINK_FLAGS "${ASMJIT_PRIVATE_LFLAGS}") - Add_Library(${in_name} ${__type} ${in_src}) + if(CMAKE_BUILD_TYPE) + if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + target_compile_options(${_target} PRIVATE ${_cflags} 
${_cflags_dbg}) + else() + target_compile_options(${_target} PRIVATE ${_cflags} ${_cflags_rel}) + endif() + else() + target_compile_options(${_target} PRIVATE ${_cflags} + $<$:${_cflags_dbg}> + $<$>:${_cflags_rel}>) + endif() - # Dependencies. - Target_Link_Libraries(${in_name} ${in_deps}) - - # Compiler Flags. - If(${CMAKE_BUILD_TYPE}) - If(${CMAKE_BUILD_TYPE} MATCHES "Debug") - Set_Target_Properties(${in_name} PROPERTIES COMPILE_FLAGS ${in_cflags} ${in_cflags_dbg}) - Else() - Set_Target_Properties(${in_name} PROPERTIES COMPILE_FLAGS ${in_cflags} ${in_cflags_rel}) - EndIf() - Else() - Target_Compile_Options(${in_name} PUBLIC ${in_cflags} - $<$:${in_cflags_dbg}> - $<$>:${in_cflags_rel}>) - EndIf() - - # Linker Flags. - Set_Target_Properties(${in_name} PROPERTIES LINK_FLAGS "${ASMJIT_LFLAGS}") - - # Install Instructions. - If(NOT ASMJIT_EMBED) - Install(TARGETS ${in_name} LIBRARY DESTINATION lib${LIB_SUFFIX} - ARCHIVE DESTINATION lib${LIB_SUFFIX} - RUNTIME DESTINATION bin) - EndIf() - - Unset(__type) -EndMacro() + if(NOT ASMJIT_STATIC) + install(TARGETS ${_target} DESTINATION "lib${LIB_SUFFIX}") + endif() +endmacro() # ============================================================================= # [AsmJit - Source] # ============================================================================= -Set(ASMJIT_SRC "") +set(ASMJIT_SRC "") -AsmJit_AddSource(ASMJIT_SRC asmjit +asmjit_add_source(ASMJIT_SRC asmjit apibegin.h apiend.h asmjit.h base.h build.h - config.h host.h x86.h ) -AsmJit_AddSource(ASMJIT_SRC asmjit/base +asmjit_add_source(ASMJIT_SRC asmjit/base assembler.cpp assembler.h - codegen.cpp - codegen.h compiler.cpp compiler.h + compilercontext.cpp + compilercontext_p.h + compilerfunc.h constpool.cpp constpool.h containers.cpp containers.h - context.cpp - context_p.h cpuinfo.cpp cpuinfo.h - cputicks.cpp - cputicks.h - error.cpp - error.h globals.cpp globals.h - intutil.cpp - intutil.h - lock.h + hlstream.cpp + hlstream.h logger.cpp logger.h operand.cpp operand.h 
runtime.cpp runtime.h - string.cpp - string.h + utils.cpp + utils.h vectypes.h vmem.cpp vmem.h @@ -266,13 +204,15 @@ AsmJit_AddSource(ASMJIT_SRC asmjit/base zone.h ) -AsmJit_AddSource(ASMJIT_SRC asmjit/x86 +asmjit_add_source(ASMJIT_SRC asmjit/x86 x86assembler.cpp x86assembler.h x86compiler.cpp x86compiler.h - x86context.cpp - x86context_p.h + x86compilercontext.cpp + x86compilercontext_p.h + x86compilerfunc.cpp + x86compilerfunc.h x86cpuinfo.cpp x86cpuinfo.h x86inst.cpp @@ -285,81 +225,55 @@ AsmJit_AddSource(ASMJIT_SRC asmjit/x86 ) # ============================================================================= -# [AsmJit - Headers] +# [AsmJit - Targets] # ============================================================================= -If(NOT ASMJIT_EMBED) - ForEach(i ${ASMJIT_SRC}) - Get_Filename_Component(path ${i} PATH) - Get_Filename_Component(name ${i} NAME) - String(REGEX REPLACE "^${ASMJIT_SRC_DIR}/" "" targetpath "${path}") - If(name MATCHES ".h$") - If(NOT name MATCHES "_p.h$") - Install(FILES ${i} DESTINATION "include/${targetpath}") - EndIf() - EndIf() - EndForEach() -EndIf() - -# ============================================================================= -# [Asmjit - Library] -# ============================================================================= - -If(NOT ASMJIT_EMBED) - AsmJit_AddLibrary(asmjit +if(NOT ASMJIT_EMBED) + # Add `asmjit` library. + asmjit_add_library(asmjit "${ASMJIT_SRC}" "${ASMJIT_DEPS}" - "${ASMJIT_CFLAGS}" - "${ASMJIT_CFLAGS_DBG}" - "${ASMJIT_CFLAGS_REL}" - ) -EndIf() - -# ============================================================================= -# [Asmjit - Testing] -# ============================================================================= - -# AsmJit library is always embedded into the tests executable. This way it's -# much easier to test private functions than just linking to `libasmjit.so`. 
-If(ASMJIT_BUILD_TEST) - AsmJit_AddSource(ASMJIT_TEST_SRC test asmjit_test_unit.cpp broken.cpp broken.h) - - Set(ASMJIT_TEST_CFLAGS - ${ASMJIT_CFLAGS} - ${ASMJIT_DEFINE}ASMJIT_STATIC - ${ASMJIT_DEFINE}ASMJIT_TEST) - - Add_Executable(asmjit_test_unit ${ASMJIT_SRC} ${ASMJIT_TEST_SRC}) - Target_Link_Libraries(asmjit_test_unit ${ASMJIT_DEPS}) - - If(${CMAKE_BUILD_TYPE}) - If(${CMAKE_BUILD_TYPE} MATCHES "Debug") - Set_Target_Properties(asmjit_test_unit PROPERTIES COMPILE_FLAGS ${ASMJIT_TEST_CFLAGS} ${ASMJIT_CFLAGS_DBG}) - Else() - Set_Target_Properties(asmjit_test_unit PROPERTIES COMPILE_FLAGS ${ASMJIT_TEST_CFLAGS} ${ASMJIT_CFLAGS_REL}) - EndIf() - Else() - Target_Compile_Options(asmjit_test_unit PUBLIC ${ASMJIT_TEST_CFLAGS} - $<$:${ASMJIT_CFLAGS_DBG}> - $<$>:${ASMJIT_CFLAGS_REL}>) - EndIf() - - Set_Target_Properties(asmjit_test_unit PROPERTIES LINK_FLAGS "${ASMJIT_LFLAGS}") -EndIf() - -# ============================================================================= -# [Asmjit - Samples] -# ============================================================================= - -If(ASMJIT_BUILD_SAMPLES) - Set(ASMJIT_SRC_SAMPLES - asmjit_bench_x86 - asmjit_test_opcode - asmjit_test_x86 + "" + "${ASMJIT_PRIVATE_CFLAGS_DBG}" + "${ASMJIT_PRIVATE_CFLAGS_REL}" ) - ForEach(file ${ASMJIT_SRC_SAMPLES}) - Add_Executable(${file} src/test/${file}.cpp) - Target_Link_Libraries(${file} asmjit ${ASMJIT_DEPS}) - EndForEach(file) -EndIf() + foreach(_src_file ${ASMJIT_SRC}) + get_filename_component(_src_dir ${_src_file} PATH) + get_filename_component(_src_name ${_src_file} NAME) + string(REGEX REPLACE "^${ASMJIT_SOURCE_DIR}/" "" targetpath "${_src_dir}") + if("${_src_name}" MATCHES ".h$") + if(NOT "${_src_name}" MATCHES "_p.h$") + install(FILES ${_src_file} DESTINATION "include/${targetpath}") + endif() + endif() + endforeach() + + # Add `asmjit` tests and samples. 
+ if(ASMJIT_BUILD_TEST) + set(ASMJIT_TEST_SRC "") + set(ASMJIT_TEST_CFLAGS ${ASMJIT_D}ASMJIT_TEST ${ASMJIT_D}ASMJIT_EMBED) + asmjit_add_source(ASMJIT_TEST_SRC test asmjit_test_unit.cpp broken.cpp broken.h) + + add_executable(asmjit_test_unit ${ASMJIT_SRC} ${ASMJIT_TEST_SRC}) + target_link_libraries(asmjit_test_unit ${ASMJIT_DEPS}) + set_target_properties(asmjit_test_unit PROPERTIES LINK_FLAGS "${ASMJIT_PRIVATE_LFLAGS}") + + if(CMAKE_BUILD_TYPE) + if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + target_compile_options(asmjit_test_unit PRIVATE ${ASMJIT_TEST_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_DBG}) + else() + target_compile_options(asmjit_test_unit PRIVATE ${ASMJIT_TEST_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_REL}) + endif() + else() + target_compile_options(asmjit_test_unit PRIVATE ${ASMJIT_TEST_CFLAGS} + $<$:${ASMJIT_PRIVATE_CFLAGS_DBG}> + $<$>:${ASMJIT_PRIVATE_CFLAGS_REL}>) + endif() + + foreach(_target asmjit_bench_x86 asmjit_test_opcode asmjit_test_x86) + add_executable(${_target} "src/test/${_target}.cpp") + target_link_libraries(${_target} ${ASMJIT_LIBS}) + endforeach() + endif() +endif() diff --git a/README.md b/README.md index 19bff3c..790e7c8 100644 --- a/README.md +++ b/README.md @@ -9,44 +9,44 @@ Complete x86/x64 JIT and Remote Assembler for C++. Introduction ------------ -AsmJit is a complete JIT and remote assembler for C++ language. It can generate native code for x86 and x64 architectures and supports the whole x86/x64 instruction set - from legacy MMX to the newest AVX2. It has a type-safe API that allows C++ compiler to do a semantic checks at compile-time even before the assembled code is generated or run. +AsmJit is a complete JIT and remote assembler for C++ language. It can generate native code for x86 and x64 architectures and supports the whole x86/x64 instruction set - from legacy MMX to the newest AVX2. It has a type-safe API that allows C++ compiler to do semantic checks at compile-time even before the assembled code is generated and executed. 
-AsmJit is not a virtual machine nor tries to be. It's a tool that can be used to encode instructions into their machine code representations and tries to make such process easy and fun. AsmJit has been used so far in software encryption, image/sound processing, emulators and as a JIT backend in virtual machines. +AsmJit is not a virtual machine (VM). It doesn't have functionality to implement VM out of the box; however, it can be used as a JIT backend of your own VM. The usage of AsmJit is not limited at all; it's suitable for multimedia, VM backends, remote code generation, and many other tasks. Features -------- - * Complete x86/x64 instruction set - MMX, SSE, AVX, BMI, XOP, FMA..., - * Low-level and high-level code generation, - * Built-in CPU detection, - * Virtual Memory management, - * Pretty logging and error handling, - * Small and embeddable, around 150-200kB compiled, - * Zero dependencies, not even STL or RTTI. + * Complete x86/x64 instruction set - MMX, SSEx, AVX1/2, BMI, XOP, FMA3, and FMA4 (AVX-512 in progress) + * Low-level and high-level code generation concepts + * Built-in CPU features detection + * Virtual Memory management similar to malloc/free + * Powerful logging and error handling + * Small and embeddable, around 150-200kB compiled + * Zero dependencies, not even STL or RTTI Supported Environments ---------------------- ### Operating Systems - * BSDs - * Linux - * Mac - * Windows + * BSDs (not tested regularly) + * Linux (tested by Travis-CI) + * Mac (tested by Travis-CI) + * Windows (tested manually) ### C++ Compilers - * BorlandC++ - * Clang (Travis-CI) - * Gcc (Travis-CI) - * MinGW - * MSVC - * Other compilers require testing and support in `asmjit/build.h` header + * BorlandC++ (not tested regularly) + * Clang (tested by Travis-CI) + * GCC (tested by Travis-CI) + * MinGW (tested manually) + * MSVC (tested manually) + * Other compilers require some testing and support in `asmjit/build.h` header ### Backends - * X86 - * X64 + * X86 
(tested by Travis-CI) + * X64 (tested by Travis-CI) Project Organization -------------------- @@ -54,22 +54,21 @@ Project Organization * `/` - Project root * `src` - Source code * `asmjit` - Public header files (always include from here) - * `base` - Base files, used by the AsmJit and all backends - * `contrib` - Contributions that extend the base functionality - * `test` - Unit testing support (don't include in your project) + * `base` - Base files, used by AsmJit and all backends * `x86` - X86/X64 specific files, used only by X86/X64 backend + * `test` - Unit and integration tests (don't embed in your project) * `tools` - Tools used for configuring, documenting and generating files Code Generation Concepts ------------------------ -AsmJit has two completely different code generation concepts. The difference is in how the code is generated. The first concept, also referred as the low level concept, is called `Assembler` and it's the same as writing RAW assembly by using physical registers directly. In this case AsmJit does only instruction encoding, verification and optionally code-relocation. +AsmJit has two completely different code generation concepts. The difference is in how the code is generated. The first concept, also referred as a low level concept, is called `Assembler` and it's the same as writing RAW assembly by inserting instructions that use physical registers directly. In this case AsmJit does only instruction encoding, verification and final code relocation. -The second concept, also referred as the high level concept, is called `Compiler`. Compiler lets you use virtually unlimited number of registers (called variables) significantly simplifying the code generation process. Compiler allocates these virtual registers to physical registers after the code generation is done. 
This requires some extra effort - Compiler has to generate information for each node (instruction, function declaration, function call) in the code, perform a variable liveness analysis and translate the code having variables into code having only registers. +The second concept, also referred as a high level concept, is called `Compiler`. Compiler lets you use virtually unlimited number of registers (it calls them variables), which significantly simplifies the code generation process. Compiler allocates these virtual registers to physical registers after the code generation is done. This requires some extra effort - Compiler has to generate information for each node (instruction, function declaration, function call, etc...) in the code, perform a variable liveness analysis and translate the code using variables to a code that uses only physical registers. -In addition, Compiler understands functions and function calling conventions. It has been designed in a way that the code generated is always a function having a prototype like in a programming language. By having a function prototype the Compiler is able to insert prolog and epilog to a function being generated and it is able to call a function inside a generated one. +In addition, Compiler understands functions and their calling conventions. It has been designed in a way that the code generated is always a function having a prototype like a real programming language. By having a function prototype the Compiler is able to insert prolog and epilog sequence to the function being generated and it's able to also generate a necessary code to call other function from your own code. -There is no conclusion on which concept is better. Assembler brings full control on how the code is generated, while Compiler makes the generation easier and more portable. 
However, Compiler does sometimes relatively bad job when it comes to register allocation, so for projects where there is already an analysis performed, pure Assembler code generator is the preferred way. +There is no conclusion on which concept is better. `Assembler` brings full control and the best performance, while `Compiler` makes the code-generation more fun and more portable. Configuring & Building ---------------------- @@ -101,16 +100,14 @@ AsmJit is designed to be easy embeddable in any kind project. However, it has so ### Features - * `ASMJIT_DISABLE_COMPILER` - Disable `Compiler` completely. Use this flag if you don't use Compiler and want slimmer binary. - - * `ASMJIT_DISABLE_LOGGER` - Disable `Logger` completely. Use this flag if you don't need `Logger` functionality and want slimmer binary. AsmJit compiled with or without `Logger` support is binary compatible (all classes that use Logger pointer will simply use `void*`), but the Logger interface and in general instruction dumps are not available. - - * `ASMJIT_DISABLE_NAMES` - Disable everything that uses strings and that causes certain strings to be stored in the resulting binary. For example when this flag is enabled instruction or error names (and related APIs) will not be available. This flag has to be disabled together with `ASMJIT_DISABLE_LOGGER`. + * `ASMJIT_DISABLE_COMPILER` - Disable `Compiler` completely. Use this flag if you don't use Compiler and want a slimmer binary. + * `ASMJIT_DISABLE_LOGGER` - Disable `Logger` completely. Use this flag if you don't need `Logger` functionality and want slimmer binary. AsmJit compiled with or without `Logger` support is binary compatible (all classes that use Logger pointer will simply use `void*`), but the Logger interface and in general instruction dumps won't be available anymore. + * `ASMJIT_DISABLE_NAMES` - Disable everything that uses strings and that causes certain strings to be stored in the resulting binary. 
For example when this flag is enabled all instruction and error names (and related APIs) will not be available. This flag has to be disabled together with `ASMJIT_DISABLE_LOGGER`. Using AsmJit ------------ -AsmJit library uses one global namespace called `asmjit`, which contains the basics. Architecture specific code is prefixed by the architecture and architecture registers and operand builders are in its own namespace. For example classes for both x86 and x64 code generation are prefixed by `X86`, enums by `kX86`, registers and operand builders are accessible through `x86` namespace. This design is very different from the initial version of AsmJit and it seems now as the most convenient one. +AsmJit library uses one global namespace called `asmjit`, which contains the basics. Architecture specific code is prefixed by the architecture and architecture specific registers and operand builders are in its own namespace. For example classes for both x86 and x64 code generation are prefixed by `X86`, enums by `kX86`, registers and operand builders are accessible through `x86` namespace. This design is very different from the initial version of AsmJit and it seems now as the most convenient one. ### Runtime & Code-Generators @@ -118,7 +115,7 @@ AsmJit contains two classes that are required to generate a machine code. `Runti ### Instruction Operands -Operand is a part of CPU instruction which specifies the data the instruction will operate on. There are five types of operands in AsmJit: +Operand is a part of an instruction, which specifies the data the instruction will operate on. There are five types of operands in AsmJit: * `Reg` - Physical register, used only by `Assembler` * `Var` - Virtual register, used only by `Compiler` @@ -138,47 +135,52 @@ AsmJit needs to know the prototype of the function it will generate or call. Asm Let's put all together and generate a first function that sums its two arguments and returns the result. 
At the end the generated function is called from a C++ code. -```C++ +```c++ #include using namespace asmjit; int main(int argc, char* argv[]) { - // Create JitRuntime and X86 Compiler. + // Create JitRuntime and X86 Assembler/Compiler. JitRuntime runtime; - X86Compiler c(&runtime); + X86Assembler a(&runtime); + X86Compiler c(&a); // Build function having two arguments and a return value of type 'int'. - // First type in function builder describes the return value. kFuncConvHost - // tells compiler to use a host calling convention. - c.addFunc(kFuncConvHost, FuncBuilder2()); + // First type in function builder describes the return value. kCallConvHost + // tells the compiler to use the host calling convention. + c.addFunc(FuncBuilder2(kCallConvHost)); // Create 32-bit variables (virtual registers) and assign some names to - // them. Using names is purely optional and only greatly helps while - // debugging. - X86GpVar a(c, kVarTypeInt32, "a"); - X86GpVar b(c, kVarTypeInt32, "b"); + // them. Using variable names is not necessary, however, it can make + // debugging easier. + X86GpVar x = c.newInt32("x"); + X86GpVar y = c.newInt32("y"); // Tell asmjit to use these variables as function arguments. - c.setArg(0, a); - c.setArg(1, b); + c.setArg(0, x); + c.setArg(1, y); - // a = a + b; - c.add(a, b); + // x = x + y; + c.add(x, y); - // Tell asmjit to return 'a'. - c.ret(a); + // Tell asmjit to return `x`. + c.ret(x); // Finalize the current function. c.endFunc(); // Now the Compiler contains the whole function, but the code is not yet - // generated. To tell compiler to generate the function make() has to be - // called. + // generated. To tell the compiler to serialize the code to `Assembler` + // `c.finalize()` has to be called. After finalization the `Compiler` + // won't contain the code anymore and will be detached from the `Assembler`. 
+ c.finalize(); - // Make uses the JitRuntime passed to Compiler constructor to allocate a - // buffer for the function and make it executable. - void* funcPtr = c.make(); + // After finalization the code has been sent to `Assembler`. It contains + // a handy method `make()`, which returns a pointer that points to the + // first byte of the generated code, which is the function entry in our + // case. + void* funcPtr = a.make(); // In order to run 'funcPtr' it has to be casted to the desired type. // Typedef is a recommended and safe way to create a function-type. @@ -189,25 +191,25 @@ int main(int argc, char* argv[]) { FuncType func = asmjit_cast(funcPtr); // Finally, run it and do something with the result... - int x = func(1, 2); - printf("x=%d\n", x); // Outputs "x=3". + int z = func(1, 2); + printf("z=%d\n", z); // Outputs "z=3". - // The function will remain in memory after Compiler is destroyed, but - // will be destroyed together with Runtime. This is just simple example - // where we can just destroy both at the end of the scope and that's it. - // However, it's a good practice to clean-up resources after they are - // not needed and using runtime.release() is the preferred way to free - // a function added to JitRuntime. + // The function will remain in memory after Compiler and Assembler are + // destroyed. This is why the `JitRuntime` is used - it keeps track of + // the code generated. When `Runtime` is destroyed it also invalidates + // all code relocated by it (which is in our case also our `func`). So + // it's safe to just do nothing in our case, because destroying `Runtime` + // will free `func` as well, however, it's always better to release the + // generated code that is not needed anymore manually. runtime.release((void*)func); - // Runtime and Compiler will be destroyed at the end of the scope. return 0; } ``` The code should be self explanatory, however there are some details to be clarified. 
-The code above generates and calls a function of `kFuncConvHost` calling convention. 32-bit architecture contains a wide range of function calling conventions that can be all used by a single program, so it's important to know which calling convention is used by your C/C++ compiler so you can call the function. However, most compilers should generate CDecl by default. In 64-bit mode there are only two calling conventions, one is specific for Windows (Win64 calling convention) and the other for Unix (AMD64 calling convention). The `kFuncConvHost` is defined to be one of CDecl, Win64 or AMD64 depending on your architecture and operating system. +The code above generates and calls a function of `kCallConvHost` calling convention. 32-bit architecture contains a wide range of function calling conventions that can be all used by a single program, so it's important to know which calling convention is used by your C/C++ compiler so you can call the function. However, most compilers should generate CDecl by default. In 64-bit mode there are only two calling conventions, one is specific for Windows (Win64 calling convention) and the other for Unix (AMD64 calling convention). The `kCallConvHost` is defined to be one of CDecl, Win64 or AMD64 depending on your architecture and operating system. Default integer size is platform specific, virtual types `kVarTypeIntPtr` and `kVarTypeUIntPtr` can be used to make the code more portable and they should be always used when a pointer type is needed. When no type is specified AsmJit always defaults to `kVarTypeIntPtr`. The code above works with integers where the default behavior has been overidden to 32-bits. Note it's always a good practice to specify the type of the variable used. Alternative form of creating a variable is `c.newGpVar(...)`, `c.newMmVar(...)`, `c.newXmmVar` and so on... @@ -217,70 +219,72 @@ The function starts with `c.addFunc()` and ends with `c.endFunc()`. 
It's not all Labels are essential for making jumps, function calls or to refer to a data that is embedded in the code section. Label has to be explicitly created by using `newLabel()` method of your code generator in order to be used. The following example executes a code that depends on the condition by using a `Label` and conditional jump instruction. If the first parameter is zero it returns `a + b`, otherwise `a - b`. -```C++ +```c++ #include using namespace asmjit; int main(int argc, char* argv[]) { JitRuntime runtime; - X86Compiler c(&runtime); + X86Assembler a(&runtime); + X86Compiler c(&a); // This function uses 3 arguments. - c.addFunc(kFuncConvHost, FuncBuilder3()); + c.addFunc(FuncBuilder3(kCallConvHost)); // New variable 'op' added. - X86GpVar op(c, kVarTypeInt32, "op"); - X86GpVar a(c, kVarTypeInt32, "a"); - X86GpVar b(c, kVarTypeInt32, "b"); + X86GpVar op = c.newInt32("op"); + X86GpVar x = c.newInt32("x"); + X86GpVar y = c.newInt32("y"); c.setArg(0, op); - c.setArg(1, a); - c.setArg(2, b); + c.setArg(1, x); + c.setArg(2, y); // Create labels. - Label L_Subtract(c); - Label L_Skip(c); + Label L_Sub = c.newLabel(); + Label L_Skip = c.newLabel(); // If (op != 0) - // goto L_Subtract; + // goto L_Sub; c.test(op, op); - c.jne(L_Subtract); + c.jne(L_Sub); - // a = a + b; + // x = x + y; // goto L_Skip; - c.add(a, b); + c.add(x, y); c.jmp(L_Skip); - // L_Subtract: - // a = a - b; - c.bind(L_Subtract); - c.sub(a, b); + // L_Sub: + // x = x - y; + c.bind(L_Sub); + c.sub(x, y); // L_Skip: c.bind(L_Skip); - c.ret(a); + c.ret(x); c.endFunc(); + c.finalize(); - // The prototype of the generated function changed also here. + // The prototype of the generated function. typedef int (*FuncType)(int, int, int); - FuncType func = asmjit_cast(c.make()); + FuncType func = asmjit_cast(a.make()); - int x = func(0, 1, 2); - int y = func(1, 1, 2); + int res0 = func(0, 1, 2); + int res1 = func(1, 1, 2); - printf("x=%d\n", x); // Outputs "x=3". 
-  printf("y=%d\n", y); // Outputs "y=-1".
+  printf("res0=%d\n", res0); // Outputs "res0=3".
+  printf("res1=%d\n", res1); // Outputs "res1=-1".
 
   runtime.release((void*)func);
   return 0;
 }
 ```
 
-In this example conditional and unconditional jumps were used with labels together. Labels are created explicitly by the `Compiler` by passing a `Compiler` instance to a `Label` constructor or by using a `Label l = c.newLabel()` form. Each label as an unique ID that identifies it, however it's not a string and there is no way to query for a `Label` instance that already exists. Label is like any other operand moved by value, so the copy of the label will still reference the same label and changing a copied label will not change the original label.
+In this example conditional and unconditional jumps were used with labels together. Labels have to be created explicitly by `Compiler` by using a `Label L = c.newLabel()` form. Each label has a unique ID that identifies it; however, it's not a string and there is no way to query for a `Label` instance that already exists at the moment. Label is like any other operand moved by value, so the copy of the label will still reference the same label and changing a copied label will not change the original label.
 
-Each label has to be bound to the location in the code by using `c.bind()`; however, it can be only bound once! Trying to bind the same label multiple times has undefined behavior - it will trigger an assertion failure in the best case.
+Each label has to be bound to a location in the code by using `bind()`; however, it can be bound only once! Trying to bind the same label multiple times has undefined behavior - an assertion failure is the best case.
 
 ### Memory Addressing
 
@@ -288,62 +292,64 @@ X86/X64 architectures have several memory addressing modes which can be used to
 
 In the following example various memory addressing modes are used to demonstrate how to construct and use them.
It creates a function that accepts an array and two indexes which specify which elements to sum and return. -```C++ +```c++ #include using namespace asmjit; int main(int argc, char* argv[]) { JitRuntime runtime; - X86Compiler c(&runtime); + X86Assembler a(&runtime); + X86Compiler c(&a); // Function returning 'int' accepting pointer and two indexes. - c.addFunc(kFuncConvHost, FuncBuilder3()); + c.addFunc(FuncBuilder3(kCallConvHost)); - X86GpVar p(c, kVarTypeIntPtr, "p"); - X86GpVar aIndex(c, kVarTypeIntPtr, "aIndex"); - X86GpVar bIndex(c, kVarTypeIntPtr, "bIndex"); + X86GpVar p = c.newIntPtr("p"); + X86GpVar xIndex = c.newIntPtr("xIndex"); + X86GpVar yIndex = c.newIntPtr("yIndex"); c.setArg(0, p); - c.setArg(1, aIndex); - c.setArg(2, bIndex); + c.setArg(1, xIndex); + c.setArg(2, yIndex); - X86GpVar a(c, kVarTypeInt32, "a"); - X86GpVar b(c, kVarTypeInt32, "b"); + X86GpVar x = c.newInt32("x"); + X86GpVar y = c.newInt32("y"); - // Read 'a' by using a memory operand having base register, index register - // and scale. Translates to 'mov a, dword ptr [p + aIndex << 2]'. - c.mov(a, ptr(p, aIndex, 2)); + // Read `x` by using a memory operand having base register, index register + // and scale. Translates to `mov x, dword ptr [p + xIndex << 2]`. + c.mov(x, x86::ptr(p, xIndex, 2)); - // Read 'b' by using a memory operand having base register only. Variables - // 'p' and 'bIndex' are both modified. + // Read `y` by using a memory operand having base register only. Variables + // `p` and `yIndex` are both modified. // Shift bIndex by 2 (exactly the same as multiplying by 4). // And add scaled 'bIndex' to 'p' resulting in 'p = p + bIndex * 4'. - c.shl(bIndex, 2); - c.add(p, bIndex); + c.shl(yIndex, 2); + c.add(p, yIndex); - // Read 'b'. - c.mov(b, ptr(p)); + // Read `y`. + c.mov(y, x86::ptr(p)); - // a = a + b; - c.add(a, b); + // x = x + y; + c.add(x, y); - c.ret(a); + c.ret(x); c.endFunc(); + c.finalize(); - // The prototype of the generated function changed also here. 
+ // The prototype of the generated function. typedef int (*FuncType)(const int*, intptr_t, intptr_t); - FuncType func = asmjit_cast(c.make()); + FuncType func = asmjit_cast(a.make()); - // Array passed to 'func' - const int array[] = { 1, 2, 3, 5, 8, 13 }; + // Array passed to `func`. + static const int array[] = { 1, 2, 3, 5, 8, 13 }; - int x = func(array, 1, 2); - int y = func(array, 3, 5); + int xVal = func(array, 1, 2); + int yVal = func(array, 3, 5); - printf("x=%d\n", x); // Outputs "x=5". - printf("y=%d\n", y); // Outputs "y=18". + printf("xVal=%d\n", xVal); // Outputs "xVal=5". + printf("yVal=%d\n", yVal); // Outputs "yVal=18". runtime.release((void*)func); return 0; @@ -356,42 +362,44 @@ AsmJit uses stack automatically to spill variables if there is not enough regist In the following example a stack of 256 bytes size is allocated, filled by bytes starting from 0 to 255 and then iterated again to sum all the values. -```C++ +```c++ #include using namespace asmjit; int main(int argc, char* argv[]) { JitRuntime runtime; - X86Compiler c(&runtime); + X86Assembler a(&runtime); + X86Compiler c(&a); // Function returning 'int' without any arguments. - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); - // Allocate a function stack of size 256 aligned to 4 bytes. + // Allocate 256 bytes on the stack aligned to 4 bytes. X86Mem stack = c.newStack(256, 4); - X86GpVar p(c, kVarTypeIntPtr, "p"); - X86GpVar i(c, kVarTypeIntPtr, "i"); + X86GpVar p = c.newIntPtr("p"); + X86GpVar i = c.newIntPtr("i"); - // Load a stack address to 'p'. This step is purely optional and shows - // that 'lea' is useful to load a memory operands address (even absolute) + // Load a stack address to `p`. This step is purely optional and shows + // that `lea` is useful to load a memory operands address (even absolute) // to a general purpose register. c.lea(p, stack); - // Clear 'i'. 
Notice that xor_() is used instead of xor(), because xor is - // unfortunately a keyword in C++. + // Clear `i`. Notice that `xor_()` is used instead of `xor()` as it's keyword. c.xor_(i, i); + Label L1 = c.newLabel(); + Label L2 = c.newLabel(); + // First loop, fill the stack allocated by a sequence of bytes from 0 to 255. - Label L1(c); c.bind(L1); - // Mov [p + i], i. + // Mov byte ptr[p + i], i. // - // Any operand can be cloned and modified. By cloning 'stack' and calling - // 'setIndex' we created a new memory operand based on stack having an - // index register set. + // Any operand can be cloned and modified. By cloning `stack` and calling + // `setIndex()` we created a new memory operand based on stack having an + // index register assigned to it. c.mov(stack.clone().setIndex(i), i.r8()); // if (++i < 256) @@ -400,20 +408,19 @@ int main(int argc, char* argv[]) { c.cmp(i, 256); c.jb(L1); - // Second loop, sum all bytes stored in 'stack'. - X86GpVar a(c, kVarTypeInt32, "a"); - X86GpVar t(c, kVarTypeInt32, "t"); + // Second loop, sum all bytes stored in `stack`. + X86GpVar sum = c.newInt32("sum"); + X86GpVar val = c.newInt32("val"); c.xor_(i, i); - c.xor_(a, a); + c.xor_(sum, sum); - Label L2(c); c.bind(L2); - // Movzx t, byte ptr [stack + i] - c.movzx(t, stack.clone().setIndex(i).setSize(1)); - // a += t; - c.add(a, t); + // Movzx val, byte ptr [stack + i] + c.movzx(val, stack.clone().setIndex(i).setSize(1)); + // sum += val; + c.add(sum, val); // if (++i < 256) // goto L2; @@ -421,13 +428,14 @@ int main(int argc, char* argv[]) { c.cmp(i, 256); c.jb(L2); - c.ret(a); + c.ret(sum); c.endFunc(); + c.finalize(); typedef int (*FuncType)(void); + FuncType func = asmjit_cast(a.make()); - FuncType func = asmjit_cast(c.make()); - printf("a=%d\n", func()); // Outputs "a=32640". + printf("sum=%d\n", func()); // Outputs "sum=32640". 
runtime.release((void*)func); return 0; @@ -453,33 +461,34 @@ Loggers can be assigned to any code generator and there is no restriction of ass The following snippet describes how to log into `FILE*`: -```C++ +```c++ // Create logger logging to `stdout`. Logger life-time should always be -// greater than lifetime of the code generator. +// greater than a life-time of the code generator. Alternatively the +// logger can be reset before it's destroyed. FileLogger logger(stdout); -// Create a code generator and assign our logger into it. -X86Compiler c(...); -c.setLogger(&logger); +// Create runtime and assembler and attach a logger to the assembler. +JitRuntime runtime; +X86Assembler a(&runtime); +a.setLogger(&logger); // ... Generate the code ... ``` The following snippet describes how to log into a string: -```C++ +```c++ StringLogger logger; -// Create a code generator and assign our logger into it. -X86Compiler c(...); -c.setLogger(&logger); +JitRuntime runtime; +X86Assembler a(&runtime); +a.setLogger(&logger); // ... Generate the code ... printf("Logger Content:\n%s", logger.getString()); -// You can also use `logger.clearString()` if the logger -// instance will be reused. +// You can use `logger.clearString()` if the intend is to reuse the logger. ``` Logger can be configured to show more information by using `logger.setOption()` method. The following options are available: @@ -496,29 +505,29 @@ Code injection was one of key concepts of Compiler from the beginning. Compiler To manipulate the current cursor use Compiler's `getCursor()` and `setCursor()` methods. The following snippet demonstrates the proper way of code injection. -```C++ +```c++ X86Compiler c(...); -X86GpVar a(c, kVarTypeInt32, "a"); -X86GpVar b(c, kVarTypeInt32, "b"); +X86GpVar x = c.newInt32("x"); +X86GpVar y = c.newInt32("y"); -Node* here = c.getCursor(); -c.mov(b, 2); +ASNode* here = c.getCursor(); +c.mov(y, 2); -// Now, 'here' can be used to inject something before 'mov b, 2'. 
To inject -// anything it's good to remember the current cursor so it can be set back -// after the injecting is done. When setCursor() is called it returns the old -// cursor. -Node* oldCursor = c.setCursor(here); -c.mov(a, 1); -c.setCursor(oldCursor); +// Now, `here` can be used to inject something before `mov y, 2`. To inject +// something it's always good to remember the current cursor so it can be set +// back after the injecting is done. When `setCursor()` is called it returns +// the old cursor to be remembered. +ASNode* prev = c.setCursor(here); +c.mov(x, 1); +c.setCursor(prev); ``` The resulting code would look like: ``` -c.mov(a, 1); -c.mov(b, 2); +c.mov(x, 1); +c.mov(y, 2); ``` Support diff --git a/cxxconfig.js b/cxxconfig.js new file mode 100644 index 0000000..c82eb74 --- /dev/null +++ b/cxxconfig.js @@ -0,0 +1,16 @@ +module.exports = { + product: "asmjit", + version: "1.0.0", + + prefix: "ASMJIT", + source: "src/asmjit", + + tools: { + NoTabs : true, + NoTrailingLines : true, + NoTrailingSpaces: true, + UnixEOL : true, + SortIncludes : true, + ExpandTemplates : true + } +}; diff --git a/src/asmjit/apibegin.h b/src/asmjit/apibegin.h index cebd2dc..db4fce6 100644 --- a/src/asmjit/apibegin.h +++ b/src/asmjit/apibegin.h @@ -13,14 +13,14 @@ #if !defined(ASMJIT_API_SCOPE) # define ASMJIT_API_SCOPE #else -# error "AsmJit - Api-Scope is already active, previous scope not closed by apiend.h?" +# error "[asmjit] Api-Scope is already active, previous scope not closed by apiend.h?" 
#endif // ASMJIT_API_SCOPE // ============================================================================ // [Override] // ============================================================================ -#if !defined(ASMJIT_CC_HAS_OVERRIDE) && !defined(override) +#if !ASMJIT_CC_HAS_OVERRIDE && !defined(override) # define override # define ASMJIT_UNDEF_OVERRIDE #endif // !ASMJIT_CC_HAS_OVERRIDE && !override @@ -29,7 +29,7 @@ // [NoExcept] // ============================================================================ -#if !defined(ASMJIT_CC_HAS_NOEXCEPT) && !defined(noexcept) +#if !ASMJIT_CC_HAS_NOEXCEPT && !defined(noexcept) # define noexcept ASMJIT_NOEXCEPT # define ASMJIT_UNDEF_NOEXCEPT #endif // !ASMJIT_CC_HAS_NOEXCEPT && !noexcept diff --git a/src/asmjit/apiend.h b/src/asmjit/apiend.h index 8341594..14f61db 100644 --- a/src/asmjit/apiend.h +++ b/src/asmjit/apiend.h @@ -8,7 +8,7 @@ #if defined(ASMJIT_API_SCOPE) # undef ASMJIT_API_SCOPE #else -# error "AsmJit - Api-Scope not active, forgot to include apibegin.h?" +# error "[asmjit] Api-Scope not active, forgot to include apibegin.h?" #endif // ASMJIT_API_SCOPE // ============================================================================ diff --git a/src/asmjit/asmjit.h b/src/asmjit/asmjit.h index fb6cdf8..df8ec15 100644 --- a/src/asmjit/asmjit.h +++ b/src/asmjit/asmjit.h @@ -12,57 +12,57 @@ // [asmjit_mainpage] // ============================================================================ -//! @mainpage +//! \mainpage //! //! AsmJit - Complete x86/x64 JIT and Remote Assembler for C++. //! -//! AsmJit is a complete JIT and remote assembler for C++ language. It can -//! generate native code for x86 and x64 architectures having support for -//! a full instruction set, from legacy MMX to the newest AVX2. It has a -//! type-safe API that allows C++ compiler to do a semantic checks at -//! compile-time even before the assembled code is generated or run. +//! A complete JIT and remote assembler for C++ language. 
It can generate native +//! code for x86 and x64 architectures and supports the whole x86/x64 instruction +//! set - from legacy MMX to the newest AVX2. It has a type-safe API that allows +//! C++ compiler to do semantic checks at compile-time even before the assembled +//! code is generated and executed. //! //! AsmJit is not a virtual machine (VM). It doesn't have functionality to //! implement VM out of the box; however, it can be be used as a JIT backend -//! for your own VM. The usage of AsmJit is not limited at all; it's suitable -//! for multimedia, VM backends or remote code generation. +//! of your own VM. The usage of AsmJit is not limited at all; it's suitable +//! for multimedia, VM backends, remote code generation, and many other tasks. //! -//! @section AsmJit_Concepts Code Generation Concepts +//! \section AsmJit_Main_Concepts Code Generation Concepts //! //! AsmJit has two completely different code generation concepts. The difference -//! is in how the code is generated. The first concept, also referred as the low -//! level concept, is called 'Assembler' and it's the same as writing RAW -//! assembly by using physical registers directly. In this case AsmJit does only -//! instruction encoding, verification and relocation. +//! is in how the code is generated. The first concept, also referred as a low +//! level concept, is called `Assembler` and it's the same as writing RAW +//! assembly by inserting instructions that use physical registers directly. In +//! this case AsmJit does only instruction encoding, verification and final code +//! relocation. //! -//! The second concept, also referred as the high level concept, is called -//! 'Compiler'. Compiler lets you use virtually unlimited number of registers -//! (called variables) significantly simplifying the code generation process. -//! Compiler allocates these virtual registers to physical registers after the -//! code generation is done. This requires some extra effort - Compiler has to -//! 
generate information for each node (instruction, function declaration, -//! function call) in the code, perform a variable liveness analysis and -//! translate the code having variables into code having only registers. +//! The second concept, also referred as a high level concept, is called +//! `Compiler`. Compiler lets you use virtually unlimited number of registers +//! (it calls them variables), which significantly simplifies the code generation +//! process. Compiler allocates these virtual registers to physical registers +//! after the code generation is done. This requires some extra effort - Compiler +//! has to generate information for each node (instruction, function declaration, +//! function call, etc...) in the code, perform a variable liveness analysis and +//! translate the code using variables to a code that uses only physical registers. //! -//! In addition, Compiler understands functions and function calling conventions. +//! In addition, Compiler understands functions and their calling conventions. //! It has been designed in a way that the code generated is always a function -//! having prototype like in a programming language. By having a function -//! prototype the Compiler is able to insert prolog and epilog to a function -//! being generated and it is able to call a function inside a generated one. +//! having a prototype like a real programming language. By having a function +//! prototype the Compiler is able to insert prolog and epilog sequence to the +//! function being generated and it's able to also generate a necessary code +//! to call other function from your own code. //! -//! There is no conclusion on which concept is better. Assembler brings full -//! control on how the code is generated, while Compiler makes the generation -//! more portable. +//! There is no conclusion on which concept is better. `Assembler` brings full +//! control and the best performance, while `Compiler` makes the code-generation +//! 
more fun and more portable. //! -//! @section AsmJit_Main_CodeGeneration Code Generation +//! \section AsmJit_Main_Sections Documentation Sections //! -//! - \ref asmjit_base_general "Assembler core" - Operands, intrinsics and low-level assembler. -//! - \ref asmjit_compiler "Compiler" - High level code generation. -//! - \ref asmjit_cpuinfo "Cpu Information" - Get information about host processor. -//! - \ref asmjit_logging "Logging" - Logging and error handling. -//! - \ref AsmJit_MemoryManagement "Memory Management" - Virtual memory management. +//! AsmJit documentation is structured into the following sections: +//! - \ref asmjit_base "Base" - Base API (architecture independent). +//! - \ref asmjit_x86 "X86/X64" - X86/X64 API. //! -//! @section AsmJit_Main_HomePage AsmJit Homepage +//! \section AsmJit_Main_HomePage AsmJit Homepage //! //! - https://github.com/kobalicek/asmjit @@ -70,89 +70,116 @@ // [asmjit_base] // ============================================================================ -//! \defgroup asmjit_base AsmJit +//! \defgroup asmjit_base AsmJit Base API (architecture independent) //! -//! \brief AsmJit. - -// ============================================================================ -// [asmjit_base_general] -// ============================================================================ - -//! \defgroup asmjit_base_general AsmJit General API -//! \ingroup asmjit_base +//! \brief Base API. //! -//! \brief AsmJit general API. +//! Base API contains all classes that are platform and architecture independent. //! -//! Contains all `asmjit` classes and helper functions that are architecture -//! independent or abstract. Abstract classes are implemented by the backend, -//! for example `Assembler` is implemented by `X86Assembler`. +//! Code-Generation and Operands +//! ---------------------------- +//! +//! List of the most useful code-generation and operand classes: +//! - \ref asmjit::Assembler - Low-level code-generation. +//! 
- \ref asmjit::CodeGen - Abstract code-generation that serializes to `Assembler`:
+//!     - \ref asmjit::Compiler - High-level code-generation.
+//!   - \ref asmjit::Runtime - Describes where the code is stored and how it's executed:
+//!     - \ref asmjit::HostRuntime - Runtime that runs on the host machine:
+//!       - \ref asmjit::JitRuntime - Runtime designed for JIT code generation and execution.
+//!       - \ref asmjit::StaticRuntime - Runtime for code that starts at a specific address.
+//!   - \ref asmjit::Stream - Stream is a list of \ref HLNode objects stored as a double
+//!     linked list:
+//!     - \ref asmjit::HLNode - Base node interface:
+//!       - \ref asmjit::HLInst - Instruction node.
+//!       - \ref asmjit::HLData - Data node.
+//!       - \ref asmjit::HLAlign - Align directive node.
+//!       - \ref asmjit::HLLabel - Label node.
+//!       - \ref asmjit::HLComment - Comment node.
+//!       - \ref asmjit::HLSentinel - Sentinel node.
+//!       - \ref asmjit::HLHint - Hint node.
+//!       - \ref asmjit::HLFunc - Function declaration node.
+//!       - \ref asmjit::HLRet - Function return node.
+//!       - \ref asmjit::HLCall - Function call node.
+//!       - \ref asmjit::HLCallArg - Function call argument node.
+//!   - \ref asmjit::Operand - base class for all operands:
+//!     - \ref asmjit::Reg - Register operand (`Assembler` only).
+//!     - \ref asmjit::Var - Variable operand (`Compiler` only).
+//!     - \ref asmjit::Mem - Memory operand.
+//!     - \ref asmjit::Imm - Immediate operand.
+//!     - \ref asmjit::Label - Label operand.
 //!
-//! - See `Assembler` for low level code generation documentation.
-//! - See `Compiler` for high level code generation documentation.
-//! - See `Operand` for operand's overview.
+//! The following snippet shows how to set up a basic JIT code generation:
+//!
+//! ~~~
+//! using namespace asmjit;
+//!
+//! int main(int argc, char* argv[]) {
+//!   // JIT runtime is designed for JIT code generation and execution.
+//!   JitRuntime runtime;
+//!
+//!   // An Assembler instance needs to know the runtime in order to function.
+//!   X86Assembler a(&runtime);
+//!
+//!   // Compiler (if you intend to use it) requires an assembler instance.
+//!   X86Compiler c(&a);
+//!
+//!   return 0;
+//! }
+//! ~~~
 //!
 //! Logging and Error Handling
 //! --------------------------
 //!
-//! AsmJit contains robust interface that can be used to log the generated code
-//! and to handle possible errors. Base logging interface is defined in `Logger`
-//! class that is abstract and can be overridden. AsmJit contains two loggers
-//! that can be used out of the box - `FileLogger` that logs into a pure C
-//! `FILE*` stream and `StringLogger` that just concatenates all log messages
-//! by using a `StringBuilder` class.
+//! AsmJit contains a robust interface that can be used to log the generated code
+//! and to handle possible errors. Base logging interface is provided by \ref
+//! Logger, which is abstract and can be used as a base for your own logger.
+//! AsmJit also implements some trivial logging concepts out of the box to
+//! simplify the development. \ref FileLogger logs into a C `FILE*` stream and
+//! \ref StringLogger concatenates all log messages into a single string.
 //!
-//! The following snippet shows how to setup a logger that logs to `stderr`:
+//! The following snippet shows how to set up a basic logger and error handler:
 //!
 //! ~~~
-//! // `FileLogger` instance.
-//! FileLogger logger(stderr);
+//! using namespace asmjit;
 //!
-//! // `Compiler` or any other `CodeGen` interface.
-//! host::Compiler c;
+//! struct MyErrorHandler : public ErrorHandler {
+//!   virtual bool handleError(Error code, const char* message, void* origin) {
+//!     printf("Error 0x%0.8X: %s\n", code, message);
 //!
-//! // use `setLogger` to replace the `CodeGen` logger.
-//! c.setLogger(&logger);
+//!     // True  - error handled and code generation can continue.
+//!     // False - error not handled, code generation should stop.
+//!     return false;
+//!   }
+//! }
+//!
+//! 
int main(int argc, char* argv[]) { +//! JitRuntime runtime; +//! FileLogger logger(stderr); +//! MyErrorHandler eh; +//! +//! X86Assembler a(&runtime); +//! a.setLogger(&logger); +//! a.setErrorHandler(&eh); +//! +//! ... +//! +//! return 0; +//! } //! ~~~ //! -//! \sa \ref Logger, \ref FileLogger, \ref StringLogger. - -// ============================================================================ -// [asmjit_base_compiler] -// ============================================================================ - -//! \defgroup asmjit_base_compiler AsmJit Compiler -//! \ingroup asmjit_base +//! AsmJit also contains an \ref ErrorHandler, which is an abstract class that +//! can be used to implement your own error handling. It can be associated with +//! \ref Assembler and used to report all errors. It's a very convenient way to +//! be aware of any error that happens during the code generation without making +//! the error handling complicated. //! -//! \brief AsmJit code-tree used by Compiler. -//! -//! AsmJit intermediate code-tree is a double-linked list that is made of nodes -//! that represent assembler instructions, directives, labels and high-level -//! constructs compiler is using to represent functions and function calls. The -//! node list can only be used together with \ref Compiler. -//! -//! TODO - -// ============================================================================ -// [asmjit_base_util] -// ============================================================================ - -//! \defgroup asmjit_base_util AsmJit Utilities -//! \ingroup asmjit_base -//! -//! \brief AsmJit utility classes. -//! -//! AsmJit contains numerous utility classes that are needed by the library -//! itself. The most useful ones have been made public and are now exported. -//! -//! POD Containers -//! -------------- -//! -//! POD containers are used by AsmJit to manage its own data structures. The -//! following classes can be used by AsmJit consumers: -//! -//! 
- \ref PodVector - Simple growing array-like container for POD data. -//! - \ref StringBuilder - Simple string builder that can append string -//! and integers. +//! List of the most useful logging and error handling classes: +//! - \ref asmjit::Logger - abstract logging interface: +//! - \ref asmjit::FileLogger - A logger that logs to `FILE*`. +//! - \ref asmjit::StringLogger - A logger that concatenates to a single string. +//! - \ref asmjit::ErrorHandler - Easy way to handle \ref Assembler and \ref +//! Compiler +//! errors. //! //! Zone Memory Allocator //! --------------------- @@ -163,51 +190,60 @@ //! is to increment a pointer and return its previous address. See \ref Zone //! for more details. //! -//! CPU Ticks -//! --------- +//! The whole AsmJit library is based on zone memory allocation for performance +//! reasons. It has many other benefits, but the performance was the main one +//! when designing the library. //! -//! CPU Ticks is a simple helper that can be used to do basic benchmarks. See -//! \ref CpuTicks class for more details. +//! POD Containers +//! -------------- //! -//! Integer Utilities +//! POD containers are used by AsmJit to manage its own data structures. The +//! following classes can be used by AsmJit consumers: +//! +//! - \ref asmjit::BitArray - A fixed bit-array that is used internally. +//! - \ref asmjit::PodVector - A simple array-like container for storing +//! POD data. +//! - \ref asmjit::PodList - A single linked list. +//! - \ref asmjit::StringBuilder - A string builder that can append strings +//! and integers. +//! +//! Utility Functions //! ----------------- //! -//! Integer utilities are all implemented by a static class \ref IntUtil. -//! There are utilities for bit manipulation and bit counting, utilities to get -//! an integer minimum / maximum and various other helpers required to perform -//! alignment checks and binary casting from float to integer and vica versa. +//! 
Utility functions are implemented by a static class \ref Utils. There are
+//! utilities for bit manipulation and bit counting, utilities to get an
+//! integer minimum / maximum and various other helpers required to perform
+//! alignment checks and binary casting from float to integer and vice versa.
//!
-//! Vector Utilities
-//! ----------------
+//! String utilities are also implemented by a static class \ref Utils. They
+//! are mostly used by AsmJit internals and not really important to end users.
+//!
+//! SIMD Utilities
+//! --------------
//!
//! SIMD code generation often requires to embed constants after each function
-//! or a block of functions generated. AsmJit contains classes `Vec64`,
-//! `Vec128` and `Vec256` that can be used to prepare data useful when
-//! generating SIMD code.
+//! or at the end of the whole code block. AsmJit contains `Vec64`, `Vec128`
+//! and `Vec256` classes that can be used to prepare data useful when generating
+//! SIMD code.
//!
-//! X86/X64 code generator contains member functions `dmm`, `dxmm` and `dymm`
+//! X86/X64 code generators contain member functions `dmm`, `dxmm`, and `dymm`,
//! which can be used to embed 64-bit, 128-bit and 256-bit data structures into
-//! machine code (both assembler and compiler are supported).
-//!
-//! \note Compiler contains a constant pool, which should be used instead of
-//! embedding constants manually after the function body.
+//! the machine code.

// ============================================================================
// [asmjit_x86]
// ============================================================================

-//! \defgroup asmjit_x86 X86/X64
+//! \defgroup asmjit_x86 AsmJit X86/X64 API
//!
-//! \brief X86/X64 module

-// ============================================================================
-// [asmjit_x86_general]
-// ============================================================================

-//! \defgroup asmjit_x86_general X86/X64 General API
-//! 
\ingroup asmjit_x86
+//! \brief X86/X64 API
//!
-//! \brief X86/X64 general API.
+//! X86/X64 Code Generation
+//! -----------------------
+//!
+//! X86/X64 code generation is realized through:
+//! - \ref X86Assembler - low-level code generation.
+//! - \ref X86Compiler - high-level code generation.
//!
//! X86/X64 Registers
//! -----------------
@@ -216,16 +252,17 @@
//! be used directly (like `eax`, `mm`, `xmm`, ...) or created through
//! these functions:
//!
-//! - `asmjit::gpb_lo()` - Get Gpb-lo register.
-//! - `asmjit::gpb_hi()` - Get Gpb-hi register.
-//! - `asmjit::gpw()` - Get Gpw register.
-//! - `asmjit::gpd()` - Get Gpd register.
-//! - `asmjit::gpq()` - Get Gpq Gp register.
-//! - `asmjit::gpz()` - Get Gpd/Gpq register.
-//! - `asmjit::fp()` - Get Fp register.
-//! - `asmjit::mm()` - Get Mm register.
-//! - `asmjit::xmm()` - Get Xmm register.
-//! - `asmjit::ymm()` - Get Ymm register.
+//! - `asmjit::x86::gpb_lo()` - Get an 8-bit Gpb low register.
+//! - `asmjit::x86::gpb_hi()` - Get an 8-bit Gpb high register.
+//! - `asmjit::x86::gpw()` - Get a 16-bit Gpw register.
+//! - `asmjit::x86::gpd()` - Get a 32-bit Gpd register.
+//! - `asmjit::x86::gpq()` - Get a 64-bit Gpq Gp register.
+//! - `asmjit::x86::gpz()` - Get a 32-bit or 64-bit Gpd/Gpq register.
+//! - `asmjit::x86::fp()` - Get an 80-bit Fp register.
+//! - `asmjit::x86::mm()` - Get a 64-bit Mm register.
+//! - `asmjit::x86::xmm()` - Get a 128-bit Xmm register.
+//! - `asmjit::x86::ymm()` - Get a 256-bit Ymm register.
+//! - `asmjit::x86::zmm()` - Get a 512-bit Zmm register.
//!
//! X86/X64 Addressing
//! ------------------
@@ -235,32 +272,33 @@
//! `BaseMem` class. These functions are used to make operands that represents
//! memory addresses:
//!
-//! - `asmjit::ptr()` - Address size not specified.
-//! - `asmjit::byte_ptr()` - 1 byte.
-//! - `asmjit::word_ptr()` - 2 bytes (Gpw size).
-//! - `asmjit::dword_ptr()` - 4 bytes (Gpd size).
-//! - `asmjit::qword_ptr()` - 8 bytes (Gpq/Mm size).
-//! 
- `asmjit::tword_ptr()` - 10 bytes (FPU).
-//! - `asmjit::oword_ptr()` - 16 bytes (Xmm size).
-//! - `asmjit::yword_ptr()` - 32 bytes (Ymm size).
-//! - `asmjit::zword_ptr()` - 64 bytes (Zmm size).
+//! - `asmjit::x86::ptr()` - Address size not specified.
+//! - `asmjit::x86::byte_ptr()` - 1 byte.
+//! - `asmjit::x86::word_ptr()` - 2 bytes (Gpw size).
+//! - `asmjit::x86::dword_ptr()` - 4 bytes (Gpd size).
+//! - `asmjit::x86::qword_ptr()` - 8 bytes (Gpq/Mm size).
+//! - `asmjit::x86::tword_ptr()` - 10 bytes (FPU size).
+//! - `asmjit::x86::oword_ptr()` - 16 bytes (Xmm size).
+//! - `asmjit::x86::yword_ptr()` - 32 bytes (Ymm size).
+//! - `asmjit::x86::zword_ptr()` - 64 bytes (Zmm size).
//!
-//! Most useful function to make pointer should be `asmjit::ptr()`. It creates
-//! pointer to the target with unspecified size. Unspecified size works in all
-//! intrinsics where are used registers (this means that size is specified by
-//! register operand or by instruction itself). For example `asmjit::ptr()`
-//! can't be used with `Assembler::inc()` instruction. In this case size must
-//! be specified and it's also reason to make difference between pointer sizes.
+//! The most useful function to make a pointer is `asmjit::x86::ptr()`. It
+//! creates a pointer to the target with an unspecified size. An unspecified
+//! size works in all intrinsics where registers are used (this means that the
+//! size is specified by the register operand or by the instruction itself). For
+//! example `asmjit::x86::ptr()` can't be used with `Assembler::inc()`. In this
+//! case the size must be specified and it's also the reason to differentiate
+//! between pointer sizes.
//!
-//! Supported are simple address forms `[base + displacement]` and complex
-//! address forms `[base + index * scale + displacement]`.
+//! X86 and X64 support simple address forms like `[base + displacement]` and
+//! also complex address forms like `[base + index * scale + displacement]`.
//!
//! 
X86/X64 Immediates //! ------------------ //! //! Immediate values are constants thats passed directly after instruction -//! opcode. To create such value use `imm()` or `imm_u()` methods to create -//! signed or unsigned immediate value. +//! opcode. To create such value use `asmjit::imm()` or `asmjit::imm_u()` +//! methods to create a signed or unsigned immediate value. //! //! X86/X64 CPU Information //! ----------------------- @@ -290,7 +328,7 @@ //! use certain CPU features. For example there used to be a SSE/SSE2 detection //! in the past and today there is often AVX/AVX2 detection. //! -//! The example below shows how to detect SSE2: +//! The example below shows how to detect SSE4.1: //! //! ~~~ //! using namespace asmjit; @@ -298,11 +336,11 @@ //! // Get `X86CpuInfo` global instance. //! const X86CpuInfo* cpuInfo = X86CpuInfo::getHost(); //! -//! if (cpuInfo->hasFeature(kX86CpuFeatureSSE2)) { -//! // Processor has SSE2. +//! if (cpuInfo->hasFeature(kX86CpuFeatureSSE4_1)) { +//! // Processor has SSE4.1. //! } -//! else if (cpuInfo->hasFeature(kX86CpuFeatureMMX)) { -//! // Processor doesn't have SSE2, but has MMX. +//! else if (cpuInfo->hasFeature(kX86CpuFeatureSSE2)) { +//! // Processor doesn't have SSE4.1, but has SSE2. //! } //! else { //! // Processor is archaic; it's a wonder AsmJit works here! @@ -314,56 +352,21 @@ //! ~~~ //! using namespace asmjit; //! -//! // Call cpuid, first two arguments are passed in Eax/Ecx. +//! // Call CPUID, first two arguments are passed in EAX/ECX. //! X86CpuId out; //! X86CpuUtil::callCpuId(0, 0, &out); //! -//! // If Eax argument is 0, Ebx, Ecx and Edx registers are filled with a cpu vendor. +//! // If EAX argument is 0, EBX, ECX and EDX registers are filled with a CPU vendor. //! char cpuVendor[13]; //! ::memcpy(cpuVendor, &out.ebx, 4); //! ::memcpy(cpuVendor + 4, &out.edx, 4); //! ::memcpy(cpuVendor + 8, &out.ecx, 4); //! vendor[12] = '\0'; //! -//! // Print a CPU vendor retrieved from CPUID. -//! 
::printf("%s", cpuVendor); +//! // Print the CPU vendor retrieved from CPUID. +//! ::printf("CPU Vendor: %s\n", cpuVendor); //! ~~~ -// ============================================================================ -// [asmjit_x86_compiler] -// ============================================================================ - -//! \defgroup asmjit_x86_compiler X86/X64 Code-Tree -//! \ingroup asmjit_x86 -//! -//! \brief X86/X64 code-tree and helpers. - -// ============================================================================ -// [asmjit_x86_inst] -// ============================================================================ - -//! \defgroup asmjit_x86_inst X86/X64 Instructions -//! \ingroup asmjit_x86 -//! -//! \brief X86/X64 low-level instruction definitions. - -// ============================================================================ -// [asmjit_x86_util] -// ============================================================================ - -//! \defgroup asmjit_x86_util X86/X64 Utilities -//! \ingroup asmjit_x86 -//! -//! \brief X86/X64 utility classes. - -// ============================================================================ -// [asmjit_contrib] -// ============================================================================ - -//! \defgroup asmjit_contrib Contributions -//! -//! \brief Contributions. 
- // [Dependencies - Base] #include "./base.h" diff --git a/src/asmjit/base.h b/src/asmjit/base.h index 73ec4b3..774fa8c 100644 --- a/src/asmjit/base.h +++ b/src/asmjit/base.h @@ -12,23 +12,23 @@ #include "./build.h" #include "./base/assembler.h" -#include "./base/codegen.h" -#include "./base/compiler.h" #include "./base/constpool.h" #include "./base/containers.h" #include "./base/cpuinfo.h" -#include "./base/cputicks.h" -#include "./base/error.h" #include "./base/globals.h" -#include "./base/intutil.h" -#include "./base/lock.h" #include "./base/logger.h" #include "./base/operand.h" #include "./base/runtime.h" -#include "./base/string.h" +#include "./base/utils.h" #include "./base/vectypes.h" #include "./base/vmem.h" #include "./base/zone.h" +#if !defined(ASMJIT_DISABLE_COMPILER) +#include "./base/hlstream.h" +#include "./base/compiler.h" +#include "./base/compilerfunc.h" +#endif // !ASMJIT_DISABLE_COMPILER + // [Guard] #endif // _ASMJIT_BASE_H diff --git a/src/asmjit/base/assembler.cpp b/src/asmjit/base/assembler.cpp index 7982006..e0d3a19 100644 --- a/src/asmjit/base/assembler.cpp +++ b/src/asmjit/base/assembler.cpp @@ -9,7 +9,7 @@ // [Dependencies - AsmJit] #include "../base/assembler.h" -#include "../base/intutil.h" +#include "../base/utils.h" #include "../base/vmem.h" // [Dependenceis - C] @@ -21,35 +21,116 @@ namespace asmjit { // ============================================================================ -// [asmjit::Assembler - Construction / Destruction] +// [asmjit::ErrorHandler] // ============================================================================ -Assembler::Assembler(Runtime* runtime) : - CodeGen(runtime), - _buffer(NULL), - _end(NULL), - _cursor(NULL), - _trampolineSize(0), - _comment(NULL), - _unusedLinks(NULL) {} +ErrorHandler::ErrorHandler() {} +ErrorHandler::~ErrorHandler() {} -Assembler::~Assembler() { - reset(true); +ErrorHandler* ErrorHandler::addRef() const { + return const_cast(this); +} +void ErrorHandler::release() {} + +// 
============================================================================
+// [asmjit::CodeGen]
+// ============================================================================
+
+CodeGen::CodeGen()
+ : _assembler(NULL),
+ _hlId(0),
+ _arch(kArchNone),
+ _regSize(0),
+ _finalized(false),
+ _reserved(0),
+ _lastError(kErrorNotInitialized) {}
+CodeGen::~CodeGen() {}
+
+Error CodeGen::setLastError(Error error, const char* message) {
+ // Special case, reset the last error if the error is `kErrorOk`.
+ if (error == kErrorOk) {
+ _lastError = kErrorOk;
+ return kErrorOk;
+ }
+
+ // Don't do anything if the code-generator doesn't have an associated assembler.
+ Assembler* assembler = getAssembler();
+ if (assembler == NULL)
+ return error;
+
+ if (message == NULL)
+ message = DebugUtils::errorAsString(error);
+
+ // Logging is skipped if the error is handled by `ErrorHandler`.
+ ErrorHandler* eh = assembler->getErrorHandler();
+ ASMJIT_TLOG("[ERROR (CodeGen)] %s (0x%0.8u) %s\n", message,
+ static_cast<unsigned int>(error),
+ !eh ? "(Possibly unhandled?)" : "");
+
+ if (eh != NULL && eh->handleError(error, message, this))
+ return error;
+
+#if !defined(ASMJIT_DISABLE_LOGGER)
+ Logger* logger = assembler->getLogger();
+ if (logger != NULL)
+ logger->logFormat(kLoggerStyleComment,
+ "*** ERROR (CodeGen): %s (0x%0.8u).\n", message,
+ static_cast<unsigned int>(error));
+#endif // !ASMJIT_DISABLE_LOGGER
+
+ // The handler->handleError() function may throw an exception or longjmp()
+ // to terminate the execution of `setLastError()`. This is the reason why
+ // we have delayed changing the `_lastError` member until now. 
+ _lastError = error; + return error; } // ============================================================================ -// [asmjit::Assembler - Clear / Reset] +// [asmjit::Assembler - Construction / Destruction] +// ============================================================================ + +Assembler::Assembler(Runtime* runtime) + : _runtime(runtime), + _logger(NULL), + _errorHandler(NULL), + _arch(kArchNone), + _regSize(0), + _reserved(0), + _features(Utils::mask(kAssemblerFeatureOptimizedAlign)), + _instOptions(0), + _lastError(runtime ? kErrorOk : kErrorNotInitialized), + _hlIdGenerator(0), + _hlAttachedCount(0), + _zoneAllocator(8192 - Zone::kZoneOverhead), + _buffer(NULL), + _end(NULL), + _cursor(NULL), + _trampolinesSize(0), + _comment(NULL), + _unusedLinks(NULL), + _labelList(), + _relocList() {} + +Assembler::~Assembler() { + reset(true); + + if (_errorHandler != NULL) + _errorHandler->release(); +} + +// ============================================================================ +// [asmjit::Assembler - Reset] // ============================================================================ void Assembler::reset(bool releaseMemory) { - // CodeGen members. - _baseAddress = kNoBaseAddress; + _features = Utils::mask(kAssemblerFeatureOptimizedAlign); _instOptions = 0; - _error = kErrorOk; + _lastError = kErrorOk; + _hlIdGenerator = 0; + _hlAttachedCount = 0; - _baseZone.reset(releaseMemory); + _zoneAllocator.reset(releaseMemory); - // Assembler members. 
if (releaseMemory && _buffer != NULL) {
ASMJIT_FREE(_buffer);
_buffer = NULL;
@@ -57,7 +138,7 @@ void Assembler::reset(bool releaseMemory) {
}
_cursor = _buffer;
- _trampolineSize = 0;
+ _trampolinesSize = 0;
_comment = NULL;
_unusedLinks = NULL;
@@ -66,6 +147,57 @@ void Assembler::reset(bool releaseMemory) {
_relocList.reset(releaseMemory);
}
+// ============================================================================
+// [asmjit::Assembler - Logging & Error Handling]
+// ============================================================================
+
+Error Assembler::setLastError(Error error, const char* message) {
+ // Special case, reset the last error if the error is `kErrorOk`.
+ if (error == kErrorOk) {
+ _lastError = kErrorOk;
+ return kErrorOk;
+ }
+
+ if (message == NULL)
+ message = DebugUtils::errorAsString(error);
+
+ // Logging is skipped if the error is handled by `ErrorHandler`.
+ ErrorHandler* eh = _errorHandler;
+ ASMJIT_TLOG("[ERROR (Assembler)] %s (0x%0.8u) %s\n", message,
+ static_cast<unsigned int>(error),
+ !eh ? "(Possibly unhandled?)" : "");
+
+ if (eh != NULL && eh->handleError(error, message, this))
+ return error;
+
+#if !defined(ASMJIT_DISABLE_LOGGER)
+ Logger* logger = _logger;
+ if (logger != NULL)
+ logger->logFormat(kLoggerStyleComment,
+ "*** ERROR (Assembler): %s (0x%0.8u).\n", message,
+ static_cast<unsigned int>(error));
+#endif // !ASMJIT_DISABLE_LOGGER
+
+ // The handler->handleError() function may throw an exception or longjmp()
+ // to terminate the execution of `setLastError()`. This is the reason why
+ // we have delayed changing the `_lastError` member until now. 
+ _lastError = error; + return error; +} + +Error Assembler::setErrorHandler(ErrorHandler* handler) { + ErrorHandler* oldHandler = _errorHandler; + + if (oldHandler != NULL) + oldHandler->release(); + + if (handler != NULL) + handler = handler->addRef(); + + _errorHandler = handler; + return kErrorOk; +} + // ============================================================================ // [asmjit::Assembler - Buffer] // ============================================================================ @@ -75,8 +207,8 @@ Error Assembler::_grow(size_t n) { size_t after = getOffset() + n; // Overflow. - if (n > IntUtil::maxUInt() - capacity) - return setError(kErrorNoHeapMemory); + if (n > IntTraits::maxValue() - capacity) + return setLastError(kErrorNoHeapMemory); // Grow is called when allocation is needed, so it shouldn't happen, but on // the other hand it is simple to catch and it's not an error. @@ -98,7 +230,7 @@ Error Assembler::_grow(size_t n) { // Overflow. if (oldCapacity > capacity) - return setError(kErrorNoHeapMemory); + return setLastError(kErrorNoHeapMemory); } while (capacity - kMemAllocOverhead < after); capacity -= kMemAllocOverhead; @@ -117,7 +249,7 @@ Error Assembler::_reserve(size_t n) { newBuffer = static_cast(ASMJIT_REALLOC(_buffer, n)); if (newBuffer == NULL) - return setError(kErrorNoHeapMemory); + return setLastError(kErrorNoHeapMemory); size_t offset = getOffset(); @@ -132,41 +264,23 @@ Error Assembler::_reserve(size_t n) { // [asmjit::Assembler - Label] // ============================================================================ -Error Assembler::_registerIndexedLabels(size_t index) { - size_t i = _labelList.getLength(); - if (index < i) - return kErrorOk; +Error Assembler::_newLabelId() { + LabelData* data = _zoneAllocator.allocT(); - if (_labelList._grow(index - i) != kErrorOk) - return setError(kErrorNoHeapMemory); + data->offset = -1; + data->links = NULL; + data->hlId = 0; + data->hlData = NULL; - LabelData data; - data.offset = -1; - 
data.links = NULL; + uint32_t id = OperandUtil::makeLabelId(static_cast(_labelList.getLength())); + Error error = _labelList.append(data); - do { - _labelList.append(data); - } while (++i < index); + if (error != kErrorOk) { + setLastError(kErrorNoHeapMemory); + return kInvalidValue; + } - return kErrorOk; -} - -Error Assembler::_newLabel(Label* dst) { - dst->_label.op = kOperandTypeLabel; - dst->_label.size = 0; - dst->_label.id = OperandUtil::makeLabelId(static_cast(_labelList.getLength())); - - LabelData data; - data.offset = -1; - data.links = NULL; - - if (_labelList.append(data) != kErrorOk) - goto _NoMemory; - return kErrorOk; - -_NoMemory: - dst->_label.id = kInvalidValue; - return setError(kErrorNoHeapMemory); + return id; } LabelLink* Assembler::_newLabelLink() { @@ -176,7 +290,7 @@ LabelLink* Assembler::_newLabelLink() { _unusedLinks = link->prev; } else { - link = _baseZone.allocT(); + link = _zoneAllocator.allocT(); if (link == NULL) return NULL; } @@ -196,11 +310,11 @@ Error Assembler::bind(const Label& label) { // Label can be bound only once. 
if (data->offset != -1) - return setError(kErrorLabelAlreadyBound); + return setLastError(kErrorLabelAlreadyBound); #if !defined(ASMJIT_DISABLE_LOGGER) if (_logger) { - StringBuilderT<256> sb; + StringBuilderTmp<256> sb; sb.setFormat("L%u:", index); size_t binSize = 0; @@ -241,7 +355,7 @@ Error Assembler::bind(const Label& label) { } else { ASMJIT_ASSERT(size == 1); - if (IntUtil::isInt8(patchedValue)) + if (Utils::isInt8(patchedValue)) setByteAt(offset, static_cast(patchedValue & 0xFF)); else error = kErrorIllegalDisplacement; @@ -267,7 +381,7 @@ Error Assembler::bind(const Label& label) { data->links = NULL; if (error != kErrorOk) - return setError(error); + return setLastError(error); _comment = NULL; return error; @@ -281,7 +395,7 @@ Error Assembler::embed(const void* data, uint32_t size) { if (getRemainingSpace() < size) { Error error = _grow(size); if (error != kErrorOk) - return setError(error); + return setLastError(error); } uint8_t* cursor = getCursor(); @@ -302,10 +416,7 @@ Error Assembler::embed(const void* data, uint32_t size) { size_t Assembler::relocCode(void* dst, Ptr baseAddress) const { if (baseAddress == kNoBaseAddress) - baseAddress = hasBaseAddress() ? getBaseAddress() : static_cast((uintptr_t)dst); - else if (getBaseAddress() != baseAddress) - return 0; - + baseAddress = static_cast((uintptr_t)dst); return _relocCode(dst, baseAddress); } @@ -315,14 +426,14 @@ size_t Assembler::relocCode(void* dst, Ptr baseAddress) const { void* Assembler::make() { // Do nothing on error condition or if no instruction has been emitted. 
- if (_error != kErrorOk || getCodeSize() == 0) + if (_lastError != kErrorOk || getCodeSize() == 0) return NULL; void* p; Error error = _runtime->add(&p, this); if (error != kErrorOk) - setError(error); + setLastError(error); return p; } @@ -349,6 +460,10 @@ Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const return _emit(code, o0, o1, o2, NA); } +Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) { + return _emit(code, o0, o1, o2, o3); +} + Error Assembler::emit(uint32_t code, int o0) { return _emit(code, Imm(o0), NA, NA, NA); } diff --git a/src/asmjit/base/assembler.h b/src/asmjit/base/assembler.h index 402d5a1..43a6d45 100644 --- a/src/asmjit/base/assembler.h +++ b/src/asmjit/base/assembler.h @@ -9,9 +9,7 @@ #define _ASMJIT_BASE_ASSEMBLER_H // [Dependencies - AsmJit] -#include "../base/codegen.h" #include "../base/containers.h" -#include "../base/error.h" #include "../base/logger.h" #include "../base/operand.h" #include "../base/runtime.h" @@ -22,9 +20,51 @@ namespace asmjit { -//! \addtogroup asmjit_base_general +//! \addtogroup asmjit_base //! \{ +// ============================================================================ +// [asmjit::AssemblerFeatures] +// ============================================================================ + +//! Features of \ref Assembler. +ASMJIT_ENUM(AssemblerFeatures) { + //! Emit optimized code-alignment sequences (`Assembler` and `Compiler`). + //! + //! Default `true`. + //! + //! X86/X64 + //! ------- + //! + //! Default align sequence used by X86/X64 architecture is one-byte 0x90 + //! opcode that is mostly shown by disassemblers as nop. However there are + //! more optimized align sequences for 2-11 bytes that may execute faster. + //! If this feature is enabled asmjit will generate specialized sequences + //! for alignment between 1 to 11 bytes. Also when `X86Compiler` is used, + //! 
it can add REX prefixes into the code to make some instructions greater + //! so no alignment sequence is needed. + kAssemblerFeatureOptimizedAlign = 0, + + //! Emit jump-prediction hints (`Assembler` and `Compiler`). + //! + //! Default `false`. + //! + //! X86/X64 + //! ------- + //! + //! Jump prediction is usually based on the direction of the jump. If the + //! jump is backward it is usually predicted as taken; and if the jump is + //! forward it is usually predicted as not-taken. The reason is that loops + //! generally use backward jumps and conditions usually use forward jumps. + //! However this behavior can be overridden by using instruction prefixes. + //! If this option is enabled these hints will be emitted. + //! + //! This feature is disabled by default, because the only processor that + //! used to take into consideration prediction hints was P4. Newer processors + //! implement heuristics for branch prediction that ignores any static hints. + kAssemblerFeaturePredictedJumps = 1 +}; + // ============================================================================ // [asmjit::InstId] // ============================================================================ @@ -44,30 +84,43 @@ ASMJIT_ENUM(InstOptions) { //! No instruction options. kInstOptionNone = 0x00000000, - //! Emit short form of the instruction. + //! Emit short form of the instruction (X86/X64 only). //! - //! X86/X64: + //! X86/X64 Specific + //! ---------------- //! //! Short form is mostly related to jmp and jcc instructions, but can be used //! by other instructions supporting 8-bit or 32-bit immediates. This option //! can be dangerous if the short jmp/jcc is required, but not encodable due - //! to large displacement, in such case an error happens and the whole - //! assembler/compiler stream is unusable. + //! to a large displacement, in such case an error is reported. kInstOptionShortForm = 0x00000001, - //! Emit long form of the instruction. + //! 
Emit long form of the instruction (X86/X64 only). //! - //! X86/X64: + //! X86/X64 Specific + //! ---------------- //! - //! Long form is mosrlt related to jmp and jcc instructions, but like the + //! Long form is mostly related to jmp and jcc instructions, but like the //! `kInstOptionShortForm` option it can be used by other instructions //! supporting both 8-bit and 32-bit immediates. kInstOptionLongForm = 0x00000002, //! Condition is likely to be taken. + //! + //! X86/X64 Specific + //! ---------------- + //! + //! This option has no effect at the moment. Intel stopped supporting + //! conditional hints after P4 and AMD has never supported them. kInstOptionTaken = 0x00000004, //! Condition is unlikely to be taken. + //! + //! X86/X64 Specific + //! ---------------- + //! + //! This option has no effect at the moment. Intel stopped supporting + //! conditional hints after P4 and AMD has never supported them. kInstOptionNotTaken = 0x00000008, //! Don't follow the jump (Compiler-only). @@ -76,6 +129,36 @@ ASMJIT_ENUM(InstOptions) { kInstOptionUnfollow = 0x00000010 }; +// ============================================================================ +// [asmjit::AlignMode] +// ============================================================================ + +//! Code aligning mode. +ASMJIT_ENUM(AlignMode) { + //! Align by emitting a sequence that can be executed (code). + kAlignCode = 0, + //! Align by emitting a sequence that shouldn't be executed (data). + kAlignData = 1, + //! Align by emitting a sequence of zeros. + kAlignZero = 2 +}; + +// ============================================================================ +// [asmjit::RelocMode] +// ============================================================================ + +//! Relocation mode. +ASMJIT_ENUM(RelocMode) { + //! Relocate an absolute address to an absolute address. + kRelocAbsToAbs = 0, + //! Relocate a relative address to an absolute address. + kRelocRelToAbs = 1, + //! 
Relocate an absolute address to a relative address. + kRelocAbsToRel = 2, + //! Relocate an absolute address to a relative address or use trampoline. + kRelocTrampoline = 3 +}; + // ============================================================================ // [asmjit::LabelLink] // ============================================================================ @@ -90,7 +173,7 @@ struct LabelLink { intptr_t offset; //! Inlined displacement. intptr_t displacement; - //! RelocId if link must be absolute when relocated. + //! RelocId in case the link has to be absolute after relocated. intptr_t relocId; }; @@ -106,17 +189,24 @@ struct LabelData { intptr_t offset; //! Label links chain. LabelLink* links; + + //! An ID of a code-generator that created this label. + uint64_t hlId; + //! Pointer to the data the code-generator associated with the label. + void* hlData; }; + // ============================================================================ // [asmjit::RelocData] // ============================================================================ //! \internal //! -//! Code relocation data (relative vs absolute addresses). +//! Code relocation data (relative vs. absolute addresses). //! -//! X86/X64: +//! X86/X64 Specific +//! ---------------- //! //! X86 architecture uses 32-bit absolute addressing model by memory operands, //! but 64-bit mode uses relative addressing model (RIP + displacement). In @@ -135,17 +225,198 @@ struct RelocData { Ptr data; }; +// ============================================================================ +// [asmjit::ErrorHandler] +// ============================================================================ + +//! Error handler. +//! +//! Error handler can be used to override the default behavior of `CodeGen` +//! error handling and propagation. See `handleError()` on how to override it. +//! +//! Please note that `addRef` and `release` functions are used, but there is +//! 
no reference counting implemented by default, reimplement to change the +//! default behavior. +struct ASMJIT_VIRTAPI ErrorHandler { + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `ErrorHandler` instance. + ASMJIT_API ErrorHandler(); + //! Destroy the `ErrorHandler` instance. + ASMJIT_API virtual ~ErrorHandler(); + + // -------------------------------------------------------------------------- + // [AddRef / Release] + // -------------------------------------------------------------------------- + + //! Reference this error handler. + //! + //! \note This member function is provided for convenience. The default + //! implementation does nothing. If you are working in environment where + //! multiple `ErrorHandler` instances are used by a different code generators + //! you may provide your own functionality for reference counting. In that + //! case `addRef()` and `release()` functions should be overridden. + ASMJIT_API virtual ErrorHandler* addRef() const; + + //! Release this error handler. + //! + //! \note This member function is provided for convenience. See `addRef()` + //! for more detailed information related to reference counting. + ASMJIT_API virtual void release(); + + // -------------------------------------------------------------------------- + // [Handle Error] + // -------------------------------------------------------------------------- + + //! Error handler (pure). + //! + //! Error handler is called when an error happened. An error can happen in + //! many places, but error handler is mostly used by `Assembler` and + //! `Compiler` classes to report anything that may cause incorrect code + //! generation. There are multiple ways how the error handler can be used + //! and each has it's pros/cons. + //! + //! 
AsmJit library doesn't use exceptions and can be compiled with or without
+ //! exception handling support. Even if the AsmJit library is compiled without
+ //! exceptions it is exception-safe and handleError() can report an incoming
+ //! error by throwing an exception of any type. It's guaranteed that the
+ //! exception won't be caught by AsmJit and will be propagated to the code
+ //! calling AsmJit `Assembler` or `Compiler` methods. Alternative to
+ //! throwing an exception is using `setjmp()` and `longjmp()` pair available
+ //! in the standard C library.
+ //!
+ //! If the exception or setjmp() / longjmp() mechanism is used, the state of
+ //! the `BaseAssembler` or `Compiler` is unchanged and if it's possible the
+ //! execution (instruction serialization) can continue. However if the error
+ //! happened during any phase that translates or modifies the stored code
+ //! (for example relocation done by `Assembler` or analysis/translation
+ //! done by `Compiler`) the execution can't continue and the error will
+ //! be also stored in `Assembler` or `Compiler`.
+ //!
+ //! Finally, if no exceptions nor setjmp() / longjmp() mechanisms were used,
+ //! you can still implement a compatible handling by returning from your
+ //! error handler. Returning `true` means that error was reported and AsmJit
+ //! should continue execution, but `false` sets the error immediately to the
+ //! `Assembler` or `Compiler` and execution shouldn't continue (this is the
+ //! default behavior in case no error handler is used).
+ virtual bool handleError(Error code, const char* message, void* origin) = 0;
+};
+
+// ============================================================================
+// [asmjit::CodeGen]
+// ============================================================================
+
+//! Interface to implement an external code generator (i.e. `Compiler`).
+struct ASMJIT_VIRTAPI CodeGen {
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API CodeGen();
+ ASMJIT_API virtual ~CodeGen();
+
+ // --------------------------------------------------------------------------
+ // [Attach / Reset]
+ // --------------------------------------------------------------------------
+
+ //! \internal
+ //!
+ //! Called to attach this code generator to the `assembler`.
+ virtual Error attach(Assembler* assembler) = 0;
+
+ //! Reset the code-generator (also detaches if attached).
+ virtual void reset(bool releaseMemory) = 0;
+
+ // --------------------------------------------------------------------------
+ // [Finalize]
+ // --------------------------------------------------------------------------
+
+ //! Finalize the code-generation.
+ //!
+ //! The finalization has two passes:
+ //! - serializes code to the attached assembler.
+ //! - resets the `CodeGen` (detaching from the `Assembler` as well) so it can
+ //! be reused or destroyed.
+ virtual Error finalize() = 0;
+
+ // --------------------------------------------------------------------------
+ // [Runtime / Assembler]
+ // --------------------------------------------------------------------------
+
+ //! Get the `Runtime` instance that is associated with the code-generator.
+ ASMJIT_INLINE Runtime* getRuntime() const { return _runtime; }
+ //! Get the `Assembler` instance that is associated with the code-generator.
+ ASMJIT_INLINE Assembler* getAssembler() const { return _assembler; }
+
+ // --------------------------------------------------------------------------
+ // [Architecture]
+ // --------------------------------------------------------------------------
+
+ //! Get the target architecture.
+ ASMJIT_INLINE uint32_t getArch() const { return _arch; }
+ //! 
Get the default register size - 4 or 8 bytes, depends on the target.
+ ASMJIT_INLINE uint32_t getRegSize() const { return _regSize; }
+
+ // --------------------------------------------------------------------------
+ // [Error Handling]
+ // --------------------------------------------------------------------------
+
+ //! Get the last error code.
+ ASMJIT_INLINE Error getLastError() const { return _lastError; }
+ //! Set the last error code and propagate it through the error handler.
+ ASMJIT_API Error setLastError(Error error, const char* message = NULL);
+ //! Clear the last error code.
+ ASMJIT_INLINE void resetLastError() { _lastError = kErrorOk; }
+
+ // --------------------------------------------------------------------------
+ // [CodeGen]
+ // --------------------------------------------------------------------------
+
+ //! Get the code-generator ID, provided by `Assembler` when attached to it.
+ ASMJIT_INLINE uint64_t getHLId() const { return _hlId; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ //! Associated runtime.
+ Runtime* _runtime;
+ //! Associated assembler.
+ Assembler* _assembler;
+
+ //! High-level ID, provided by `Assembler`.
+ //!
+ //! If multiple high-level code generators are associated with a single
+ //! assembler the `_hlId` member can be used to distinguish between them and
+ //! to provide a mechanism to check whether the high-level code generator is
+ //! accessing the resource it really owns.
+ uint64_t _hlId;
+
+ //! Target architecture ID.
+ uint8_t _arch;
+ //! Target architecture GP register size in bytes (4 or 8).
+ uint8_t _regSize;
+ //! The code generator has been finalized.
+ uint8_t _finalized;
+ //! \internal
+ uint8_t _reserved;
+ //! Last error code. 
+ uint32_t _lastError; +}; + // ============================================================================ // [asmjit::Assembler] // ============================================================================ //! Base assembler. //! -//! This class implements the base interface to an assembler. The architecture -//! specific API is implemented by backends. +//! This class implements the base interface that is used by architecture +//! specific assemblers. //! //! \sa Compiler. -struct ASMJIT_VCLASS Assembler : public CodeGen { +struct ASMJIT_VIRTAPI Assembler { ASMJIT_NO_COPY(Assembler) // -------------------------------------------------------------------------- @@ -167,35 +438,160 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { ASMJIT_API void reset(bool releaseMemory = false); // -------------------------------------------------------------------------- - // [Buffer] + // [Runtime] // -------------------------------------------------------------------------- - //! Get capacity of the code buffer. - ASMJIT_INLINE size_t getCapacity() const { - return (size_t)(_end - _buffer); + //! Get the runtime associated with the assembler. + //! + //! NOTE: Runtime is persistent across `reset()` calls. + ASMJIT_INLINE Runtime* getRuntime() const { return _runtime; } + + // -------------------------------------------------------------------------- + // [Architecture] + // -------------------------------------------------------------------------- + + //! Get the target architecture. + ASMJIT_INLINE uint32_t getArch() const { return _arch; } + //! Get the default register size - 4 or 8 bytes, depends on the target. + ASMJIT_INLINE uint32_t getRegSize() const { return _regSize; } + + // -------------------------------------------------------------------------- + // [Logging] + // -------------------------------------------------------------------------- + +#if !defined(ASMJIT_DISABLE_LOGGER) + //! Get whether the assembler has a logger. 
+ ASMJIT_INLINE bool hasLogger() const { return _logger != NULL; } + //! Get the logger. + ASMJIT_INLINE Logger* getLogger() const { return _logger; } + //! Set the logger to `logger`. + ASMJIT_INLINE void setLogger(Logger* logger) { _logger = logger; } +#endif // !ASMJIT_DISABLE_LOGGER + + // -------------------------------------------------------------------------- + // [Error Handling] + // -------------------------------------------------------------------------- + + //! Get the error handler. + ASMJIT_INLINE ErrorHandler* getErrorHandler() const { return _errorHandler; } + //! Set the error handler. + ASMJIT_API Error setErrorHandler(ErrorHandler* handler); + //! Clear the error handler. + ASMJIT_INLINE Error resetErrorHandler() { return setErrorHandler(NULL); } + + //! Get the last error code. + ASMJIT_INLINE Error getLastError() const { return _lastError; } + //! Set the last error code and propagate it through the error handler. + ASMJIT_API Error setLastError(Error error, const char* message = NULL); + //! Clear the last error code. + ASMJIT_INLINE void resetLastError() { _lastError = kErrorOk; } + + // -------------------------------------------------------------------------- + // [External CodeGen] + // -------------------------------------------------------------------------- + + //! \internal + //! + //! Called after the code generator `cg` has been attached to the assembler. + ASMJIT_INLINE void _attached(CodeGen* cg) { + cg->_runtime = getRuntime(); + cg->_assembler = this; + cg->_hlId = _nextExternalId(); + _hlAttachedCount++; } - //! Get the number of remaining bytes (space between cursor and the end of - //! the buffer). - ASMJIT_INLINE size_t getRemainingSpace() const { - return (size_t)(_end - _cursor); + //! \internal + //! + //! Called after the code generator `cg` has been detached from the assembler. + ASMJIT_INLINE void _detached(CodeGen* cg) { + cg->_runtime = NULL; + cg->_assembler = NULL; + cg->_hlId = 0; + _hlAttachedCount--; } - //! 
Get buffer. - ASMJIT_INLINE uint8_t* getBuffer() const { - return _buffer; + //! \internal + //! + //! Return a new code-gen ID (always greater than zero). + ASMJIT_INLINE uint64_t _nextExternalId() { + ASMJIT_ASSERT(_hlIdGenerator != ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF)); + return ++_hlIdGenerator; } - //! Get the end of the buffer (points to the first byte that is outside). - ASMJIT_INLINE uint8_t* getEnd() const { - return _end; + // -------------------------------------------------------------------------- + // [Assembler Features] + // -------------------------------------------------------------------------- + + //! Get code-generator features. + ASMJIT_INLINE uint32_t getFeatures() const { return _features; } + //! Set code-generator features. + ASMJIT_INLINE void setFeatures(uint32_t features) { _features = features; } + + //! Get code-generator `feature`. + ASMJIT_INLINE bool hasFeature(uint32_t feature) const { + ASMJIT_ASSERT(feature < 32); + return (_features & (1 << feature)) != 0; } - //! Get the current position in the buffer. - ASMJIT_INLINE uint8_t* getCursor() const { - return _cursor; + //! Set code-generator `feature` to `value`. + ASMJIT_INLINE void setFeature(uint32_t feature, bool value) { + ASMJIT_ASSERT(feature < 32); + feature = static_cast(value) << feature; + _features = (_features & ~feature) | feature; } + // -------------------------------------------------------------------------- + // [Instruction Options] + // -------------------------------------------------------------------------- + + //! Get options of the next instruction. + ASMJIT_INLINE uint32_t getInstOptions() const { return _instOptions; } + //! Set options of the next instruction. + ASMJIT_INLINE void setInstOptions(uint32_t instOptions) { _instOptions = instOptions; } + + //! Get options of the next instruction and reset them. 
+ ASMJIT_INLINE uint32_t getInstOptionsAndReset() {
+ uint32_t instOptions = _instOptions;
+ _instOptions = 0;
+ return instOptions;
+ };
+
+ // --------------------------------------------------------------------------
+ // [Code-Buffer]
+ // --------------------------------------------------------------------------
+
+ //! Grow the code-buffer.
+ //!
+ //! The internal code-buffer will grow at least by `n` bytes so `n` bytes can
+ //! be added to it. If `n` is zero or `getOffset() + n` is not greater than
+ //! the current capacity of the code-buffer this function does nothing.
+ ASMJIT_API Error _grow(size_t n);
+ //! Reserve the code-buffer to at least `n` bytes.
+ ASMJIT_API Error _reserve(size_t n);
+
+ //! Get capacity of the code-buffer.
+ ASMJIT_INLINE size_t getCapacity() const { return (size_t)(_end - _buffer); }
+ //! Get the number of remaining bytes in code-buffer.
+ ASMJIT_INLINE size_t getRemainingSpace() const { return (size_t)(_end - _cursor); }
+
+ //! Get current offset in buffer, same as `getOffset() + getTrampolinesSize()`.
+ ASMJIT_INLINE size_t getCodeSize() const { return getOffset() + getTrampolinesSize(); }
+
+ //! Get size of all possible trampolines.
+ //!
+ //! Trampolines are needed to successfully generate relative jumps to absolute
+ //! addresses. This value is only non-zero if jmp or call instructions were
+ //! used with immediate operand (this means jumping or calling an absolute
+ //! address directly).
+ ASMJIT_INLINE size_t getTrampolinesSize() const { return _trampolinesSize; }
+
+ //! Get code-buffer.
+ ASMJIT_INLINE uint8_t* getBuffer() const { return _buffer; }
+ //! Get the end of the code-buffer (points to the first byte that is invalid).
+ ASMJIT_INLINE uint8_t* getEnd() const { return _end; }
+
+ //! Get the current position in the code-buffer.
+ ASMJIT_INLINE uint8_t* getCursor() const { return _cursor; }
 //! Set the current position in the buffer. 
ASMJIT_INLINE void setCursor(uint8_t* cursor) { ASMJIT_ASSERT(cursor >= _buffer && cursor <= _end); @@ -203,12 +599,8 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { } //! Get the current offset in the buffer. - ASMJIT_INLINE size_t getOffset() const { - return (size_t)(_cursor - _buffer); - } - - //! Set the current offset in the buffer to `offset` and get the previous - //! offset value. + ASMJIT_INLINE size_t getOffset() const { return (size_t)(_cursor - _buffer); } + //! Set the current offset in the buffer to `offset` and return the previous value. ASMJIT_INLINE size_t setOffset(size_t offset) { ASMJIT_ASSERT(offset < getCapacity()); @@ -217,16 +609,6 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { return oldOffset; } - //! Grow the internal buffer. - //! - //! The internal buffer will grow at least by `n` bytes so `n` bytes can be - //! added to it. If `n` is zero or `getOffset() + n` is not greater than the - //! current capacity of the buffer this function does nothing. - ASMJIT_API Error _grow(size_t n); - - //! Reserve the internal buffer to at least `n` bytes. - ASMJIT_API Error _reserve(size_t n); - //! Get BYTE at position `pos`. ASMJIT_INLINE uint8_t getByteAt(size_t pos) const { ASMJIT_ASSERT(pos + 1 <= (size_t)(_end - _buffer)); @@ -300,27 +682,21 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { } // -------------------------------------------------------------------------- - // [GetCodeSize] + // [Embed] // -------------------------------------------------------------------------- - //! Get current offset in buffer, same as `getOffset() + getTramplineSize()`. - ASMJIT_INLINE size_t getCodeSize() const { - return getOffset() + getTrampolineSize(); - } + //! Embed raw data into the code-buffer. 
+ ASMJIT_API virtual Error embed(const void* data, uint32_t size); // -------------------------------------------------------------------------- - // [GetTrampolineSize] + // [Align] // -------------------------------------------------------------------------- - //! Get size of all possible trampolines. + //! Align target buffer to the `offset` specified. //! - //! Trampolines are needed to successfuly generate relative jumps to absolute - //! addresses. This value is only non-zero if jmp of call instructions were - //! used with immediate operand (this means jumping or calling an absolute - //! address directly). - ASMJIT_INLINE size_t getTrampolineSize() const { - return _trampolineSize; - } + //! The sequence that is used to fill the gap between the aligned location + //! and the current depends on `alignMode`, see \ref AlignMode. + virtual Error align(uint32_t alignMode, uint32_t offset) = 0; // -------------------------------------------------------------------------- // [Label] @@ -331,11 +707,11 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { return _labelList.getLength(); } - //! Get whether the `label` is valid (created by the assembler). + //! Get whether the `label` is valid (i.e. registered). ASMJIT_INLINE bool isLabelValid(const Label& label) const { return isLabelValid(label.getId()); } - //! \overload + //! Get whether the label `id` is valid (i.e. registered). ASMJIT_INLINE bool isLabelValid(uint32_t id) const { return static_cast(id) < _labelList.getLength(); } @@ -353,17 +729,17 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { ASMJIT_INLINE bool isLabelBound(uint32_t id) const { ASMJIT_ASSERT(isLabelValid(id)); - return _labelList[id].offset != -1; + return _labelList[id]->offset != -1; } - //! Get `label` offset or -1 if the label is not yet bound. + //! Get a `label` offset or -1 if the label is not yet bound. ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const { return getLabelOffset(label.getId()); } //! 
\overload ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const { ASMJIT_ASSERT(isLabelValid(id)); - return _labelList[id].offset; + return _labelList[id]->offset; } //! Get `LabelData` by `label`. @@ -373,18 +749,13 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { //! \overload ASMJIT_INLINE LabelData* getLabelData(uint32_t id) const { ASMJIT_ASSERT(isLabelValid(id)); - return const_cast(&_labelList[id]); + return const_cast(_labelList[id]); } //! \internal //! - //! Register labels for other code generator, i.e. `Compiler`. - ASMJIT_API Error _registerIndexedLabels(size_t index); - - //! \internal - //! - //! Create and initialize a new `Label`. - ASMJIT_API Error _newLabel(Label* dst); + //! Create a new label and return its ID. + ASMJIT_API uint32_t _newLabelId(); //! \internal //! @@ -392,48 +763,26 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { ASMJIT_API LabelLink* _newLabelLink(); //! Create and return a new `Label`. - ASMJIT_INLINE Label newLabel() { - Label result(NoInit); - _newLabel(&result); - return result; - } + ASMJIT_INLINE Label newLabel() { return Label(_newLabelId()); } - //! Bind label to the current offset. + //! Bind the `label` to the current offset. //! //! \note Label can be bound only once! ASMJIT_API virtual Error bind(const Label& label); - // -------------------------------------------------------------------------- - // [Embed] - // -------------------------------------------------------------------------- - - //! Embed data into the code buffer. - ASMJIT_API virtual Error embed(const void* data, uint32_t size); - - // -------------------------------------------------------------------------- - // [Align] - // -------------------------------------------------------------------------- - - //! Align target buffer to `m` bytes. - //! - //! Typical usage of this is to align labels at start of the inner loops. - //! - //! Inserts `nop()` instructions or CPU optimized NOPs. 
- virtual Error align(uint32_t mode, uint32_t offset) = 0; - // -------------------------------------------------------------------------- // [Reloc] // -------------------------------------------------------------------------- - //! Relocate the code to `baseAddress` and copy to `dst`. + //! Relocate the code to `baseAddress` and copy it to `dst`. //! //! \param dst Contains the location where the relocated code should be //! copied. The pointer can be address returned by virtual memory allocator //! or any other address that has sufficient space. //! - //! \param base Base address used for relocation. The `JitRuntime` always - //! sets the `base` address to be the same as `dst`, but other runtimes, for - //! example `StaticRuntime`, do not have to follow this rule. + //! \param baseAddress Base address used for relocation. The `JitRuntime` + //! always sets the `baseAddress` address to be the same as `dst`, but other + //! runtimes, for example `StaticRuntime`, do not have to follow this rule. //! //! \retval The number bytes actually used. If the code generator reserved //! space for possible trampolines, but didn't use it, the number of bytes @@ -459,6 +808,9 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { // [Emit] // -------------------------------------------------------------------------- + //! Emit an instruction (virtual). + virtual Error _emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) = 0; + //! Emit an instruction. ASMJIT_API Error emit(uint32_t code); //! \overload @@ -468,11 +820,9 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { //! \overload ASMJIT_API Error emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2); //! 
\overload - ASMJIT_INLINE Error emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) { - return _emit(code, o0, o1, o2, o3); - } + ASMJIT_API Error emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3); - //! Emit an instruction with integer immediate operand. + //! Emit an instruction that has an immediate operand. ASMJIT_API Error emit(uint32_t code, int o0); //! \overload ASMJIT_API Error emit(uint32_t code, const Operand& o0, int o1); @@ -490,27 +840,54 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { //! \overload ASMJIT_API Error emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int64_t o3); - //! Emit an instruction (virtual). - virtual Error _emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) = 0; - // -------------------------------------------------------------------------- // [Members] // -------------------------------------------------------------------------- - //! Buffer where the code is emitted (either live or temporary). - //! - //! This is actually the base pointer of the buffer, to get the current - //! position (cursor) look at the `_cursor` member. + //! Associated runtime. + Runtime* _runtime; + +#if !defined(ASMJIT_DISABLE_LOGGER) + //! Associated logger. + Logger* _logger; +#else + //! Makes libraries built with or without logging support binary compatible. + void* _logger; +#endif // ASMJIT_DISABLE_LOGGER + //! Associated error handler, triggered by \ref setLastError(). + ErrorHandler* _errorHandler; + + //! Target architecture ID. + uint8_t _arch; + //! Target architecture GP register size in bytes (4 or 8). + uint8_t _regSize; + //! \internal + uint16_t _reserved; + + //! Assembler features, used by \ref hasFeature() and \ref setFeature(). + uint32_t _features; + //! Options affecting the next instruction. + uint32_t _instOptions; + //! Last error code. 
+ uint32_t _lastError; + + //! CodeGen ID generator. + uint64_t _hlIdGenerator; + //! Count of high-level code generators attached. + size_t _hlAttachedCount; + + //! General purpose zone allocator. + Zone _zoneAllocator; + + //! Start of the code-buffer. uint8_t* _buffer; - //! The end of the buffer (points to the first invalid byte). - //! - //! The end of the buffer is calculated as _buffer + size. + //! End of the code-buffer (points to the first invalid byte). uint8_t* _end; //! The current position in code `_buffer`. uint8_t* _cursor; - //! Size of possible trampolines. - uint32_t _trampolineSize; + //! Size of all possible trampolines. + uint32_t _trampolinesSize; //! Inline comment that will be logged by the next instruction and set to NULL. const char* _comment; @@ -518,7 +895,7 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { LabelLink* _unusedLinks; //! LabelData list. - PodVector _labelList; + PodVector _labelList; //! RelocData list. PodVector _relocList; }; @@ -529,8 +906,10 @@ struct ASMJIT_VCLASS Assembler : public CodeGen { // [Defined-Later] // ============================================================================ -ASMJIT_INLINE Label::Label(Assembler& a) - : Operand(NoInit) { a._newLabel(this); } +ASMJIT_INLINE Label::Label(Assembler& a) : Operand(NoInit) { + reset(); + _label.id = a._newLabelId(); +} } // asmjit namespace diff --git a/src/asmjit/base/codegen.cpp b/src/asmjit/base/codegen.cpp deleted file mode 100644 index 55192ed..0000000 --- a/src/asmjit/base/codegen.cpp +++ /dev/null @@ -1,111 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
- -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies - AsmJit] -#include "../base/codegen.h" -#include "../base/intutil.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::CodeGen - Construction / Destruction] -// ============================================================================ - -CodeGen::CodeGen(Runtime* runtime) : - _runtime(runtime), - _logger(NULL), - _errorHandler(NULL), - _baseAddress(runtime->getBaseAddress()), - _arch(kArchNone), - _regSize(0), - _reserved(0), - _features(IntUtil::mask(kCodeGenOptimizedAlign)), - _instOptions(0), - _error(kErrorOk), - _baseZone(16384 - kZoneOverhead) {} - -CodeGen::~CodeGen() { - if (_errorHandler != NULL) - _errorHandler->release(); -} - -// ============================================================================ -// [asmjit::CodeGen - Logging] -// ============================================================================ - -#if !defined(ASMJIT_DISABLE_LOGGER) -Error CodeGen::setLogger(Logger* logger) { - _logger = logger; - return kErrorOk; -} -#endif // !ASMJIT_DISABLE_LOGGER - -// ============================================================================ -// [asmjit::CodeGen - Error] -// ============================================================================ - -Error CodeGen::setError(Error error, const char* message) { - if (error == kErrorOk) { - _error = kErrorOk; - return kErrorOk; - } - - if (message == NULL) { -#if !defined(ASMJIT_DISABLE_NAMES) - message = ErrorUtil::asString(error); -#else - static const char noMessage[] = ""; - message = noMessage; -#endif // ASMJIT_DISABLE_NAMES - } - - // Error handler is called before logger so logging can be skipped if error - // has been handled. - ErrorHandler* handler = _errorHandler; - ASMJIT_TLOG("[ERROR] %s %s\n", message, !handler ? 
"(Possibly unhandled?)" : ""); - - if (handler != NULL && handler->handleError(error, message)) - return error; - -#if !defined(ASMJIT_DISABLE_LOGGER) - Logger* logger = _logger; - if (logger != NULL) { - logger->logFormat(kLoggerStyleComment, - "*** ERROR: %s (%u).\n", message, static_cast(error)); - } -#endif // !ASMJIT_DISABLE_LOGGER - - // The handler->handleError() function may throw an exception or longjmp() - // to terminate the execution of setError(). This is the reason why we have - // delayed changing the _error member until now. - _error = error; - - return error; -} - -Error CodeGen::setErrorHandler(ErrorHandler* handler) { - ErrorHandler* oldHandler = _errorHandler; - - if (oldHandler != NULL) - oldHandler->release(); - - if (handler != NULL) - handler = handler->addRef(); - - _errorHandler = handler; - return kErrorOk; -} - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" diff --git a/src/asmjit/base/codegen.h b/src/asmjit/base/codegen.h deleted file mode 100644 index 5e2db43..0000000 --- a/src/asmjit/base/codegen.h +++ /dev/null @@ -1,337 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_CODEGEN_H -#define _ASMJIT_BASE_CODEGEN_H - -// [Dependencies - AsmJit] -#include "../base/error.h" -#include "../base/logger.h" -#include "../base/runtime.h" -#include "../base/zone.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base_general -//! \{ - -// ============================================================================ -// [asmjit::CodeGenFeatures] -// ============================================================================ - -//! Features of \ref CodeGen. -ASMJIT_ENUM(CodeGenFeatures) { - //! Emit optimized code-alignment sequences (`Assembler` and `Compiler`). - //! - //! Default `true`. - //! - //! X86/X64 - //! ------- - //! - //! 
Default align sequence used by X86/X64 architecture is one-byte 0x90 - //! opcode that is mostly shown by disassemblers as nop. However there are - //! more optimized align sequences for 2-11 bytes that may execute faster. - //! If this feature is enabled asmjit will generate specialized sequences - //! for alignment between 1 to 11 bytes. Also when `X86Compiler` is used, - //! it can add REX prefixes into the code to make some instructions greater - //! so no alignment sequence is needed. - kCodeGenOptimizedAlign = 0, - - //! Emit jump-prediction hints (`Assembler` and `Compiler`). - //! - //! Default `false`. - //! - //! X86/X64 - //! ------- - //! - //! Jump prediction is usually based on the direction of the jump. If the - //! jump is backward it is usually predicted as taken; and if the jump is - //! forward it is usually predicted as not-taken. The reason is that loops - //! generally use backward jumps and conditions usually use forward jumps. - //! However this behavior can be overridden by using instruction prefixes. - //! If this option is enabled these hints will be emitted. - //! - //! This feature is disabled by default, because the only processor that - //! used to take into consideration prediction hints was P4. Newer processors - //! implement heuristics for branch prediction that ignores any static hints. - kCodeGenPredictedJumps = 1, - - //! Schedule instructions so they can be executed faster (`Compiler` only). - //! - //! Default `false` - has to be explicitly enabled as the scheduler needs - //! some time to run. - //! - //! X86/X64 - //! ------- - //! - //! If scheduling is enabled AsmJit will try to reorder instructions to - //! minimize dependency chain. Scheduler always runs after the registers are - //! allocated so it doesn't change count of register allocs/spills. - //! - //! This feature is highly experimental and untested. 
- kCodeGenEnableScheduler = 2 -}; - -// ============================================================================ -// [asmjit::AlignMode] -// ============================================================================ - -//! Code aligning mode. -ASMJIT_ENUM(AlignMode) { - //! Align by emitting a sequence that can be executed (code). - kAlignCode = 0, - //! Align by emitting sequence that shouldn't be executed (data). - kAlignData = 1 -}; - -// ============================================================================ -// [asmjit::RelocMode] -// ============================================================================ - -//! Relocation mode. -ASMJIT_ENUM(RelocMode) { - //! Relocate an absolute address to an absolute address. - kRelocAbsToAbs = 0, - //! Relocate a relative address to an absolute address. - kRelocRelToAbs = 1, - //! Relocate an absolute address to a relative address. - kRelocAbsToRel = 2, - //! Relocate an absolute address to a relative address or use trampoline. - kRelocTrampoline = 3 -}; - -// ============================================================================ -// [asmjit::CodeGen] -// ============================================================================ - -//! Abstract class defining basics of \ref Assembler and \ref Compiler. -struct ASMJIT_VCLASS CodeGen { - ASMJIT_NO_COPY(CodeGen) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CodeGen` instance. - ASMJIT_API CodeGen(Runtime* runtime); - //! Destroy the `CodeGen` instance. - ASMJIT_API virtual ~CodeGen(); - - // -------------------------------------------------------------------------- - // [Runtime] - // -------------------------------------------------------------------------- - - //! Get runtime. 
- ASMJIT_INLINE Runtime* getRuntime() const { - return _runtime; - } - - // -------------------------------------------------------------------------- - // [Logger] - // -------------------------------------------------------------------------- - -#if !defined(ASMJIT_DISABLE_LOGGER) - //! Get whether the code generator has a logger. - ASMJIT_INLINE bool hasLogger() const { - return _logger != NULL; - } - - //! Get logger. - ASMJIT_INLINE Logger* getLogger() const { - return _logger; - } - - //! Set logger to `logger`. - ASMJIT_API Error setLogger(Logger* logger); -#endif // !ASMJIT_DISABLE_LOGGER - - // -------------------------------------------------------------------------- - // [Arch] - // -------------------------------------------------------------------------- - - //! Get target architecture. - ASMJIT_INLINE uint32_t getArch() const { - return _arch; - } - - //! Get default register size (4 or 8 bytes). - ASMJIT_INLINE uint32_t getRegSize() const { - return _regSize; - } - - // -------------------------------------------------------------------------- - // [BaseAddress] - // -------------------------------------------------------------------------- - - //! Get whether the code-generator has a base address. - //! - //! \sa \ref getBaseAddress() - ASMJIT_INLINE bool hasBaseAddress() const { - return _baseAddress != kNoBaseAddress; - } - - //! Get the base address. - ASMJIT_INLINE Ptr getBaseAddress() const { - return _baseAddress; - } - - //! Set the base address to `baseAddress`. - ASMJIT_INLINE void setBaseAddress(Ptr baseAddress) { - _baseAddress = baseAddress; - } - - //! Reset the base address. - ASMJIT_INLINE void resetBaseAddress() { - setBaseAddress(kNoBaseAddress); - } - - // -------------------------------------------------------------------------- - // [LastError / ErrorHandler] - // -------------------------------------------------------------------------- - - //! Get last error code. 
- ASMJIT_INLINE Error getError() const { - return _error; - } - - //! Set last error code and propagate it through the error handler. - ASMJIT_API Error setError(Error error, const char* message = NULL); - - //! Clear the last error code. - ASMJIT_INLINE void resetError() { - _error = kErrorOk; - } - - //! Get error handler. - ASMJIT_INLINE ErrorHandler* getErrorHandler() const { - return _errorHandler; - } - - //! Set error handler. - ASMJIT_API Error setErrorHandler(ErrorHandler* handler); - - //! Clear error handler. - ASMJIT_INLINE Error resetErrorHandler() { - return setErrorHandler(NULL); - } - - // -------------------------------------------------------------------------- - // [Code-Generation Features] - // -------------------------------------------------------------------------- - - //! Get code-generator `feature`. - ASMJIT_INLINE bool hasFeature(uint32_t feature) const { - ASMJIT_ASSERT(feature < 32); - - return (_features & (1 << feature)) != 0; - } - - //! Set code-generator `feature` to `value`. - ASMJIT_INLINE void setFeature(uint32_t feature, bool value) { - ASMJIT_ASSERT(feature < 32); - - feature = static_cast(value) << feature; - _features = (_features & ~feature) | feature; - } - - //! Get code-generator features. - ASMJIT_INLINE uint32_t getFeatures() const { - return _features; - } - - //! Set code-generator features. - ASMJIT_INLINE void setFeatures(uint32_t features) { - _features = features; - } - - // -------------------------------------------------------------------------- - // [Instruction Options] - // -------------------------------------------------------------------------- - - //! Get options of the next instruction. - ASMJIT_INLINE uint32_t getInstOptions() const { - return _instOptions; - } - - //! Get options of the next instruction and reset them. - ASMJIT_INLINE uint32_t getInstOptionsAndReset() { - uint32_t instOptions = _instOptions; - _instOptions = 0; - return instOptions; - }; - - //! Set options of the next instruction. 
- ASMJIT_INLINE void setInstOptions(uint32_t instOptions) { - _instOptions = instOptions; - } - - // -------------------------------------------------------------------------- - // [Make] - // -------------------------------------------------------------------------- - - //! Make is a convenience method to make and relocate the current code and - //! add it to the associated `Runtime`. - //! - //! What is needed is only to cast the returned pointer to your function type - //! and then use it. If there was an error during `make()` `NULL` is returned - //! and the last error code can be obtained by calling `getError()`. - virtual void* make() = 0; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Target runtime. - Runtime* _runtime; - -#if !defined(ASMJIT_DISABLE_LOGGER) - //! Logger. - Logger* _logger; -#else - //! \internal - //! - //! Makes libraries built with or without logging support binary compatible. - void* _logger; -#endif // ASMJIT_DISABLE_LOGGER - - //! Error handler, called by \ref setError(). - ErrorHandler* _errorHandler; - - //! Base address (-1 if unknown/not used). - Ptr _baseAddress; - - //! Target architecture ID. - uint8_t _arch; - //! Target architecture GP register size in bytes (4 or 8). - uint8_t _regSize; - //! \internal - uint16_t _reserved; - - //! Code-Generation features, used by \ref hasFeature() and \ref setFeature(). - uint32_t _features; - //! Options affecting the next instruction. - uint32_t _instOptions; - - //! Last error code. - uint32_t _error; - - //! Base zone. - Zone _baseZone; -}; - -//! 
\} - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_CODEGEN_H diff --git a/src/asmjit/base/compiler.cpp b/src/asmjit/base/compiler.cpp index ad09197..8835fd7 100644 --- a/src/asmjit/base/compiler.cpp +++ b/src/asmjit/base/compiler.cpp @@ -14,10 +14,10 @@ // [Dependencies - AsmJit] #include "../base/assembler.h" #include "../base/compiler.h" -#include "../base/context_p.h" +#include "../base/compilercontext_p.h" #include "../base/cpuinfo.h" -#include "../base/intutil.h" #include "../base/logger.h" +#include "../base/utils.h" // [Dependencies - C] #include @@ -32,55 +32,55 @@ namespace asmjit { // ============================================================================ static const char noName[1] = { '\0' }; -enum { kBaseCompilerDefaultLookAhead = 64 }; +enum { kCompilerDefaultLookAhead = 64 }; // ============================================================================ // [asmjit::Compiler - Construction / Destruction] // ============================================================================ -Compiler::Compiler(Runtime* runtime) : - CodeGen(runtime), - _assembler(NULL), +Compiler::Compiler() : + _features(0), + _maxLookAhead(kCompilerDefaultLookAhead), + _instOptions(0), + _tokenGenerator(0), _nodeFlowId(0), _nodeFlags(0), - _maxLookAhead(kBaseCompilerDefaultLookAhead), _targetVarMapping(NULL), _firstNode(NULL), _lastNode(NULL), _cursor(NULL), _func(NULL), - _varZone(4096 - kZoneOverhead), - _stringZone(4096 - kZoneOverhead), - _localConstZone(4096 - kZoneOverhead), - _localConstPool(&_localConstZone), - _globalConstPool(&_baseZone) {} - -Compiler::~Compiler() { - reset(true); - - if (_assembler != NULL) - delete _assembler; -} + _zoneAllocator(8192 - Zone::kZoneOverhead), + _varAllocator(4096 - Zone::kZoneOverhead), + _stringAllocator(4096 - Zone::kZoneOverhead), + _constAllocator(4096 - Zone::kZoneOverhead), + _localConstPool(&_constAllocator), + _globalConstPool(&_zoneAllocator) {} 
+Compiler::~Compiler() {} // ============================================================================ -// [asmjit::Compiler - Clear / Reset] +// [asmjit::Compiler - Attach / Reset] // ============================================================================ void Compiler::reset(bool releaseMemory) { - // CodeGen members. - _baseAddress = kNoBaseAddress; + Assembler* assembler = getAssembler(); + if (assembler != NULL) + assembler->_detached(this); + + _arch = kArchNone; + _regSize = 0; + _finalized = false; + _lastError = kErrorNotInitialized; + + _features = 0; + _maxLookAhead = kCompilerDefaultLookAhead; + _instOptions = 0; - _error = kErrorOk; + _tokenGenerator = 0; - _baseZone.reset(releaseMemory); - - // Compiler members. _nodeFlowId = 0; _nodeFlags = 0; - if (_assembler != NULL) - _assembler->reset(releaseMemory); - _firstNode = NULL; _lastNode = NULL; @@ -93,25 +93,79 @@ void Compiler::reset(bool releaseMemory) { _localConstPoolLabel.reset(); _globalConstPoolLabel.reset(); - _varZone.reset(releaseMemory); - _stringZone.reset(releaseMemory); - _localConstZone.reset(releaseMemory); + _zoneAllocator.reset(releaseMemory); + _varAllocator.reset(releaseMemory); + _stringAllocator.reset(releaseMemory); + _constAllocator.reset(releaseMemory); - _targetList.reset(releaseMemory); _varList.reset(releaseMemory); } // ============================================================================ -// [asmjit::Compiler - Node Management] +// [asmjit::Compiler - Node-Factory] // ============================================================================ -Node* Compiler::setCursor(Node* node) { - Node* old = _cursor; - _cursor = node; - return old; +HLData* Compiler::newDataNode(const void* data, uint32_t size) { + if (size > HLData::kInlineBufferSize) { + void* clonedData = _stringAllocator.alloc(size); + if (clonedData == NULL) + return NULL; + + if (data != NULL) + ::memcpy(clonedData, data, size); + data = clonedData; + } + + return newNode(const_cast(data), size); 
} -Node* Compiler::addNode(Node* node) { +HLAlign* Compiler::newAlignNode(uint32_t alignMode, uint32_t offset) { + return newNode(alignMode, offset); +} + +HLLabel* Compiler::newLabelNode() { + Assembler* assembler = getAssembler(); + if (assembler == NULL) return NULL; + + uint32_t id = assembler->_newLabelId(); + LabelData* ld = assembler->getLabelData(id); + + HLLabel* node = newNode(id); + if (node == NULL) return NULL; + + // These have to be zero now. + ASMJIT_ASSERT(ld->hlId == 0); + ASMJIT_ASSERT(ld->hlData == NULL); + + ld->hlId = _hlId; + ld->hlData = node; + + return node; +} + +HLComment* Compiler::newCommentNode(const char* str) { + if (str != NULL && str[0]) { + str = _stringAllocator.sdup(str); + if (str == NULL) + return NULL; + } + + return newNode(str); +} + +HLHint* Compiler::newHintNode(Var& var, uint32_t hint, uint32_t value) { + if (var.getId() == kInvalidValue) + return NULL; + + VarData* vd = getVd(var); + return newNode(vd, hint, value); +} + +// ============================================================================ +// [asmjit::Compiler - Code-Stream] +// ============================================================================ + +HLNode* Compiler::addNode(HLNode* node) { ASMJIT_ASSERT(node != NULL); ASMJIT_ASSERT(node->_prev == NULL); ASMJIT_ASSERT(node->_next == NULL); @@ -128,8 +182,8 @@ Node* Compiler::addNode(Node* node) { } } else { - Node* prev = _cursor; - Node* next = _cursor->_next; + HLNode* prev = _cursor; + HLNode* next = _cursor->_next; node->_prev = prev; node->_next = next; @@ -145,14 +199,14 @@ Node* Compiler::addNode(Node* node) { return node; } -Node* Compiler::addNodeBefore(Node* node, Node* ref) { +HLNode* Compiler::addNodeBefore(HLNode* node, HLNode* ref) { ASMJIT_ASSERT(node != NULL); ASMJIT_ASSERT(node->_prev == NULL); ASMJIT_ASSERT(node->_next == NULL); ASMJIT_ASSERT(ref != NULL); - Node* prev = ref->_prev; - Node* next = ref; + HLNode* prev = ref->_prev; + HLNode* next = ref; node->_prev = prev; 
node->_next = next; @@ -166,14 +220,14 @@ Node* Compiler::addNodeBefore(Node* node, Node* ref) { return node; } -Node* Compiler::addNodeAfter(Node* node, Node* ref) { +HLNode* Compiler::addNodeAfter(HLNode* node, HLNode* ref) { ASMJIT_ASSERT(node != NULL); ASMJIT_ASSERT(node->_prev == NULL); ASMJIT_ASSERT(node->_next == NULL); ASMJIT_ASSERT(ref != NULL); - Node* prev = ref; - Node* next = ref->_next; + HLNode* prev = ref; + HLNode* next = ref->_next; node->_prev = prev; node->_next = next; @@ -187,17 +241,17 @@ Node* Compiler::addNodeAfter(Node* node, Node* ref) { return node; } -static ASMJIT_INLINE void BaseCompiler_nodeRemoved(Compiler* self, Node* node_) { +static ASMJIT_INLINE void Compiler_nodeRemoved(Compiler* self, HLNode* node_) { if (node_->isJmpOrJcc()) { - JumpNode* node = static_cast(node_); - TargetNode* target = node->getTarget(); + HLJump* node = static_cast(node_); + HLLabel* label = node->getTarget(); - if (target != NULL) { + if (label != NULL) { // Disconnect. - JumpNode** pPrev = &target->_from; + HLJump** pPrev = &label->_from; for (;;) { ASMJIT_ASSERT(*pPrev != NULL); - JumpNode* current = *pPrev; + HLJump* current = *pPrev; if (current == NULL) break; @@ -210,14 +264,14 @@ static ASMJIT_INLINE void BaseCompiler_nodeRemoved(Compiler* self, Node* node_) pPrev = ¤t->_jumpNext; } - target->subNumRefs(); + label->subNumRefs(); } } } -Node* Compiler::removeNode(Node* node) { - Node* prev = node->_prev; - Node* next = node->_next; +HLNode* Compiler::removeNode(HLNode* node) { + HLNode* prev = node->_prev; + HLNode* next = node->_next; if (_firstNode == node) _firstNode = next; @@ -234,19 +288,19 @@ Node* Compiler::removeNode(Node* node) { if (_cursor == node) _cursor = prev; - BaseCompiler_nodeRemoved(this, node); + Compiler_nodeRemoved(this, node); return node; } -void Compiler::removeNodes(Node* first, Node* last) { +void Compiler::removeNodes(HLNode* first, HLNode* last) { if (first == last) { removeNode(first); return; } - Node* prev = 
first->_prev; - Node* next = last->_next; + HLNode* prev = first->_prev; + HLNode* next = last->_next; if (_firstNode == first) _firstNode = next; @@ -258,9 +312,9 @@ void Compiler::removeNodes(Node* first, Node* last) { else next->_prev = prev; - Node* node = first; + HLNode* node = first; for (;;) { - Node* next = node->getNext(); + HLNode* next = node->getNext(); ASMJIT_ASSERT(next != NULL); node->_prev = NULL; @@ -268,7 +322,7 @@ void Compiler::removeNodes(Node* first, Node* last) { if (_cursor == node) _cursor = prev; - BaseCompiler_nodeRemoved(this, node); + Compiler_nodeRemoved(this, node); if (node == last) break; @@ -276,76 +330,62 @@ void Compiler::removeNodes(Node* first, Node* last) { } } +HLNode* Compiler::setCursor(HLNode* node) { + HLNode* old = _cursor; + _cursor = node; + return old; +} + // ============================================================================ // [asmjit::Compiler - Align] // ============================================================================ -AlignNode* Compiler::newAlign(uint32_t mode, uint32_t offset) { - AlignNode* node = newNode(mode, offset); +Error Compiler::align(uint32_t alignMode, uint32_t offset) { + HLAlign* node = newAlignNode(alignMode, offset); if (node == NULL) - goto _NoMemory; - return node; + return setLastError(kErrorNoHeapMemory); -_NoMemory: - setError(kErrorNoHeapMemory); - return NULL; -} - -AlignNode* Compiler::addAlign(uint32_t mode, uint32_t offset) { - AlignNode* node = newAlign(mode, offset); - if (node == NULL) - return NULL; - return static_cast(addNode(node)); -} - -// ============================================================================ -// [asmjit::Compiler - Target] -// ============================================================================ - -TargetNode* Compiler::newTarget() { - TargetNode* node = newNode( - OperandUtil::makeLabelId(static_cast(_targetList.getLength()))); - - if (node == NULL || _targetList.append(node) != kErrorOk) - goto _NoMemory; - return node; - 
-_NoMemory: - setError(kErrorNoHeapMemory); - return NULL; -} - -TargetNode* Compiler::addTarget() { - TargetNode* node = newTarget(); - if (node == NULL) - return NULL; - return static_cast(addNode(node)); + addNode(node); + return kErrorOk; } // ============================================================================ // [asmjit::Compiler - Label] // ============================================================================ -Error Compiler::_newLabel(Label* dst) { - dst->_init_packed_op_sz_b0_b1_id(kOperandTypeLabel, 0, 0, 0, kInvalidValue); - dst->_init_packed_d2_d3(0, 0); +HLLabel* Compiler::getHLLabel(uint32_t id) const { + Assembler* assembler = getAssembler(); + if (assembler == NULL) return NULL; - TargetNode* node = newTarget(); - if (node == NULL) - goto _NoMemory; + LabelData* ld = assembler->getLabelData(id); + if (ld->hlId == _hlId) + return static_cast(ld->hlData); + else + return NULL; +} - dst->_label.id = node->getLabelId(); - return kErrorOk; +bool Compiler::isLabelValid(uint32_t id) const { + Assembler* assembler = getAssembler(); + if (assembler == NULL) return false; -_NoMemory: - return setError(kErrorNoHeapMemory); + return static_cast(id) < assembler->getLabelsCount(); +} + +uint32_t Compiler::_newLabelId() { + HLLabel* node = newLabelNode(); + if (node == NULL) { + setLastError(kErrorNoHeapMemory); + return kInvalidValue; + } + + return node->getLabelId(); } Error Compiler::bind(const Label& label) { - uint32_t index = label.getId(); - ASMJIT_ASSERT(index < _targetList.getLength()); - - addNode(_targetList[index]); + HLLabel* node = getHLLabel(label); + if (node == NULL) + return setLastError(kErrorInvalidState); + addNode(node); return kErrorOk; } @@ -353,67 +393,37 @@ Error Compiler::bind(const Label& label) { // [asmjit::Compiler - Embed] // ============================================================================ -EmbedNode* Compiler::newEmbed(const void* data, uint32_t size) { - EmbedNode* node; - - if (size > 
EmbedNode::kInlineBufferSize) { - void* clonedData = _stringZone.alloc(size); - if (clonedData == NULL) - goto _NoMemory; - - if (data != NULL) - ::memcpy(clonedData, data, size); - data = clonedData; - } - - node = newNode(const_cast(data), size); +Error Compiler::embed(const void* data, uint32_t size) { + HLData* node = newDataNode(data, size); if (node == NULL) - goto _NoMemory; - return node; + return setLastError(kErrorNoHeapMemory); -_NoMemory: - setError(kErrorNoHeapMemory); - return NULL; + addNode(node); + return kErrorOk; } -EmbedNode* Compiler::addEmbed(const void* data, uint32_t size) { - EmbedNode* node = newEmbed(data, size); - if (node == NULL) - return node; - return static_cast(addNode(node)); +Error Compiler::embedConstPool(const Label& label, const ConstPool& pool) { + if (label.getId() == kInvalidValue) + return kErrorInvalidState; + + align(kAlignData, static_cast(pool.getAlignment())); + bind(label); + + HLData* embedNode = newDataNode(NULL, static_cast(pool.getSize())); + if (embedNode == NULL) + return kErrorNoHeapMemory; + + pool.fill(embedNode->getData()); + addNode(embedNode); + + return kErrorOk; } // ============================================================================ // [asmjit::Compiler - Comment] // ============================================================================ -CommentNode* Compiler::newComment(const char* str) { - CommentNode* node; - - if (str != NULL && str[0]) { - str = _stringZone.sdup(str); - if (str == NULL) - goto _NoMemory; - } - - node = newNode(str); - if (node == NULL) - goto _NoMemory; - return node; - -_NoMemory: - setError(kErrorNoHeapMemory); - return NULL; -} - -CommentNode* Compiler::addComment(const char* str) { - CommentNode* node = newComment(str); - if (node == NULL) - return NULL; - return static_cast(addNode(node)); -} - -CommentNode* Compiler::comment(const char* fmt, ...) { +Error Compiler::comment(const char* fmt, ...) 
{ char buf[256]; char* p = buf; @@ -425,36 +435,29 @@ CommentNode* Compiler::comment(const char* fmt, ...) { } p[0] = '\0'; - return addComment(buf); + + HLComment* node = newCommentNode(buf); + if (node == NULL) + return setLastError(kErrorNoHeapMemory); + + addNode(node); + return kErrorOk; } // ============================================================================ // [asmjit::Compiler - Hint] // ============================================================================ -HintNode* Compiler::newHint(Var& var, uint32_t hint, uint32_t value) { - if (var.getId() == kInvalidValue) - return NULL; - VarData* vd = getVd(var); - - HintNode* node = newNode(vd, hint, value); - if (node == NULL) - goto _NoMemory; - return node; - -_NoMemory: - setError(kErrorNoHeapMemory); - return NULL; -} - -HintNode* Compiler::addHint(Var& var, uint32_t hint, uint32_t value) { +Error Compiler::_hint(Var& var, uint32_t hint, uint32_t value) { if (var.getId() == kInvalidValue) return NULL; - HintNode* node = newHint(var, hint, value); + HLHint* node = newHintNode(var, hint, value); if (node == NULL) - return NULL; - return static_cast(addNode(node)); + return setLastError(kErrorNoHeapMemory); + + addNode(node); + return kErrorOk; } // ============================================================================ @@ -462,17 +465,19 @@ HintNode* Compiler::addHint(Var& var, uint32_t hint, uint32_t value) { // ============================================================================ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char* name) { - VarData* vd = reinterpret_cast(_varZone.alloc(sizeof(VarData))); + VarData* vd = reinterpret_cast(_varAllocator.alloc(sizeof(VarData))); if (vd == NULL) goto _NoMemory; vd->_name = noName; vd->_id = OperandUtil::makeVarId(static_cast(_varList.getLength())); - vd->_contextId = kInvalidValue; + vd->_localId = kInvalidValue; +#if !defined(ASMJIT_DISABLE_LOGGER) if (name != NULL && name[0] != '\0') { - vd->_name = 
_stringZone.sdup(name); + vd->_name = _stringAllocator.sdup(name); } +#endif // !ASMJIT_DISABLE_LOGGER vd->_type = static_cast(type); vd->_class = static_cast(c); @@ -487,7 +492,7 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char* vd->_saveOnUnuse = false; vd->_modified = false; vd->_reserved0 = 0; - vd->_alignment = static_cast(IntUtil::iMin(size, 64)); + vd->_alignment = static_cast(Utils::iMin(size, 64)); vd->_size = size; vd->_homeMask = 0; @@ -507,44 +512,44 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char* return vd; _NoMemory: - setError(kErrorNoHeapMemory); + setLastError(kErrorNoHeapMemory); return NULL; } -void Compiler::alloc(Var& var) { +Error Compiler::alloc(Var& var) { if (var.getId() == kInvalidValue) - return; - addHint(var, kVarHintAlloc, kInvalidValue); + return kErrorOk; + return _hint(var, kVarHintAlloc, kInvalidValue); } -void Compiler::alloc(Var& var, uint32_t regIndex) { +Error Compiler::alloc(Var& var, uint32_t regIndex) { if (var.getId() == kInvalidValue) - return; - addHint(var, kVarHintAlloc, regIndex); + return kErrorOk; + return _hint(var, kVarHintAlloc, regIndex); } -void Compiler::alloc(Var& var, const Reg& reg) { +Error Compiler::alloc(Var& var, const Reg& reg) { if (var.getId() == kInvalidValue) - return; - addHint(var, kVarHintAlloc, reg.getRegIndex()); + return kErrorOk; + return _hint(var, kVarHintAlloc, reg.getRegIndex()); } -void Compiler::save(Var& var) { +Error Compiler::save(Var& var) { if (var.getId() == kInvalidValue) - return; - addHint(var, kVarHintSave, kInvalidValue); + return kErrorOk; + return _hint(var, kVarHintSave, kInvalidValue); } -void Compiler::spill(Var& var) { +Error Compiler::spill(Var& var) { if (var.getId() == kInvalidValue) - return; - addHint(var, kVarHintSpill, kInvalidValue); + return kErrorOk; + return _hint(var, kVarHintSpill, kInvalidValue); } -void Compiler::unuse(Var& var) { +Error Compiler::unuse(Var& var) { if (var.getId() == 
kInvalidValue) - return; - addHint(var, kVarHintUnuse, kInvalidValue); + return kErrorOk; + return _hint(var, kVarHintUnuse, kInvalidValue); } uint32_t Compiler::getPriority(Var& var) const { @@ -582,45 +587,27 @@ void Compiler::setSaveOnUnuse(Var& var, bool value) { vd->_saveOnUnuse = value; } -void Compiler::rename(Var& var, const char* name) { +void Compiler::rename(Var& var, const char* fmt, ...) { if (var.getId() == kInvalidValue) return; VarData* vd = getVdById(var.getId()); vd->_name = noName; - if (name != NULL && name[0] != '\0') { - vd->_name = _stringZone.sdup(name); + if (fmt != NULL && fmt[0] != '\0') { + char buf[64]; + + va_list ap; + va_start(ap, fmt); + + vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap); + buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0'; + + vd->_name = _stringAllocator.sdup(buf); + va_end(ap); } } -// ============================================================================ -// [asmjit::Compiler - Assembler] -// ============================================================================ - -Assembler* Compiler::getAssembler() { - Assembler* a = _assembler; - - if (a != NULL) { - a->reset(false); - } - else { - a = _newAssembler(); - _assembler = a; - } - -#if !defined(ASMJIT_DISABLE_LOGGER) - Logger* logger = _logger; - if (logger != NULL) - a->setLogger(logger); -#endif // !ASMJIT_DISABLE_LOGGER - - a->setBaseAddress(_baseAddress); - a->setFeatures(_features); - - return a; -} - } // asmjit namespace // [Api-End] diff --git a/src/asmjit/base/compiler.h b/src/asmjit/base/compiler.h index c5be74a..3980c91 100644 --- a/src/asmjit/base/compiler.h +++ b/src/asmjit/base/compiler.h @@ -13,12 +13,12 @@ // [Dependencies - AsmJit] #include "../base/assembler.h" -#include "../base/codegen.h" +#include "../base/compilerfunc.h" #include "../base/constpool.h" #include "../base/containers.h" -#include "../base/error.h" -#include "../base/intutil.h" +#include "../base/hlstream.h" #include "../base/operand.h" +#include "../base/utils.h" #include 
"../base/zone.h" // [Api-Begin] @@ -30,25 +30,39 @@ namespace asmjit { // [Forward Declarations] // ============================================================================ -struct Compiler; - struct VarAttr; struct VarData; struct VarMap; struct VarState; -struct Node; -struct EndNode; -struct InstNode; -struct JumpNode; +//! \addtogroup asmjit_base +//! \{ + +// ============================================================================ +// [asmjit::CompilerFeatures] +// ============================================================================ + +ASMJIT_ENUM(CompilerFeatures) { + //! Schedule instructions so they can be executed faster (`Compiler` only). + //! + //! Default `false` - has to be explicitly enabled as the scheduler needs + //! some time to run. + //! + //! X86/X64 + //! ------- + //! + //! If scheduling is enabled AsmJit will try to reorder instructions to + //! minimize the dependency chain. Scheduler always runs after the registers + //! are allocated so it doesn't change count of register allocs/spills. + //! + //! This feature is highly experimental and untested. + kCompilerFeatureEnableScheduler = 0 +}; // ============================================================================ // [asmjit::ConstScope] // ============================================================================ -//! \addtogroup asmjit_base_compiler -//! \{ - //! Scope of the constant. ASMJIT_ENUM(ConstScope) { //! Local constant, always embedded right after the current function. @@ -57,2635 +71,14 @@ ASMJIT_ENUM(ConstScope) { kConstScopeGlobal = 1 }; -// ============================================================================ -// [asmjit::VarType] -// ============================================================================ - -ASMJIT_ENUM(VarType) { - //! Variable is 8-bit signed integer. - kVarTypeInt8 = 0, - //! Variable is 8-bit unsigned integer. - kVarTypeUInt8 = 1, - //! Variable is 16-bit signed integer. - kVarTypeInt16 = 2, - //! 
Variable is 16-bit unsigned integer. - kVarTypeUInt16 = 3, - //! Variable is 32-bit signed integer. - kVarTypeInt32 = 4, - //! Variable is 32-bit unsigned integer. - kVarTypeUInt32 = 5, - //! Variable is 64-bit signed integer. - kVarTypeInt64 = 6, - //! Variable is 64-bit unsigned integer. - kVarTypeUInt64 = 7, - - //! Variable is target `intptr_t`, not compatible with host `intptr_t`. - kVarTypeIntPtr = 8, - //! Variable is target `uintptr_t`, not compatible with host `uintptr_t`. - kVarTypeUIntPtr = 9, - - //! Variable is 32-bit floating point (single precision). - kVarTypeFp32 = 10, - //! Variable is 64-bit floating point (double precision). - kVarTypeFp64 = 11, - - //! \internal - _kVarTypeIntStart = kVarTypeInt8, - //! \internal - _kVarTypeIntEnd = kVarTypeUIntPtr, - - //! \internal - _kVarTypeFpStart = kVarTypeFp32, - //! \internal - _kVarTypeFpEnd = kVarTypeFp64 -}; - -// ============================================================================ -// [asmjit::VarFlags] -// ============================================================================ - -//! \internal -//! -//! X86/X64 variable flags. -ASMJIT_ENUM(VarFlags) { - //! Variable contains single-precision floating-point(s). - kVarFlagSp = 0x10, - //! Variable contains double-precision floating-point(s). - kVarFlagDp = 0x20, - //! Variable is packed, i.e. packed floats, doubles, ... - kVarFlagPacked = 0x40 -}; - -// ============================================================================ -// [asmjit::VarAttrFlags] -// ============================================================================ - -//! Variable attribute flags. -ASMJIT_ENUM(VarAttrFlags) { - //! Variable is accessed through register on input. - kVarAttrInReg = 0x00000001, - //! Variable is accessed through register on output. - kVarAttrOutReg = 0x00000002, - //! Variable is accessed through register on input & output. - kVarAttrInOutReg = 0x00000003, - - //! Variable is accessed through memory on input. 
- kVarAttrInMem = 0x00000004, - //! Variable is accessed through memory on output. - kVarAttrOutMem = 0x00000008, - //! Variable is accessed through memory on input & output. - kVarAttrInOutMem = 0x0000000C, - - //! Register allocator can decide if input will be in register or memory. - kVarAttrInDecide = 0x00000010, - //! Register allocator can decide if output will be in register or memory. - kVarAttrOutDecide = 0x00000020, - //! Register allocator can decide if in/out will be in register or memory. - kVarAttrInOutDecide = 0x00000030, - - //! Variable is converted to other type/class on the input. - kVarAttrInConv = 0x00000040, - //! Variable is converted from other type/class on the output. - kVarAttrOutConv = 0x00000080, - //! Combination of `kVarAttrInConv` and `kVarAttrOutConv`. - kVarAttrInOutConv = 0x000000C0, - - //! Variable is a function call operand. - kVarAttrInCall = 0x00000100, - //! Variable is a function argument passed in register. - kVarAttrInArg = 0x00000200, - - //! Variable is a function return value passed in register. - kVarAttrOutRet = 0x00000400, - //! Variable should be spilled. - kVarAttrSpill = 0x00000800, - //! Variable should be unused at the end of the instruction/node. - kVarAttrUnuse = 0x00001000, - - //! \internal - //! - //! All in-flags. - kVarAttrInAll = - kVarAttrInReg | - kVarAttrInMem | - kVarAttrInDecide | - kVarAttrInCall | - kVarAttrInArg, - - //! \internal - //! - //! All out-flags. - kVarAttrOutAll = - kVarAttrOutReg | - kVarAttrOutMem | - kVarAttrOutDecide | - kVarAttrOutRet, - - //! Variable is already allocated on the input. - kVarAttrAllocInDone = 0x00400000, - //! Variable is already allocated on the output. - kVarAttrAllocOutDone = 0x00800000 -}; - -// ============================================================================ -// [asmjit::VarHint] -// ============================================================================ - -//! Variable hint (used by `Compiler)`. -//! -//! \sa Compiler. 
-ASMJIT_ENUM(VarHint) { - //! Alloc variable. - kVarHintAlloc = 0, - //! Spill variable. - kVarHintSpill = 1, - //! Save variable if modified. - kVarHintSave = 2, - //! Save variable if modified and mark it as unused. - kVarHintSaveAndUnuse = 3, - //! Mark variable as unused. - kVarHintUnuse = 4 -}; - -// ============================================================================ -// [asmjit::kVarState] -// ============================================================================ - -// TODO: Rename `kVarState` or `VarState`. - -//! State of variable. -//! -//! \note Variable states are used only during register allocation. -ASMJIT_ENUM(kVarState) { - //! Variable is currently not used. - kVarStateNone = 0, - //! Variable is currently allocated in register. - kVarStateReg = 1, - //! Variable is currently allocated in memory (or has been spilled). - kVarStateMem = 2 -}; - -// ============================================================================ -// [asmjit::FuncConv] -// ============================================================================ - -//! Function calling convention. -//! -//! For a platform specific calling conventions, see: -//! - `X86FuncConv` - X86/X64 calling conventions. -ASMJIT_ENUM(FuncConv) { - //! Calling convention is invalid (can't be used). - kFuncConvNone = 0 - -#if defined(ASMJIT_DOCGEN) - , - - //! Default calling convention for current platform / operating system. - kFuncConvHost = DependsOnHost, - - //! Default C calling convention based on current compiler's settings. - kFuncConvHostCDecl = DependsOnHost, - - //! Compatibility for `__stdcall` calling convention. - //! - //! \note This enumeration is always set to a value which is compatible with - //! current compilers __stdcall calling convention. In 64-bit mode the value - //! is compatible with `kX86FuncConvW64` or `kX86FuncConvU64`. - kFuncConvHostStdCall = DependsOnHost, - - //! Compatibility for `__fastcall` calling convention. - //! - //! 
\note This enumeration is always set to a value which is compatible with - //! current compilers `__fastcall` calling convention. In 64-bit mode the value - //! is compatible with `kX86FuncConvW64` or `kX86FuncConvU64`. - kFuncConvHostFastCall = DependsOnHost -#endif // ASMJIT_DOCGEN -}; - -// ============================================================================ -// [asmjit::FuncHint] -// ============================================================================ - -//! Function hints. -//! -//! For a platform specific calling conventions, see: -//! - `X86FuncHint` - X86/X64 function hints. -ASMJIT_ENUM(FuncHint) { - //! Make a naked function (default true). - //! - //! Naked function is function without using standard prolog/epilog sequence). - //! - //! X86/X64 Specific - //! ---------------- - //! - //! Common prolog sequence is: - //! - //! ~~~ - //! push zbp - //! mov zsp, zbp - //! sub zsp, StackAdjustment - //! ~~~ - //! - //! which is an equivalent to: - //! - //! ~~~ - //! enter StackAdjustment, 0 - //! ~~~ - //! - //! Common epilog sequence is: - //! - //! ~~~ - //! mov zsp, zbp - //! pop zbp - //! ~~~ - //! - //! which is an equavalent to: - //! - //! ~~~ - //! leave - //! ~~~ - //! - //! Naked functions can omit the prolog/epilog sequence. The advantage of - //! doing such modification is that EBP/RBP register can be used by the - //! register allocator which can result in less spills/allocs. - kFuncHintNaked = 0, - - //! Generate compact function prolog/epilog if possible. - //! - //! X86/X64 Specific - //! ---------------- - //! - //! Use shorter, but possible slower prolog/epilog sequence to save/restore - //! registers. - kFuncHintCompact = 1 -}; - -// ============================================================================ -// [asmjit::FuncFlags] -// ============================================================================ - -//! Function flags. -//! -//! For a platform specific calling conventions, see: -//! 
- `X86FuncFlags` - X86/X64 function flags. -ASMJIT_ENUM(FuncFlags) { - //! Whether the function is using naked (minimal) prolog / epilog. - kFuncFlagIsNaked = 0x00000001, - - //! Whether an another function is called from this function. - kFuncFlagIsCaller = 0x00000002, - - //! Whether the stack is not aligned to the required stack alignment, - //! thus it has to be aligned manually. - kFuncFlagIsStackMisaligned = 0x00000004, - - //! Whether the stack pointer is adjusted by the stack size needed - //! to save registers and function variables. - //! - //! X86/X64 Specific - //! ---------------- - //! - //! Stack pointer (ESP/RSP) is adjusted by 'sub' instruction in prolog and by - //! 'add' instruction in epilog (only if function is not naked). If function - //! needs to perform manual stack alignment more instructions are used to - //! adjust the stack (like "and zsp, -Alignment"). - kFuncFlagIsStackAdjusted = 0x00000008, - - //! Whether the function is finished using `Compiler::endFunc()`. - kFuncFlagIsFinished = 0x80000000 -}; - -// ============================================================================ -// [asmjit::FuncDir] -// ============================================================================ - -//! Function arguments direction. -ASMJIT_ENUM(FuncDir) { - //! Arguments are passed left to right. - //! - //! This arguments direction is unusual in C, however it's used in Pascal. - kFuncDirLtr = 0, - - //! Arguments are passed right ro left - //! - //! This is the default argument direction in C. - kFuncDirRtl = 1 -}; - -// ============================================================================ -// [asmjit::FuncArgIndex] -// ============================================================================ - -//! Function argument index (lo/hi). -ASMJIT_ENUM(FuncArgIndex) { - //! Maxumum number of function arguments supported by AsmJit. - kFuncArgCount = 16, - //! Extended maximum number of arguments (used internally). 
- kFuncArgCountLoHi = kFuncArgCount * 2, - - //! Index to the LO part of function argument (default). - //! - //! This value is typically omitted and added only if there is HI argument - //! accessed. - kFuncArgLo = 0, - //! Index to the HI part of function argument. - //! - //! HI part of function argument depends on target architecture. On x86 it's - //! typically used to transfer 64-bit integers (they form a pair of 32-bit - //! integers). - kFuncArgHi = kFuncArgCount -}; - -// ============================================================================ -// [asmjit::FuncRet] -// ============================================================================ - -//! Function return value (lo/hi) specification. -ASMJIT_ENUM(FuncRet) { - //! Index to the LO part of function return value. - kFuncRetLo = 0, - //! Index to the HI part of function return value. - kFuncRetHi = 1 -}; - -// ============================================================================ -// [asmjit::kFuncStackInvalid] -// ============================================================================ - -enum { - //! Invalid stack offset in function or function parameter. - kFuncStackInvalid = -1 -}; - -// ============================================================================ -// [asmjit::NodeType] -// ============================================================================ - -//! Type of node, see \ref Node. -ASMJIT_ENUM(NodeType) { - //! Invalid node (internal, can't be used). - kNodeTypeNone = 0, - //! Node is an .align directive, see \ref AlignNode. - kNodeTypeAlign, - //! Node is an embedded data, see \ref EmbedNode. - kNodeTypeEmbed, - //! Node is a comment, see \ref CommentNode. - kNodeTypeComment, - //! Node is a variable hint (alloc, spill, use, unuse), see \ref HintNode. - kNodeTypeHint, - //! Node is a label, see \ref TargetNode. - kNodeTypeTarget, - //! Node is an instruction, see \ref InstNode. - kNodeTypeInst, - //! Node is a function declaration, see \ref FuncNode. 
- kNodeTypeFunc, - //! Node is an end of the function, see \ref EndNode. - kNodeTypeEnd, - //! Node is a return, see \ref RetNode. - kNodeTypeRet, - //! Node is a function call, see \ref CallNode. - kNodeTypeCall, - //! Node is a function call argument moved on stack, see \ref SArgNode. - kNodeTypeSArg -}; - -// ============================================================================ -// [asmjit::NodeFlags] -// ============================================================================ - -ASMJIT_ENUM(NodeFlags) { - //! Whether the node has been translated, thus contains only registers. - kNodeFlagIsTranslated = 0x0001, - - //! Whether the node was scheduled - possibly reordered, but basically this - //! is a mark that is set by scheduler after the node has been visited. - kNodeFlagIsScheduled = 0x0002, - - //! Whether the node is informative only and can be safely removed. - kNodeFlagIsInformative = 0x0004, - - //! Whether the `InstNode` is a jump. - kNodeFlagIsJmp = 0x0008, - //! Whether the `InstNode` is a conditional jump. - kNodeFlagIsJcc = 0x0010, - - //! Whether the `InstNode` is an unconditinal jump or conditional - //! jump that is likely to be taken. - kNodeFlagIsTaken = 0x0020, - - //! Whether the `Node` will return from a function. - //! - //! This flag is used by both `EndNode` and `RetNode`. - kNodeFlagIsRet = 0x0040, - - //! Whether the instruction is special. - kNodeFlagIsSpecial = 0x0080, - - //! Whether the instruction is an FPU instruction. - kNodeFlagIsFp = 0x0100 -}; - -// ============================================================================ -// [asmjit::MemCell] -// ============================================================================ - -struct MemCell { - ASMJIT_NO_COPY(MemCell) - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get cell offset. 
- ASMJIT_INLINE int32_t getOffset() const { return _offset; } - //! Set cell offset. - ASMJIT_INLINE void setOffset(int32_t offset) { _offset = offset; } - - //! Get cell size. - ASMJIT_INLINE uint32_t getSize() const { return _size; } - //! Set cell size. - ASMJIT_INLINE void setSize(uint32_t size) { _size = size; } - - //! Get cell alignment. - ASMJIT_INLINE uint32_t getAlignment() const { return _alignment; } - //! Set cell alignment. - ASMJIT_INLINE void setAlignment(uint32_t alignment) { _alignment = alignment; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Next active cell. - MemCell* _next; - - //! Offset, relative to base-offset. - int32_t _offset; - //! Size. - uint32_t _size; - //! Alignment. - uint32_t _alignment; -}; - -// ============================================================================ -// [asmjit::Var] -// ============================================================================ - -//! Base class for all variables. -struct Var : public Operand { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE Var() : Operand(NoInit) { - _init_packed_op_sz_b0_b1_id(kOperandTypeVar, 0, 0, 0, kInvalidValue); - _init_packed_d2_d3(kInvalidValue, kInvalidValue); - } - - ASMJIT_INLINE Var(const Var& other) : Operand(other) {} - - explicit ASMJIT_INLINE Var(const _NoInit&) : Operand(NoInit) {} - - // -------------------------------------------------------------------------- - // [Var Specific] - // -------------------------------------------------------------------------- - - //! Clone `Var` operand. - ASMJIT_INLINE Var clone() const { - return Var(*this); - } - - //! Reset Var operand. 
- ASMJIT_INLINE void reset() { - _init_packed_op_sz_b0_b1_id(kOperandTypeVar, 0, kInvalidReg, kInvalidReg, kInvalidValue); - _init_packed_d2_d3(kInvalidValue, kInvalidValue); - } - - //! Get whether the variable has been initialized by `Compiler`. - ASMJIT_INLINE bool isInitialized() const { - return _vreg.id != kInvalidValue; - } - - //! Get variable type. - ASMJIT_INLINE uint32_t getVarType() const { - return _vreg.vType; - } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE Var& operator=(const Var& other) { _copy(other); return *this; } - - ASMJIT_INLINE bool operator==(const Var& other) const { return _packed[0] == other._packed[0]; } - ASMJIT_INLINE bool operator!=(const Var& other) const { return !operator==(other); } -}; - -// ============================================================================ -// [asmjit::VarBits] -// ============================================================================ - -//! Bit-array used by variable-liveness analysis. 
-struct VarBits { - // -------------------------------------------------------------------------- - // [Enums] - // -------------------------------------------------------------------------- - - enum { - kEntitySize = static_cast(sizeof(uintptr_t)), - kEntityBits = kEntitySize * 8 - }; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE uintptr_t getBit(uint32_t index) const { - return (data[index / kEntityBits] >> (index % kEntityBits)) & 1; - } - - ASMJIT_INLINE void setBit(uint32_t index) { - data[index / kEntityBits] |= static_cast(1) << (index % kEntityBits); - } - - ASMJIT_INLINE void delBit(uint32_t index) { - data[index / kEntityBits] &= ~(static_cast(1) << (index % kEntityBits)); - } - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool copyBits(const VarBits* s0, uint32_t len) { - uintptr_t r = 0; - for (uint32_t i = 0; i < len; i++) { - uintptr_t t = s0->data[i]; - data[i] = t; - r |= t; - } - return r != 0; - } - - ASMJIT_INLINE bool addBits(const VarBits* s0, uint32_t len) { - return addBits(this, s0, len); - } - - ASMJIT_INLINE bool addBits(const VarBits* s0, const VarBits* s1, uint32_t len) { - uintptr_t r = 0; - for (uint32_t i = 0; i < len; i++) { - uintptr_t t = s0->data[i] | s1->data[i]; - data[i] = t; - r |= t; - } - return r != 0; - } - - ASMJIT_INLINE bool andBits(const VarBits* s1, uint32_t len) { - return andBits(this, s1, len); - } - - ASMJIT_INLINE bool andBits(const VarBits* s0, const VarBits* s1, uint32_t len) { - uintptr_t r = 0; - for (uint32_t i = 0; i < len; i++) { - uintptr_t t = s0->data[i] & s1->data[i]; - data[i] = t; - r |= t; - } - return r != 0; - } - - ASMJIT_INLINE bool delBits(const VarBits* s1, uint32_t len) { - return delBits(this, s1, 
len); - } - - ASMJIT_INLINE bool delBits(const VarBits* s0, const VarBits* s1, uint32_t len) { - uintptr_t r = 0; - for (uint32_t i = 0; i < len; i++) { - uintptr_t t = s0->data[i] & ~s1->data[i]; - data[i] = t; - r |= t; - } - return r != 0; - } - - ASMJIT_INLINE bool _addBitsDelSource(VarBits* s1, uint32_t len) { - return _addBitsDelSource(this, s1, len); - } - - ASMJIT_INLINE bool _addBitsDelSource(const VarBits* s0, VarBits* s1, uint32_t len) { - uintptr_t r = 0; - for (uint32_t i = 0; i < len; i++) { - uintptr_t a = s0->data[i]; - uintptr_t b = s1->data[i]; - - this->data[i] = a | b; - b &= ~a; - - s1->data[i] = b; - r |= b; - } - return r != 0; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uintptr_t data[1]; -}; - -// ============================================================================ -// [asmjit::VarData] -// ============================================================================ - -//! Base variable data. -struct VarData { - // -------------------------------------------------------------------------- - // [Accessors - Base] - // -------------------------------------------------------------------------- - - //! Get variable name. - ASMJIT_INLINE const char* getName() const { - return _name; - } - - //! Get variable id. - ASMJIT_INLINE uint32_t getId() const { - return _id; - } - - //! Get variable type. - ASMJIT_INLINE uint32_t getType() const { - return _type; - } - - //! Get variable class. - ASMJIT_INLINE uint32_t getClass() const { - return _class; - } - - // -------------------------------------------------------------------------- - // [Accessors - ContextId] - // -------------------------------------------------------------------------- - - //! Get whether the variable has context id. - ASMJIT_INLINE bool hasContextId() const { - return _contextId != kInvalidValue; - } - - //! 
Get context variable id (used only by `Context)`. - ASMJIT_INLINE uint32_t getContextId() const { - return _contextId; - } - - //! Set context variable id (used only by `Context)`. - ASMJIT_INLINE void setContextId(uint32_t contextId) { - _contextId = contextId; - } - - //! Reset context variable id (used only by `Context)`. - ASMJIT_INLINE void resetContextId() { - _contextId = kInvalidValue; - } - - // -------------------------------------------------------------------------- - // [Accessors - Priority] - // -------------------------------------------------------------------------- - - //! Get variable priority, used by compiler to decide which variable to spill. - ASMJIT_INLINE uint32_t getPriority() const { - return _priority; - } - - //! Set variable priority. - ASMJIT_INLINE void setPriority(uint32_t priority) { - ASMJIT_ASSERT(priority <= 0xFF); - _priority = static_cast(priority); - } - - // -------------------------------------------------------------------------- - // [Accessors - State] - // -------------------------------------------------------------------------- - - //! Get variable state, only used by `Context`. - ASMJIT_INLINE uint32_t getState() const { - return _state; - } - - //! Set variable state, only used by `Context`. - ASMJIT_INLINE void setState(uint32_t state) { - ASMJIT_ASSERT(state <= 0xFF); - _state = static_cast(state); - } - - // -------------------------------------------------------------------------- - // [Accessors - RegIndex] - // -------------------------------------------------------------------------- - - //! Get register index. - ASMJIT_INLINE uint32_t getRegIndex() const { - return _regIndex; - } - - //! Set register index. - ASMJIT_INLINE void setRegIndex(uint32_t regIndex) { - ASMJIT_ASSERT(regIndex <= 0xFF); - _regIndex = static_cast(regIndex); - } - - //! Reset register index. 
- ASMJIT_INLINE void resetRegIndex() { - _regIndex = static_cast(kInvalidReg); - } - - // -------------------------------------------------------------------------- - // [Accessors - HomeIndex/Mask] - // -------------------------------------------------------------------------- - - //! Get home registers mask. - ASMJIT_INLINE uint32_t getHomeMask() const { - return _homeMask; - } - - //! Add a home register index to the home registers mask. - ASMJIT_INLINE void addHomeIndex(uint32_t regIndex) { - _homeMask |= IntUtil::mask(regIndex); - } - - // -------------------------------------------------------------------------- - // [Accessors - Flags] - // -------------------------------------------------------------------------- - - //! Get variable flags. - ASMJIT_INLINE uint32_t getFlags() const { - return _flags; - } - - //! Get whether the VarData is only memory allocated on the stack. - ASMJIT_INLINE bool isStack() const { return static_cast(_isStack); } - - //! Get whether the variable is a function argument passed through memory. - ASMJIT_INLINE bool isMemArg() const { return static_cast(_isMemArg); } - - //! Get variable content can be calculated by a simple instruction. - ASMJIT_INLINE bool isCalculated() const { return static_cast(_isCalculated); } - //! Get whether to save variable when it's unused (spill). - ASMJIT_INLINE bool saveOnUnuse() const { return static_cast(_saveOnUnuse); } - - //! Get whether the variable was changed. - ASMJIT_INLINE bool isModified() const { return static_cast(_modified); } - //! Set whether the variable was changed. - ASMJIT_INLINE void setModified(bool modified) { _modified = modified; } - - //! Get variable alignment. - ASMJIT_INLINE uint32_t getAlignment() const { return _alignment; } - //! Get variable size. - ASMJIT_INLINE uint32_t getSize() const { return _size; } - - //! Get home memory offset. - ASMJIT_INLINE int32_t getMemOffset() const { return _memOffset; } - //! Set home memory offset. 
- ASMJIT_INLINE void setMemOffset(int32_t offset) { _memOffset = offset; } - - //! Get home memory cell. - ASMJIT_INLINE MemCell* getMemCell() const { return _memCell; } - //! Set home memory cell. - ASMJIT_INLINE void setMemCell(MemCell* cell) { _memCell = cell; } - - // -------------------------------------------------------------------------- - // [Accessors - Temporary Usage] - // -------------------------------------------------------------------------- - - //! Get temporary VarAttr. - ASMJIT_INLINE VarAttr* getVa() const { return _va; } - //! Set temporary VarAttr. - ASMJIT_INLINE void setVa(VarAttr* va) { _va = va; } - //! Reset temporary VarAttr. - ASMJIT_INLINE void resetVa() { _va = NULL; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Variable name. - const char* _name; - - //! Variable id. - uint32_t _id; - //! Context variable id, used by `Context` only, initially `kInvalidValue`. - uint32_t _contextId; - - //! Variable type. - uint8_t _type; - //! Variable class. - uint8_t _class; - //! Variable flags. - uint8_t _flags; - //! Variable priority. - uint8_t _priority; - - //! Variable state (connected with actual `VarState)`. - uint8_t _state; - //! Actual register index (only used by `Context)`, during translate. - uint8_t _regIndex; - - //! Whether the variable is only used as memory allocated on the stack. - uint8_t _isStack : 1; - //! Whether the variable is a function argument passed through memory. - uint8_t _isMemArg : 1; - //! Whether variable content can be calculated by a simple instruction. - //! - //! This is used mainly by MMX and SSE2 code. This flag indicates that - //! register allocator should never reserve memory for this variable, because - //! the content can be generated by a single instruction (for example PXOR). - uint8_t _isCalculated : 1; - //! Save on unuse (at end of the variable scope). 
- uint8_t _saveOnUnuse : 1; - //! Whether variable was changed (connected with actual `VarState)`. - uint8_t _modified : 1; - //! \internal - uint8_t _reserved0 : 3; - //! Variable natural alignment. - uint8_t _alignment; - - //! Variable size. - uint32_t _size; - - //! Mask of all registers variable has been allocated to. - uint32_t _homeMask; - - //! Home memory offset. - int32_t _memOffset; - //! Home memory cell, used by `Context` (initially NULL). - MemCell* _memCell; - - //! Register read access statistics. - uint32_t rReadCount; - //! Register write access statistics. - uint32_t rWriteCount; - - //! Memory read statistics. - uint32_t mReadCount; - //! Memory write statistics. - uint32_t mWriteCount; - - // -------------------------------------------------------------------------- - // [Members - Temporary Usage] - // -------------------------------------------------------------------------- - - // These variables are only used during register allocation. They are - // initialized by init() phase and reset by cleanup() phase. - - union { - //! Temporary link to VarAttr* used by the `Context` used in - //! various phases, but always set back to NULL when finished. - //! - //! This temporary data is designed to be used by algorithms that need to - //! store some data into variables themselves during compilation. But it's - //! expected that after variable is compiled & translated the data is set - //! back to zero/null. Initial value is NULL. - VarAttr* _va; - - //! \internal - //! - //! Same as `_va` just provided as `uintptr_t`. 
- uintptr_t _vaUInt; - }; -}; - -// ============================================================================ -// [asmjit::VarAttr] -// ============================================================================ - -struct VarAttr { - // -------------------------------------------------------------------------- - // [Setup] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void setup(VarData* vd, uint32_t flags = 0, uint32_t inRegs = 0, uint32_t allocableRegs = 0) { - _vd = vd; - _flags = flags; - _varCount = 0; - _inRegIndex = kInvalidReg; - _outRegIndex = kInvalidReg; - _reserved = 0; - _inRegs = inRegs; - _allocableRegs = allocableRegs; - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get VarData. - ASMJIT_INLINE VarData* getVd() const { return _vd; } - //! Set VarData. - ASMJIT_INLINE void setVd(VarData* vd) { _vd = vd; } - - //! Get flags. - ASMJIT_INLINE uint32_t getFlags() const { return _flags; } - //! Set flags. - ASMJIT_INLINE void setFlags(uint32_t flags) { _flags = flags; } - - //! Get whether `flag` is on. - ASMJIT_INLINE bool hasFlag(uint32_t flag) { return (_flags & flag) != 0; } - //! Add `flags`. - ASMJIT_INLINE void orFlags(uint32_t flags) { _flags |= flags; } - //! Mask `flags`. - ASMJIT_INLINE void andFlags(uint32_t flags) { _flags &= flags; } - //! Clear `flags`. - ASMJIT_INLINE void andNotFlags(uint32_t flags) { _flags &= ~flags; } - - //! Get how many times the variable is used by the instruction/node. - ASMJIT_INLINE uint32_t getVarCount() const { return _varCount; } - //! Set how many times the variable is used by the instruction/node. - ASMJIT_INLINE void setVarCount(uint32_t count) { _varCount = static_cast(count); } - //! Add how many times the variable is used by the instruction/node. 
- ASMJIT_INLINE void addVarCount(uint32_t count = 1) { _varCount += static_cast(count); } - - //! Get whether the variable has to be allocated in a specific input register. - ASMJIT_INLINE uint32_t hasInRegIndex() const { return _inRegIndex != kInvalidReg; } - //! Get the input register index or `kInvalidReg`. - ASMJIT_INLINE uint32_t getInRegIndex() const { return _inRegIndex; } - //! Set the input register index. - ASMJIT_INLINE void setInRegIndex(uint32_t index) { _inRegIndex = static_cast(index); } - //! Reset the input register index. - ASMJIT_INLINE void resetInRegIndex() { _inRegIndex = kInvalidReg; } - - //! Get whether the variable has to be allocated in a specific output register. - ASMJIT_INLINE uint32_t hasOutRegIndex() const { return _outRegIndex != kInvalidReg; } - //! Get the output register index or `kInvalidReg`. - ASMJIT_INLINE uint32_t getOutRegIndex() const { return _outRegIndex; } - //! Set the output register index. - ASMJIT_INLINE void setOutRegIndex(uint32_t index) { _outRegIndex = static_cast(index); } - //! Reset the output register index. - ASMJIT_INLINE void resetOutRegIndex() { _outRegIndex = kInvalidReg; } - - //! Get whether the mandatory input registers are in used. - ASMJIT_INLINE bool hasInRegs() const { return _inRegs != 0; } - //! Get mandatory input registers (mask). - ASMJIT_INLINE uint32_t getInRegs() const { return _inRegs; } - //! Set mandatory input registers (mask). - ASMJIT_INLINE void setInRegs(uint32_t mask) { _inRegs = mask; } - //! Add mandatory input registers (mask). - ASMJIT_INLINE void addInRegs(uint32_t mask) { _inRegs |= mask; } - //! And mandatory input registers (mask). - ASMJIT_INLINE void andInRegs(uint32_t mask) { _inRegs &= mask; } - //! Clear mandatory input registers (mask). - ASMJIT_INLINE void delInRegs(uint32_t mask) { _inRegs &= ~mask; } - - //! Get allocable input registers (mask). - ASMJIT_INLINE uint32_t getAllocableRegs() const { return _allocableRegs; } - //! 
Set allocable input registers (mask). - ASMJIT_INLINE void setAllocableRegs(uint32_t mask) { _allocableRegs = mask; } - //! Add allocable input registers (mask). - ASMJIT_INLINE void addAllocableRegs(uint32_t mask) { _allocableRegs |= mask; } - //! And allocable input registers (mask). - ASMJIT_INLINE void andAllocableRegs(uint32_t mask) { _allocableRegs &= mask; } - //! Clear allocable input registers (mask). - ASMJIT_INLINE void delAllocableRegs(uint32_t mask) { _allocableRegs &= ~mask; } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE VarAttr& operator=(const VarAttr& other) { - ::memcpy(this, &other, sizeof(VarAttr)); - return *this; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - VarData* _vd; - //! Flags. - uint32_t _flags; - - union { - struct { - //! How many times the variable is used by the instruction/node. - uint8_t _varCount; - //! Input register index or `kInvalidReg` if it's not given. - //! - //! Even if the input register index is not given (i.e. it may by any - //! register), register allocator should assign an index that will be - //! used to persist a variable into this specific index. It's helpful - //! in situations where one variable has to be allocated in multiple - //! registers to determine the register which will be persistent. - uint8_t _inRegIndex; - //! Output register index or `kInvalidReg` if it's not given. - //! - //! Typically `kInvalidReg` if variable is only used on input. - uint8_t _outRegIndex; - //! \internal - uint8_t _reserved; - }; - - //! \internal - //! - //! Packed data #0. - uint32_t _packed; - }; - - //! Mandatory input registers. - //! - //! Mandatory input registers are required by the instruction even if - //! 
there are duplicates. This schema allows us to allocate one variable - //! in one or more register when needed. Required mostly by instructions - //! that have implicit register operands (imul, cpuid, ...) and function - //! call. - uint32_t _inRegs; - - //! Allocable input registers. - //! - //! Optional input registers is a mask of all allocable registers for a given - //! variable where we have to pick one of them. This mask is usually not used - //! when _inRegs is set. If both masks are used then the register - //! allocator tries first to find an intersection between these and allocates - //! an extra slot if not found. - uint32_t _allocableRegs; -}; - -// ============================================================================ -// [asmjit::VarMap] -// ============================================================================ - -//! Variables' map related to a single node (instruction / other node). -struct VarMap { - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get count of variables (all). - ASMJIT_INLINE uint32_t getVaCount() const { - return _vaCount; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Variables count. - uint32_t _vaCount; -}; - -// ============================================================================ -// [asmjit::VarState] -// ============================================================================ - -//! Variables' state. -struct VarState {}; - -// ============================================================================ -// [asmjit::TypeId / VarMapping] -// ============================================================================ - -//! Function builder 'void' type. -struct Void {}; - -//! Function builder 'int8_t' type. 
-struct Int8Type {}; -//! Function builder 'uint8_t' type. -struct UInt8Type {}; - -//! Function builder 'int16_t' type. -struct Int16Type {}; -//! Function builder 'uint16_t' type. -struct UInt16Type {}; - -//! Function builder 'int32_t' type. -struct Int32Type {}; -//! Function builder 'uint32_t' type. -struct UInt32Type {}; - -//! Function builder 'int64_t' type. -struct Int64Type {}; -//! Function builder 'uint64_t' type. -struct UInt64Type {}; - -//! Function builder 'intptr_t' type. -struct IntPtrType {}; -//! Function builder 'uintptr_t' type. -struct UIntPtrType {}; - -//! Function builder 'float' type. -struct FloatType {}; -//! Function builder 'double' type. -struct DoubleType {}; - -#if !defined(ASMJIT_DOCGEN) -template -struct TypeId { - // Left empty to report any type, which is not known to asmjit. -}; - -template -struct TypeId { - enum { kId = kVarTypeIntPtr }; -}; - -#define ASMJIT_TYPE_ID(_T_, _Id_) \ - template<> \ - struct TypeId<_T_> { enum { kId = _Id_ }; } - -ASMJIT_TYPE_ID(void , kInvalidVar); -ASMJIT_TYPE_ID(char , IntTraits::kIsSigned ? 
kVarTypeInt8 : kVarTypeUInt8); -ASMJIT_TYPE_ID(signed char , kVarTypeInt8); -ASMJIT_TYPE_ID(unsigned char, kVarTypeUInt8); -ASMJIT_TYPE_ID(int16_t , kVarTypeInt16); -ASMJIT_TYPE_ID(uint16_t , kVarTypeUInt16); -ASMJIT_TYPE_ID(int32_t , kVarTypeInt32); -ASMJIT_TYPE_ID(uint32_t , kVarTypeUInt32); -ASMJIT_TYPE_ID(int64_t , kVarTypeInt64); -ASMJIT_TYPE_ID(uint64_t , kVarTypeUInt64); -ASMJIT_TYPE_ID(float , kVarTypeFp32); -ASMJIT_TYPE_ID(double , kVarTypeFp64); - -ASMJIT_TYPE_ID(Void , kInvalidVar); -ASMJIT_TYPE_ID(Int8Type , kVarTypeInt8); -ASMJIT_TYPE_ID(UInt8Type , kVarTypeUInt8); -ASMJIT_TYPE_ID(Int16Type , kVarTypeInt16); -ASMJIT_TYPE_ID(UInt16Type , kVarTypeUInt16); -ASMJIT_TYPE_ID(Int32Type , kVarTypeInt32); -ASMJIT_TYPE_ID(UInt32Type , kVarTypeUInt32); -ASMJIT_TYPE_ID(Int64Type , kVarTypeInt64); -ASMJIT_TYPE_ID(UInt64Type , kVarTypeUInt64); -ASMJIT_TYPE_ID(IntPtrType , kVarTypeIntPtr); -ASMJIT_TYPE_ID(UIntPtrType , kVarTypeUIntPtr); -ASMJIT_TYPE_ID(FloatType , kVarTypeFp32); -ASMJIT_TYPE_ID(DoubleType , kVarTypeFp64); -#endif // !ASMJIT_DOCGEN - -// ============================================================================ -// [asmjit::FuncInOut] -// ============================================================================ - -//! Function in/out - argument or return value translated from `FuncPrototype`. -struct FuncInOut { - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE uint32_t getVarType() const { return _varType; } - - ASMJIT_INLINE bool hasRegIndex() const { return _regIndex != kInvalidReg; } - ASMJIT_INLINE uint32_t getRegIndex() const { return _regIndex; } - - ASMJIT_INLINE bool hasStackOffset() const { return _stackOffset != kFuncStackInvalid; } - ASMJIT_INLINE int32_t getStackOffset() const { return static_cast(_stackOffset); } - - //! Get whether the argument / return value is assigned. 
- ASMJIT_INLINE bool isSet() const { - return (_regIndex != kInvalidReg) | (_stackOffset != kFuncStackInvalid); - } - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - //! Reset the function argument to "unassigned state". - ASMJIT_INLINE void reset() { _packed = 0xFFFFFFFF; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - union { - struct { - //! Variable type, see `VarType`. - uint8_t _varType; - //! Register index if argument / return value is a register. - uint8_t _regIndex; - //! Stack offset if argument / return value is on the stack. - int16_t _stackOffset; - }; - - //! All members packed into single 32-bit integer. - uint32_t _packed; - }; -}; - -// ============================================================================ -// [asmjit::FuncPrototype] -// ============================================================================ - -//! Function prototype. -//! -//! Function prototype contains information about function return type, count -//! of arguments and their types. Function prototype is a low level structure -//! which doesn't contain platform specific or calling convention specific -//! information. Function prototype is used to create a `FuncDecl`. -struct FuncPrototype { - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get function return value. - ASMJIT_INLINE uint32_t getRet() const { return _ret; } - - //! Get function arguments' IDs. - ASMJIT_INLINE const uint32_t* getArgList() const { return _argList; } - //! Get count of function arguments. - ASMJIT_INLINE uint32_t getArgCount() const { return _argCount; } - - //! Get argument at index `id`. 
- ASMJIT_INLINE uint32_t getArg(uint32_t id) const { - ASMJIT_ASSERT(id < _argCount); - return _argList[id]; - } - - //! Set function definition - return type and arguments. - ASMJIT_INLINE void _setPrototype(uint32_t ret, const uint32_t* argList, uint32_t argCount) { - _ret = ret; - _argList = argList; - _argCount = argCount; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint32_t _ret; - uint32_t _argCount; - const uint32_t* _argList; -}; - -// ============================================================================ -// [asmjit::FuncBuilderX] -// ============================================================================ - -//! Custom function builder for up to 32 function arguments. -struct FuncBuilderX : public FuncPrototype { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE FuncBuilderX() { - _setPrototype(kInvalidVar, _builderArgList, 0); - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Set return type to `retType`. 
- ASMJIT_INLINE void setRet(uint32_t retType) { - _ret = retType; - } - - ASMJIT_INLINE void setArg(uint32_t id, uint32_t type) { - ASMJIT_ASSERT(id < _argCount); - _builderArgList[id] = type; - } - - ASMJIT_INLINE void addArg(uint32_t type) { - ASMJIT_ASSERT(_argCount < kFuncArgCount); - _builderArgList[_argCount++] = type; - } - - template - ASMJIT_INLINE void setRetT() { - setRet(TypeId::kId); - } - - template - ASMJIT_INLINE void setArgT(uint32_t id) { - setArg(id, TypeId::kId); - } - - template - ASMJIT_INLINE void addArgT() { - addArg(TypeId::kId); - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint32_t _builderArgList[kFuncArgCount]; -}; - -//! \internal -#define T(_Type_) TypeId<_Type_>::kId - -//! Function prototype (no args). -template -struct FuncBuilder0 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder0() { - _setPrototype(T(RET), NULL, 0); - } -}; - -//! Function prototype (1 argument). -template -struct FuncBuilder1 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder1() { - static const uint32_t args[] = { T(P0) }; - _setPrototype(T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Function prototype (2 arguments). -template -struct FuncBuilder2 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder2() { - static const uint32_t args[] = { T(P0), T(P1) }; - _setPrototype(T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Function prototype (3 arguments). -template -struct FuncBuilder3 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder3() { - static const uint32_t args[] = { T(P0), T(P1), T(P2) }; - _setPrototype(T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Function prototype (4 arguments). 
-template -struct FuncBuilder4 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder4() { - static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3) }; - _setPrototype(T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Function prototype (5 arguments). -template -struct FuncBuilder5 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder5() { - static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4) }; - _setPrototype(T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Function prototype (6 arguments). -template -struct FuncBuilder6 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder6() { - static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5) }; - _setPrototype(T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Function prototype (7 arguments). -template -struct FuncBuilder7 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder7() { - static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6) }; - _setPrototype(T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Function prototype (8 arguments). -template -struct FuncBuilder8 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder8() { - static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7) }; - _setPrototype(T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Function prototype (9 arguments). -template -struct FuncBuilder9 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder9() { - static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7), T(P8) }; - _setPrototype(T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -//! Function prototype (10 arguments). 
-template -struct FuncBuilder10 : public FuncPrototype { - ASMJIT_INLINE FuncBuilder10() { - static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7), T(P8), T(P9) }; - _setPrototype(T(RET), args, ASMJIT_ARRAY_SIZE(args)); - } -}; - -#undef T - -// ============================================================================ -// [asmjit::FuncDecl] -// ============================================================================ - -//! Function declaration. -struct FuncDecl { - // -------------------------------------------------------------------------- - // [Accessors - Calling Convention] - // -------------------------------------------------------------------------- - - //! Get function calling convention, see `FuncConv`. - ASMJIT_INLINE uint32_t getConvention() const { return _convention; } - - //! Get whether the callee pops the stack. - ASMJIT_INLINE uint32_t getCalleePopsStack() const { return _calleePopsStack; } - - //! Get direction of arguments passed on the stack. - //! - //! Direction should be always `kFuncDirRtl`. - //! - //! \note This is related to used calling convention, it's not affected by - //! number of function arguments or their types. - ASMJIT_INLINE uint32_t getDirection() const { return _direction; } - - //! Get stack size needed for function arguments passed on the stack. - ASMJIT_INLINE uint32_t getArgStackSize() const { return _argStackSize; } - //! Get size of "Red Zone". - ASMJIT_INLINE uint32_t getRedZoneSize() const { return _redZoneSize; } - //! Get size of "Spill Zone". - ASMJIT_INLINE uint32_t getSpillZoneSize() const { return _spillZoneSize; } - - // -------------------------------------------------------------------------- - // [Accessors - Arguments and Return] - // -------------------------------------------------------------------------- - - //! Get whether the function has a return value. - ASMJIT_INLINE bool hasRet() const { return _retCount != 0; } - //! Get count of function return values. 
- ASMJIT_INLINE uint32_t getRetCount() const { return _retCount; } - - //! Get function return value. - ASMJIT_INLINE FuncInOut& getRet(uint32_t index = kFuncRetLo) { return _retList[index]; } - //! Get function return value. - ASMJIT_INLINE const FuncInOut& getRet(uint32_t index = kFuncRetLo) const { return _retList[index]; } - - //! Get count of function arguments. - ASMJIT_INLINE uint32_t getArgCount() const { return _argCount; } - - //! Get function arguments array. - ASMJIT_INLINE FuncInOut* getArgList() { return _argList; } - //! Get function arguments array (const). - ASMJIT_INLINE const FuncInOut* getArgList() const { return _argList; } - - //! Get function argument at index `index`. - ASMJIT_INLINE FuncInOut& getArg(size_t index) { - ASMJIT_ASSERT(index < kFuncArgCountLoHi); - return _argList[index]; - } - - //! Get function argument at index `index`. - ASMJIT_INLINE const FuncInOut& getArg(size_t index) const { - ASMJIT_ASSERT(index < kFuncArgCountLoHi); - return _argList[index]; - } - - ASMJIT_INLINE void resetArg(size_t index) { - ASMJIT_ASSERT(index < kFuncArgCountLoHi); - _argList[index].reset(); - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Calling convention. - uint8_t _convention; - //! Whether a callee pops stack. - uint8_t _calleePopsStack : 1; - //! Direction for arguments passed on the stack, see `FuncDir`. - uint8_t _direction : 1; - //! Reserved #0 (alignment). - uint8_t _reserved0 : 6; - - //! Count of arguments in `_argList`. - uint8_t _argCount; - //! Count of return value(s). - uint8_t _retCount; - - //! Count of bytes consumed by arguments on the stack (aligned). - uint32_t _argStackSize; - - //! Size of "Red Zone". - //! - //! \note Used by AMD64-ABI (128 bytes). - uint16_t _redZoneSize; - - //! Size of "Spill Zone". - //! - //! \note Used by WIN64-ABI (32 bytes). 
- uint16_t _spillZoneSize; - - //! Function arguments (including HI arguments) mapped to physical - //! registers and stack offset. - FuncInOut _argList[kFuncArgCountLoHi]; - - //! Function return value(s). - FuncInOut _retList[2]; -}; - -// ============================================================================ -// [asmjit::Node] -// ============================================================================ - -//! Base node. -//! -//! `Every` node represents an abstract instruction, directive, label, or -//! macro-instruction generated by compiler. -struct Node { - ASMJIT_NO_COPY(Node) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create new `Node`. - //! - //! \note Always use compiler to create nodes. - ASMJIT_INLINE Node(Compiler* compiler, uint32_t type); // Defined-Later. - - //! Destroy `Node`. - ASMJIT_INLINE ~Node() {} - - // -------------------------------------------------------------------------- - // [Accessors - List] - // -------------------------------------------------------------------------- - - //! Get previous node in the compiler stream. - ASMJIT_INLINE Node* getPrev() const { - return _prev; - } - - //! Get next node in the compiler stream. - ASMJIT_INLINE Node* getNext() const { - return _next; - } - - // -------------------------------------------------------------------------- - // [Accessors - Comment] - // -------------------------------------------------------------------------- - - //! Get comment string. - ASMJIT_INLINE const char* getComment() const { - return _comment; - } - - //! Set comment string to `str`. 
- ASMJIT_INLINE void setComment(const char* comment) { - _comment = comment; - } - - // -------------------------------------------------------------------------- - // [Accessors - Type and Flags] - // -------------------------------------------------------------------------- - - //! Get node type, see `NodeType`. - ASMJIT_INLINE uint32_t getType() const { - return _type; - } - - //! Get node flags. - ASMJIT_INLINE uint32_t getFlags() const { - return _flags; - } - - //! Get whether the instruction has flag `flag`. - ASMJIT_INLINE bool hasFlag(uint32_t flag) const { - return (static_cast(_flags) & flag) != 0; - } - - //! Set node flags to `flags`. - ASMJIT_INLINE void setFlags(uint32_t flags) { - _flags = static_cast(flags); - } - - //! Add instruction `flags`. - ASMJIT_INLINE void orFlags(uint32_t flags) { - _flags |= static_cast(flags); - } - - //! And instruction `flags`. - ASMJIT_INLINE void andFlags(uint32_t flags) { - _flags &= static_cast(flags); - } - - //! Clear instruction `flags`. - ASMJIT_INLINE void andNotFlags(uint32_t flags) { - _flags &= ~static_cast(flags); - } - - //! Get whether the node has beed fetched. - ASMJIT_INLINE bool isFetched() const { - return _flowId != 0; - } - - //! Get whether the node has been translated. - ASMJIT_INLINE bool isTranslated() const { - return hasFlag(kNodeFlagIsTranslated); - } - - //! Get whether the node has been translated. - ASMJIT_INLINE bool isScheduled() const { - return hasFlag(kNodeFlagIsScheduled); - } - - //! Get whether the node is informative only and can be safely removed after - //! translation. - //! - //! Informative nodes are comments and hints. - ASMJIT_INLINE bool isInformative() const { - return hasFlag(kNodeFlagIsInformative); - } - - //! Whether the node is `InstNode` and unconditional jump. - ASMJIT_INLINE bool isJmp() const { return hasFlag(kNodeFlagIsJmp); } - //! Whether the node is `InstNode` and conditional jump. 
- ASMJIT_INLINE bool isJcc() const { return hasFlag(kNodeFlagIsJcc); } - //! Whether the node is `InstNode` and conditional/unconditional jump. - ASMJIT_INLINE bool isJmpOrJcc() const { return hasFlag(kNodeFlagIsJmp | kNodeFlagIsJcc); } - //! Whether the node is `InstNode` and return. - ASMJIT_INLINE bool isRet() const { return hasFlag(kNodeFlagIsRet); } - - //! Get whether the node is `InstNode` and the instruction is special. - ASMJIT_INLINE bool isSpecial() const { return hasFlag(kNodeFlagIsSpecial); } - //! Get whether the node is `InstNode` and the instruction uses x87-FPU. - ASMJIT_INLINE bool isFp() const { return hasFlag(kNodeFlagIsFp); } - - // -------------------------------------------------------------------------- - // [Accessors - FlowId] - // -------------------------------------------------------------------------- - - //! Get flow index. - ASMJIT_INLINE uint32_t getFlowId() const { return _flowId; } - //! Set flow index. - ASMJIT_INLINE void setFlowId(uint32_t flowId) { _flowId = flowId; } - - // -------------------------------------------------------------------------- - // [Accessors - VarMap] - // -------------------------------------------------------------------------- - - //! Get whether node contains variable allocation instructions. - ASMJIT_INLINE bool hasMap() const { - return _map != NULL; - } - - //! Get variable allocation instructions. - ASMJIT_INLINE VarMap* getMap() const { - return _map; - } - - //! Get variable allocation instructions casted to `T*`. - template - ASMJIT_INLINE T* getMap() const { - return static_cast(_map); - } - - //! Set variable allocation instructions. - ASMJIT_INLINE void setMap(VarMap* map) { - _map = map; - } - - // -------------------------------------------------------------------------- - // [Accessors - VarState] - // -------------------------------------------------------------------------- - - //! Get node state. - ASMJIT_INLINE VarState* getState() const { - return _state; - } - - //! 
Get node state casted to `T*`. - template - ASMJIT_INLINE T* getState() const { - return static_cast(_state); - } - - //! Set node state. - ASMJIT_INLINE void setState(VarState* state) { - _state = state; - } - - // -------------------------------------------------------------------------- - // [Accessors - Liveness] - // -------------------------------------------------------------------------- - - //! Get whether the node has variable liveness bits. - ASMJIT_INLINE bool hasLiveness() const { - return _liveness != NULL; - } - - //! Get variable liveness bits. - ASMJIT_INLINE VarBits* getLiveness() const { - return _liveness; - } - - //! Set variable liveness bits. - ASMJIT_INLINE void setLiveness(VarBits* liveness) { - _liveness = liveness; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Previous node. - Node* _prev; - //! Next node. - Node* _next; - - //! Node type, see `NodeType`. - uint8_t _type; - //! Operands count (if the node has operands, otherwise zero). - uint8_t _opCount; - //! Node flags, different meaning for every node type. - uint16_t _flags; - - //! Flow index. - uint32_t _flowId; - - //! Inline comment string, initially set to NULL. - const char* _comment; - - //! Variable mapping (VarAttr to VarData), initially NULL, filled during - //! fetch phase. - VarMap* _map; - - //! Variable liveness bits (initially NULL, filled by analysis phase). - VarBits* _liveness; - - //! Saved state. - //! - //! Initially NULL, not all nodes have saved state, only branch/flow control - //! nodes. - VarState* _state; -}; - -// ============================================================================ -// [asmjit::AlignNode] -// ============================================================================ - -//! Align node. 
-struct AlignNode : public Node { - ASMJIT_NO_COPY(AlignNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `AlignNode` instance. - ASMJIT_INLINE AlignNode(Compiler* compiler, uint32_t mode, uint32_t offset) : - Node(compiler, kNodeTypeAlign) { - - _mode = mode; - _offset = offset; - } - - //! Destroy the `AlignNode` instance. - ASMJIT_INLINE ~AlignNode() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get alignment mode. - ASMJIT_INLINE uint32_t getMode() const { - return _mode; - } - - //! Set alignment mode. - ASMJIT_INLINE void setMode(uint32_t mode) { - _mode = mode; - } - - //! Get align offset in bytes. - ASMJIT_INLINE uint32_t getOffset() const { - return _offset; - } - - //! Set align offset in bytes to `offset`. - ASMJIT_INLINE void setOffset(uint32_t offset) { - _offset = offset; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Alignment mode, see \ref AlignMode. - uint32_t _mode; - //! Alignment offset in bytes. - uint32_t _offset; -}; - -// ============================================================================ -// [asmjit::EmbedNode] -// ============================================================================ - -//! Embed node. -//! -//! Embed node is used to embed data into final assembler stream. The data is -//! considered to be RAW; No analysis is performed on RAW data. 
-struct EmbedNode : public Node { - ASMJIT_NO_COPY(EmbedNode) - - // -------------------------------------------------------------------------- - // [Enums] - // -------------------------------------------------------------------------- - - enum { kInlineBufferSize = 8 }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `EmbedNode` instance. - ASMJIT_INLINE EmbedNode(Compiler* compiler, void* data, uint32_t size) : - Node(compiler, kNodeTypeEmbed) { - - _size = size; - if (size <= kInlineBufferSize) { - if (data != NULL) - ::memcpy(_data.buf, data, size); - } - else { - _data.ptr = static_cast(data); - } - } - - //! Destroy the `EmbedNode` instance. - ASMJIT_INLINE ~EmbedNode() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get pointer to data. - uint8_t* getData() { return getSize() <= kInlineBufferSize ? const_cast(_data.buf) : _data.ptr; } - //! Get size of data. - uint32_t getSize() const { return _size; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Size of the embedded data. - uint32_t _size; - - union { - //! data buffer. - uint8_t buf[kInlineBufferSize]; - //! Data buffer. - uint8_t* ptr; - } _data; -}; - -// ============================================================================ -// [asmjit::CommentNode] -// ============================================================================ - -//! Comment node. -//! -//! Comments allows to comment your assembler stream for better debugging -//! and visualization. Comments are usually ignored in release builds unless -//! the logger is present. 
-struct CommentNode : public Node { - ASMJIT_NO_COPY(CommentNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CommentNode` instance. - ASMJIT_INLINE CommentNode(Compiler* compiler, const char* comment) : Node(compiler, kNodeTypeComment) { - orFlags(kNodeFlagIsInformative); - _comment = comment; - } - - //! Destroy the `CommentNode` instance. - ASMJIT_INLINE ~CommentNode() {} -}; - -// ============================================================================ -// [asmjit::HintNode] -// ============================================================================ - -//! Hint node. -struct HintNode : public Node { - ASMJIT_NO_COPY(HintNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `HintNode` instance. - ASMJIT_INLINE HintNode(Compiler* compiler, VarData* vd, uint32_t hint, uint32_t value) : - Node(compiler, kNodeTypeHint) { - - orFlags(kNodeFlagIsInformative); - _vd = vd; - _hint = hint; - _value = value; - } - - //! Destroy the `HintNode` instance. - ASMJIT_INLINE ~HintNode() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get variable. - ASMJIT_INLINE VarData* getVd() const { return _vd; } - - //! Get hint it (see `kVarHint)`. - ASMJIT_INLINE uint32_t getHint() const{ return _hint; } - //! Set hint it (see `kVarHint)`. - ASMJIT_INLINE void setHint(uint32_t hint) { _hint = hint; } - - //! Get hint value. - ASMJIT_INLINE uint32_t getValue() const { return _value; } - //! Set hint value. 
- ASMJIT_INLINE void setValue(uint32_t value) { _value = value; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Variable. - VarData* _vd; - //! Hint id. - uint32_t _hint; - //! Value. - uint32_t _value; -}; - -// ============================================================================ -// [asmjit::TargetNode] -// ============================================================================ - -//! label node. -struct TargetNode : public Node { - ASMJIT_NO_COPY(TargetNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `TargetNode` instance. - ASMJIT_INLINE TargetNode(Compiler* compiler, uint32_t labelId) : Node(compiler, kNodeTypeTarget) { - _id = labelId; - _numRefs = 0; - _offset = -1; - _from = NULL; - } - - //! Destroy the `TargetNode` instance. - ASMJIT_INLINE ~TargetNode() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get target label. - ASMJIT_INLINE Label getLabel() const { return Label(_id); } - //! Get target label id. - ASMJIT_INLINE uint32_t getLabelId() const { return _id; } - - //! Get first jmp instruction. - ASMJIT_INLINE JumpNode* getFrom() const { return _from; } - - //! Get whether the node has assigned state. - ASMJIT_INLINE bool hasState() const { return _state != NULL; } - //! Get state for this target. - ASMJIT_INLINE VarState* getState() const { return _state; } - //! Set state for this target. - ASMJIT_INLINE void setState(VarState* state) { _state = state; } - - //! Get number of jumps to this target. - ASMJIT_INLINE uint32_t getNumRefs() const { return _numRefs; } - //! 
Set number of jumps to this target. - ASMJIT_INLINE void setNumRefs(uint32_t i) { _numRefs = i; } - - //! Add number of jumps to this target. - ASMJIT_INLINE void addNumRefs(uint32_t i = 1) { _numRefs += i; } - //! Subtract number of jumps to this target. - ASMJIT_INLINE void subNumRefs(uint32_t i = 1) { _numRefs -= i; } - - //! Get the label offset. - //! - //! \note Only valid after the content has been serialized to the `Assembler`. - ASMJIT_INLINE intptr_t getOffset() const { return _offset; } - - //! Set the label offset. - ASMJIT_INLINE void setOffset(intptr_t offset) { _offset = offset; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Label id. - uint32_t _id; - //! Count of jumps here. - uint32_t _numRefs; - - //! Label offset, after serialization. - intptr_t _offset; - //! First jump instruction that points to this target (label). - JumpNode* _from; -}; - -// ============================================================================ -// [asmjit::InstNode] -// ============================================================================ - -//! Instruction node. -struct InstNode : public Node { - ASMJIT_NO_COPY(InstNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `InstNode` instance. - ASMJIT_INLINE InstNode(Compiler* compiler, uint32_t instId, uint32_t instOptions, Operand* opList, uint32_t opCount) : - Node(compiler, kNodeTypeInst) { - - _instId = static_cast(instId); - _reserved = 0; - _instOptions = instOptions; - - _opCount = static_cast(opCount); - _opList = opList; - - _updateMemOp(); - } - - //! Destroy the `InstNode` instance. 
- ASMJIT_INLINE ~InstNode() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get instruction ID, see `X86InstId`. - ASMJIT_INLINE uint32_t getInstId() const { - return _instId; - } - - //! Set instruction ID to `instId`. - //! - //! Please do not modify instruction code if you don't know what are you - //! doing. Incorrect instruction code or operands can cause assertion failure. - ASMJIT_INLINE void setInstId(uint32_t instId) { - _instId = static_cast(instId); - } - - //! Whether the instruction is an unconditional jump or whether the - //! instruction is a conditional jump which is likely to be taken. - ASMJIT_INLINE bool isTaken() const { - return hasFlag(kNodeFlagIsTaken); - } - - //! Get emit options. - ASMJIT_INLINE uint32_t getOptions() const { - return _instOptions; - } - //! Set emit options. - ASMJIT_INLINE void setOptions(uint32_t options) { - _instOptions = options; - } - //! Add emit options. - ASMJIT_INLINE void addOptions(uint32_t options) { - _instOptions |= options; - } - //! Mask emit options. - ASMJIT_INLINE void andOptions(uint32_t options) { - _instOptions &= options; - } - //! Clear emit options. - ASMJIT_INLINE void delOptions(uint32_t options) { - _instOptions &= ~options; - } - - //! Get operands list. - ASMJIT_INLINE Operand* getOpList() { - return _opList; - } - //! \overload - ASMJIT_INLINE const Operand* getOpList() const { - return _opList; - } - - //! Get operands count. - ASMJIT_INLINE uint32_t getOpCount() const { - return _opCount; - } - - //! Get whether the instruction contains a memory operand. - ASMJIT_INLINE bool hasMemOp() const { - return _memOpIndex != 0xFF; - } - - //! Set memory operand index (in opList), 0xFF means that instruction - //! doesn't have a memory operand. - ASMJIT_INLINE void setMemOpIndex(uint32_t index) { - _memOpIndex = static_cast(index); - } - //! 
Reset memory operand index, setting it to 0xFF. - ASMJIT_INLINE void resetMemOpIndex() { - _memOpIndex = 0xFF; - } - - //! Get memory operand. - //! - //! Can only be called if the instruction has such operand, see `hasMemOp()`. - ASMJIT_INLINE BaseMem* getMemOp() const { - ASMJIT_ASSERT(hasMemOp()); - return static_cast(&_opList[_memOpIndex]); - } - - //! \overload - template - ASMJIT_INLINE T* getMemOp() const { - ASMJIT_ASSERT(hasMemOp()); - return static_cast(&_opList[_memOpIndex]); - } - - // -------------------------------------------------------------------------- - // [Utils] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void _updateMemOp() { - Operand* opList = getOpList(); - uint32_t opCount = getOpCount(); - - uint32_t i; - for (i = 0; i < opCount; i++) - if (opList[i].isMem()) - goto _Update; - i = 0xFF; - -_Update: - setMemOpIndex(i); - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Instruction ID, see `InstId`. - uint16_t _instId; - //! \internal - uint8_t _memOpIndex; - //! \internal - uint8_t _reserved; - //! Instruction options, see `InstOptions`. - uint32_t _instOptions; - - //! Operands list. - Operand* _opList; -}; - -// ============================================================================ -// [asmjit::JumpNode] -// ============================================================================ - -//! Jump node. 
-struct JumpNode : public InstNode { - ASMJIT_NO_COPY(JumpNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE JumpNode(Compiler* compiler, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) : - InstNode(compiler, code, options, opList, opCount) {} - ASMJIT_INLINE ~JumpNode() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE TargetNode* getTarget() const { return _target; } - ASMJIT_INLINE JumpNode* getJumpNext() const { return _jumpNext; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Target node. - TargetNode* _target; - //! Next jump to the same target in a single linked-list. - JumpNode *_jumpNext; -}; - -// ============================================================================ -// [asmjit::FuncNode] -// ============================================================================ - -//! Function declaration node. -//! -//! Functions are base blocks for generating assembler output. Each generated -//! assembler stream needs standard entry and leave sequences which are compatible -//! with the operating system ABI. -//! -//! `FuncNode` can be used to generate function prolog and epilog which are -//! compatible with a given function calling convention and to allocate and -//! manage variables that can be allocated/spilled during compilation phase. 
-struct FuncNode : public Node { - ASMJIT_NO_COPY(FuncNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `FuncNode` instance. - //! - //! Always use `Compiler::addFunc()` to create a `FuncNode` instance. - ASMJIT_INLINE FuncNode(Compiler* compiler) : - Node(compiler, kNodeTypeFunc), - _entryNode(NULL), - _exitNode(NULL), - _decl(NULL), - _end(NULL), - _argList(NULL), - _funcHints(IntUtil::mask(kFuncHintNaked)), - _funcFlags(0), - _expectedStackAlignment(0), - _requiredStackAlignment(0), - _redZoneSize(0), - _spillZoneSize(0), - _argStackSize(0), - _memStackSize(0), - _callStackSize(0) {} - - //! Destroy the `FuncNode` instance. - ASMJIT_INLINE ~FuncNode() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get function entry `TargetNode`. - ASMJIT_INLINE TargetNode* getEntryNode() const { return _entryNode; } - //! Get function exit `TargetNode`. - ASMJIT_INLINE TargetNode* getExitNode() const { return _exitNode; } - - //! Get function entry label. - ASMJIT_INLINE Label getEntryLabel() const { return _entryNode->getLabel(); } - //! Get function exit label. - ASMJIT_INLINE Label getExitLabel() const { return _exitNode->getLabel(); } - - //! Get function `EndNode`. - ASMJIT_INLINE EndNode* getEnd() const { return _end; } - //! Get function declaration. - ASMJIT_INLINE FuncDecl* getDecl() const { return _decl; } - - //! Get arguments list. - ASMJIT_INLINE VarData** getArgList() const { return _argList; } - //! Get arguments count. - ASMJIT_INLINE uint32_t getArgCount() const { return _decl->getArgCount(); } - - //! Get argument at `i`. - ASMJIT_INLINE VarData* getArg(uint32_t i) const { - ASMJIT_ASSERT(i < getArgCount()); - return _argList[i]; - } - - //! 
Set argument at `i`. - ASMJIT_INLINE void setArg(uint32_t i, VarData* vd) { - ASMJIT_ASSERT(i < getArgCount()); - _argList[i] = vd; - } - - //! Reset argument at `i`. - ASMJIT_INLINE void resetArg(uint32_t i) { - ASMJIT_ASSERT(i < getArgCount()); - _argList[i] = NULL; - } - - //! Get function hints. - ASMJIT_INLINE uint32_t getFuncHints() const { return _funcHints; } - //! Get function flags. - ASMJIT_INLINE uint32_t getFuncFlags() const { return _funcFlags; } - - //! Get whether the _funcFlags has `flag` - ASMJIT_INLINE bool hasFuncFlag(uint32_t flag) const { return (_funcFlags & flag) != 0; } - //! Set function `flag`. - ASMJIT_INLINE void addFuncFlags(uint32_t flags) { _funcFlags |= flags; } - //! Clear function `flag`. - ASMJIT_INLINE void clearFuncFlags(uint32_t flags) { _funcFlags &= ~flags; } - - //! Get whether the function is naked. - ASMJIT_INLINE bool isNaked() const { return hasFuncFlag(kFuncFlagIsNaked); } - //! Get whether the function is also a caller. - ASMJIT_INLINE bool isCaller() const { return hasFuncFlag(kFuncFlagIsCaller); } - //! Get whether the required stack alignment is lower than expected one, - //! thus it has to be aligned manually. - ASMJIT_INLINE bool isStackMisaligned() const { return hasFuncFlag(kFuncFlagIsStackMisaligned); } - //! Get whether the stack pointer is adjusted inside function prolog/epilog. - ASMJIT_INLINE bool isStackAdjusted() const { return hasFuncFlag(kFuncFlagIsStackAdjusted); } - - //! Get whether the function is finished. - ASMJIT_INLINE bool isFinished() const { return hasFuncFlag(kFuncFlagIsFinished); } - - //! Get expected stack alignment. - ASMJIT_INLINE uint32_t getExpectedStackAlignment() const { return _expectedStackAlignment; } - //! Set expected stack alignment. - ASMJIT_INLINE void setExpectedStackAlignment(uint32_t alignment) { _expectedStackAlignment = alignment; } - - //! Get required stack alignment. - ASMJIT_INLINE uint32_t getRequiredStackAlignment() const { return _requiredStackAlignment; } - //! 
Set required stack alignment. - ASMJIT_INLINE void setRequiredStackAlignment(uint32_t alignment) { _requiredStackAlignment = alignment; } - - //! Update required stack alignment so it's not lower than expected - //! stack alignment. - ASMJIT_INLINE void updateRequiredStackAlignment() { - if (_requiredStackAlignment <= _expectedStackAlignment) { - _requiredStackAlignment = _expectedStackAlignment; - clearFuncFlags(kFuncFlagIsStackMisaligned); - } - else { - addFuncFlags(kFuncFlagIsStackMisaligned); - } - } - - //! Set stack "Red Zone" size. - ASMJIT_INLINE uint32_t getRedZoneSize() const { return _redZoneSize; } - //! Get stack "Red Zone" size. - ASMJIT_INLINE void setRedZoneSize(uint32_t s) { _redZoneSize = static_cast(s); } - - //! Set stack "Spill Zone" size. - ASMJIT_INLINE uint32_t getSpillZoneSize() const { return _spillZoneSize; } - //! Get stack "Spill Zone" size. - ASMJIT_INLINE void setSpillZoneSize(uint32_t s) { _spillZoneSize = static_cast(s); } - - //! Get stack size used by function arguments. - ASMJIT_INLINE uint32_t getArgStackSize() const { return _argStackSize; } - - //! Get stack size used by variables and memory allocated on the stack. - ASMJIT_INLINE uint32_t getMemStackSize() const { return _memStackSize; } - - //! Get stack size used by function calls. - ASMJIT_INLINE uint32_t getCallStackSize() const { return _callStackSize; } - //! Merge stack size used by function call with `s`. - ASMJIT_INLINE void mergeCallStackSize(uint32_t s) { if (_callStackSize < s) _callStackSize = s; } - - // -------------------------------------------------------------------------- - // [Hints] - // -------------------------------------------------------------------------- - - //! Set function hint. - ASMJIT_INLINE void setHint(uint32_t hint, uint32_t value) { - ASMJIT_ASSERT(hint <= 31); - ASMJIT_ASSERT(value <= 1); - - _funcHints &= ~(1 << hint); - _funcHints |= (value << hint); - } - - //! Get function hint. 
- ASMJIT_INLINE uint32_t getHint(uint32_t hint) const { - ASMJIT_ASSERT(hint <= 31); - return (_funcHints >> hint) & 0x1; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Function entry. - TargetNode* _entryNode; - //! Function exit. - TargetNode* _exitNode; - - //! Function declaration. - FuncDecl* _decl; - //! Function end. - EndNode* _end; - - //! Arguments list as `VarData`. - VarData** _argList; - - //! Function hints; - uint32_t _funcHints; - //! Function flags. - uint32_t _funcFlags; - - //! Expected stack alignment (we depend on this value). - //! - //! \note It can be global alignment given by the OS or described by an - //! target platform ABI. - uint32_t _expectedStackAlignment; - //! Required stack alignment (usually for multimedia instructions). - uint32_t _requiredStackAlignment; - - //! The "Red Zone" size - count of bytes which might be accessed without - //! adjusting the stack pointer. - uint16_t _redZoneSize; - //! Spill zone size (used by WIN64 ABI). - uint16_t _spillZoneSize; - - //! Stack size needed for function arguments. - uint32_t _argStackSize; - //! Stack size needed for all variables and memory allocated on the stack. - uint32_t _memStackSize; - //! Stack size needed to call other functions. - uint32_t _callStackSize; -}; - -// ============================================================================ -// [asmjit::EndNode] -// ============================================================================ - -//! End of function/block node. -struct EndNode : public Node { - ASMJIT_NO_COPY(EndNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `EndNode` instance. 
- ASMJIT_INLINE EndNode(Compiler* compiler) : Node(compiler, kNodeTypeEnd) { - _flags |= kNodeFlagIsRet; - } - - //! Destroy the `EndNode` instance. - ASMJIT_INLINE ~EndNode() {} -}; - -// ============================================================================ -// [asmjit::RetNode] -// ============================================================================ - -//! Function return node. -struct RetNode : public Node { - ASMJIT_NO_COPY(RetNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `RetNode` instance. - ASMJIT_INLINE RetNode(Compiler* compiler, const Operand& o0, const Operand& o1) : Node(compiler, kNodeTypeRet) { - _flags |= kNodeFlagIsRet; - _ret[0] = o0; - _ret[1] = o1; - } - - //! Destroy the `RetNode` instance. - ASMJIT_INLINE ~RetNode() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the first return operand. - ASMJIT_INLINE Operand& getFirst() { return _ret[0]; } - //! \overload - ASMJIT_INLINE const Operand& getFirst() const { return _ret[0]; } - - //! Get the second return operand. - ASMJIT_INLINE Operand& getSecond() { return _ret[1]; } - //! \overload - ASMJIT_INLINE const Operand& getSecond() const { return _ret[1]; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Ret operand(s). - Operand _ret[2]; -}; - -// ============================================================================ -// [asmjit::CallNode] -// ============================================================================ - -//! Function-call node. 
-struct CallNode : public Node { - ASMJIT_NO_COPY(CallNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `CallNode` instance. - ASMJIT_INLINE CallNode(Compiler* compiler, const Operand& target) : - Node(compiler, kNodeTypeCall), - _decl(NULL), - _target(target), - _args(NULL) {} - - //! Destroy the `CallNode` instance. - ASMJIT_INLINE ~CallNode() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get function declaration. - ASMJIT_INLINE FuncDecl* getDecl() const { return _decl; } - - //! Get target operand. - ASMJIT_INLINE Operand& getTarget() { return _target; } - //! \overload - ASMJIT_INLINE const Operand& getTarget() const { return _target; } - - //! Get return at `i`. - ASMJIT_INLINE Operand& getRet(uint32_t i = 0) { - ASMJIT_ASSERT(i < 2); - return _ret[i]; - } - //! \overload - ASMJIT_INLINE const Operand& getRet(uint32_t i = 0) const { - ASMJIT_ASSERT(i < 2); - return _ret[i]; - } - - //! Get argument at `i`. - ASMJIT_INLINE Operand& getArg(uint32_t i) { - ASMJIT_ASSERT(i < kFuncArgCountLoHi); - return _args[i]; - } - //! \overload - ASMJIT_INLINE const Operand& getArg(uint32_t i) const { - ASMJIT_ASSERT(i < kFuncArgCountLoHi); - return _args[i]; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Function declaration. - FuncDecl* _decl; - - //! Target (address of function, register, label, ...). - Operand _target; - //! Return. - Operand _ret[2]; - //! Arguments. 
- Operand* _args; -}; - -// ============================================================================ -// [asmjit::SArgNode] -// ============================================================================ - -//! Function-call 'argument on the stack' node. -struct SArgNode : public Node { - ASMJIT_NO_COPY(SArgNode) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `SArgNode` instance. - ASMJIT_INLINE SArgNode(Compiler* compiler, CallNode* call, VarData* sVd, VarData* cVd) : - Node(compiler, kNodeTypeSArg), - _call(call), - _sVd(sVd), - _cVd(cVd), - _args(0) {} - - //! Destroy the `SArgNode` instance. - ASMJIT_INLINE ~SArgNode() {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get the associated function-call. - ASMJIT_INLINE CallNode* getCall() const { return _call; } - //! Get source variable. - ASMJIT_INLINE VarData* getSVd() const { return _sVd; } - //! Get conversion variable. - ASMJIT_INLINE VarData* getCVd() const { return _cVd; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Associated `CallNode`. - CallNode* _call; - //! Source variable. - VarData* _sVd; - //! Temporary variable used for conversion (or NULL). - VarData* _cVd; - - //! Affected arguments bit-array. - uint32_t _args; -}; - -//! \} - // ============================================================================ // [asmjit::Compiler] // ============================================================================ -//! \addtogroup asmjit_base_general -//! \{ - -//! Base compiler. +//! Compiler interface. //! //! \sa Assembler. 
-struct ASMJIT_VCLASS Compiler : public CodeGen { +struct ASMJIT_VIRTAPI Compiler : public CodeGen { ASMJIT_NO_COPY(Compiler) // -------------------------------------------------------------------------- @@ -2693,190 +86,206 @@ struct ASMJIT_VCLASS Compiler : public CodeGen { // -------------------------------------------------------------------------- //! Create a new `Compiler` instance. - ASMJIT_API Compiler(Runtime* runtime); + ASMJIT_API Compiler(); //! Destroy the `Compiler` instance. ASMJIT_API virtual ~Compiler(); // -------------------------------------------------------------------------- - // [LookAhead] + // [Reset] // -------------------------------------------------------------------------- + //! \override + ASMJIT_API virtual void reset(bool releaseMemory); + + // -------------------------------------------------------------------------- + // [Compiler Features] + // -------------------------------------------------------------------------- + + //! Get code-generator features. + ASMJIT_INLINE uint32_t getFeatures() const { return _features; } + //! Set code-generator features. + ASMJIT_INLINE void setFeatures(uint32_t features) { _features = features; } + + //! Get code-generator `feature`. + ASMJIT_INLINE bool hasFeature(uint32_t feature) const { + ASMJIT_ASSERT(feature < 32); + return (_features & (1 << feature)) != 0; + } + + //! Set code-generator `feature` to `value`. + ASMJIT_INLINE void setFeature(uint32_t feature, bool value) { + ASMJIT_ASSERT(feature < 32); + feature = static_cast(value) << feature; + _features = (_features & ~feature) | feature; + } + //! Get maximum look ahead. - ASMJIT_INLINE uint32_t getMaxLookAhead() const { - return _maxLookAhead; - } - + ASMJIT_INLINE uint32_t getMaxLookAhead() const { return _maxLookAhead; } //! Set maximum look ahead to `val`. 
- ASMJIT_INLINE void setMaxLookAhead(uint32_t val) { - _maxLookAhead = val; - } + ASMJIT_INLINE void setMaxLookAhead(uint32_t val) { _maxLookAhead = val; } // -------------------------------------------------------------------------- - // [Clear / Reset] + // [Token ID] // -------------------------------------------------------------------------- - //! Reset the compiler. + //! \internal //! - //! If `releaseMemory` is true all buffers will be released to the system. - ASMJIT_API void reset(bool releaseMemory = false); + //! Reset the token-id generator. + ASMJIT_INLINE void _resetTokenGenerator() { _tokenGenerator = 0; } + + //! \internal + //! + //! Generate a new unique token id. + ASMJIT_INLINE uint32_t _generateUniqueToken() { return ++_tokenGenerator; } // -------------------------------------------------------------------------- - // [Nodes] + // [Instruction Options] // -------------------------------------------------------------------------- + //! Get options of the next instruction. + ASMJIT_INLINE uint32_t getInstOptions() const { return _instOptions; } + //! Set options of the next instruction. + ASMJIT_INLINE void setInstOptions(uint32_t instOptions) { _instOptions = instOptions; } + + //! Get options of the next instruction and reset them. + ASMJIT_INLINE uint32_t getInstOptionsAndReset() { + uint32_t instOptions = _instOptions; + _instOptions = 0; + return instOptions; + }; + + // -------------------------------------------------------------------------- + // [Node-Factory] + // -------------------------------------------------------------------------- + + //! \internal template ASMJIT_INLINE T* newNode() { - void* p = _baseZone.alloc(sizeof(T)); + void* p = _zoneAllocator.alloc(sizeof(T)); return new(p) T(this); } + //! \internal template ASMJIT_INLINE T* newNode(P0 p0) { - void* p = _baseZone.alloc(sizeof(T)); + void* p = _zoneAllocator.alloc(sizeof(T)); return new(p) T(this, p0); } + //! 
\internal template ASMJIT_INLINE T* newNode(P0 p0, P1 p1) { - void* p = _baseZone.alloc(sizeof(T)); + void* p = _zoneAllocator.alloc(sizeof(T)); return new(p) T(this, p0, p1); } + //! \internal template ASMJIT_INLINE T* newNode(P0 p0, P1 p1, P2 p2) { - void* p = _baseZone.alloc(sizeof(T)); + void* p = _zoneAllocator.alloc(sizeof(T)); return new(p) T(this, p0, p1, p2); } - //! Get first node. - ASMJIT_INLINE Node* getFirstNode() const { return _firstNode; } - //! Get last node. - ASMJIT_INLINE Node* getLastNode() const { return _lastNode; } + //! \internal + //! + //! Create a new `HLData` node. + ASMJIT_API HLData* newDataNode(const void* data, uint32_t size); + + //! \internal + //! + //! Create a new `HLAlign` node. + ASMJIT_API HLAlign* newAlignNode(uint32_t alignMode, uint32_t offset); + + //! \internal + //! + //! Create a new `HLLabel` node. + ASMJIT_API HLLabel* newLabelNode(); + + //! \internal + //! + //! Create a new `HLComment`. + ASMJIT_API HLComment* newCommentNode(const char* str); + + //! \internal + //! + //! Create a new `HLHint`. + ASMJIT_API HLHint* newHintNode(Var& var, uint32_t hint, uint32_t value); + + // -------------------------------------------------------------------------- + // [Code-Stream] + // -------------------------------------------------------------------------- + + //! Add node `node` after current and set current to `node`. + ASMJIT_API HLNode* addNode(HLNode* node); + //! Insert `node` before `ref`. + ASMJIT_API HLNode* addNodeBefore(HLNode* node, HLNode* ref); + //! Insert `node` after `ref`. + ASMJIT_API HLNode* addNodeAfter(HLNode* node, HLNode* ref); + //! Remove `node`. + ASMJIT_API HLNode* removeNode(HLNode* node); + //! Remove multiple nodes. + ASMJIT_API void removeNodes(HLNode* first, HLNode* last); + + //! Get the first node. + ASMJIT_INLINE HLNode* getFirstNode() const { return _firstNode; } + //! Get the last node. + ASMJIT_INLINE HLNode* getLastNode() const { return _lastNode; } //! Get current node. //! //! 
\note If this method returns `NULL` it means that nothing has been emitted //! yet. - ASMJIT_INLINE Node* getCursor() const { return _cursor; } - //! Set the current node without returning the previous node (private). - ASMJIT_INLINE void _setCursor(Node* node) { _cursor = node; } + ASMJIT_INLINE HLNode* getCursor() const { return _cursor; } + //! \internal + //! + //! Set the current node without returning the previous node. + ASMJIT_INLINE void _setCursor(HLNode* node) { _cursor = node; } //! Set the current node to `node` and return the previous one. - ASMJIT_API Node* setCursor(Node* node); - - //! Add node `node` after current and set current to `node`. - ASMJIT_API Node* addNode(Node* node); - //! Add node before `ref`. - ASMJIT_API Node* addNodeBefore(Node* node, Node* ref); - //! Add node after `ref`. - ASMJIT_API Node* addNodeAfter(Node* node, Node* ref); - //! Remove node `node`. - ASMJIT_API Node* removeNode(Node* node); - //! Remove multiple nodes. - ASMJIT_API void removeNodes(Node* first, Node* last); + ASMJIT_API HLNode* setCursor(HLNode* node); // -------------------------------------------------------------------------- // [Func] // -------------------------------------------------------------------------- //! Get current function. - ASMJIT_INLINE FuncNode* getFunc() const { return _func; } + ASMJIT_INLINE HLFunc* getFunc() const { return _func; } // -------------------------------------------------------------------------- // [Align] // -------------------------------------------------------------------------- - //! Create a new `AlignNode`. - ASMJIT_API AlignNode* newAlign(uint32_t mode, uint32_t offset); - //! Add a new `AlignNode`. - ASMJIT_API AlignNode* addAlign(uint32_t mode, uint32_t offset); - - //! Align target buffer to `m` bytes. + //! Align target buffer to the `offset` specified. //! - //! Typical usage of this is to align labels at start of the inner loops. - //! - //! Inserts `nop()` instructions or CPU optimized NOPs. 
- ASMJIT_INLINE AlignNode* align(uint32_t mode, uint32_t offset) { - return addAlign(mode, offset); - } - - // -------------------------------------------------------------------------- - // [Target] - // -------------------------------------------------------------------------- - - //! Create a new `TargetNode`. - ASMJIT_API TargetNode* newTarget(); - //! Add a new `TargetNode`. - ASMJIT_API TargetNode* addTarget(); - - //! Get `TargetNode` by `id`. - ASMJIT_INLINE TargetNode* getTargetById(uint32_t id) { - ASMJIT_ASSERT(OperandUtil::isLabelId(id)); - ASMJIT_ASSERT(id < _targetList.getLength()); - - return _targetList[id]; - } - - //! Get `TargetNode` by `label`. - ASMJIT_INLINE TargetNode* getTarget(const Label& label) { - return getTargetById(label.getId()); - } + //! The sequence that is used to fill the gap between the aligned location + //! and the current depends on `alignMode`, see \ref AlignMode. + ASMJIT_API Error align(uint32_t alignMode, uint32_t offset); // -------------------------------------------------------------------------- // [Label] // -------------------------------------------------------------------------- - //! Get count of created labels. - ASMJIT_INLINE size_t getLabelsCount() const { - return _targetList.getLength(); - } - - //! Get whether `label` is created. - ASMJIT_INLINE bool isLabelValid(const Label& label) const { - return isLabelValid(label.getId()); - } - - //! \overload - ASMJIT_INLINE bool isLabelValid(uint32_t id) const { - return static_cast(id) < _targetList.getLength(); - } - - //! Get `TargetNode` by `label`. - ASMJIT_INLINE TargetNode* getTargetByLabel(const Label& label) { - return getTargetByLabel(label.getId()); - } - - //! \overload - ASMJIT_INLINE TargetNode* getTargetByLabel(uint32_t id) { - ASMJIT_ASSERT(isLabelValid(id)); - return _targetList[id]; - } - - //! Get `label` offset or -1 if the label is not bound. + //! Get `HLLabel` by `id`. //! - //! 
This method can be only called after the code has been serialized to the - //! `Assembler`, otherwise the offset returned will be -1 (even if the label - //! has been bound). - ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const { - return getLabelOffset(label.getId()); - } + //! NOTE: The label has to be valid, see `isLabelValid()`. + ASMJIT_API HLLabel* getHLLabel(uint32_t id) const; - //! \overload - ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const { - ASMJIT_ASSERT(isLabelValid(id)); - return _targetList[id]->getOffset(); - } + //! Get `HLLabel` by `label`. + //! + //! NOTE: The label has to be valid, see `isLabelValid()`. + ASMJIT_INLINE HLLabel* getHLLabel(const Label& label) { return getHLLabel(label.getId()); } + + //! Get whether the label `id` is valid. + ASMJIT_API bool isLabelValid(uint32_t id) const; + //! Get whether the `label` is valid. + ASMJIT_INLINE bool isLabelValid(const Label& label) const { return isLabelValid(label.getId()); } //! \internal //! - //! Create and initialize a new `Label`. - ASMJIT_API Error _newLabel(Label* dst); + //! Create a new label and return its ID. + ASMJIT_API uint32_t _newLabelId(); //! Create and return a new `Label`. - ASMJIT_INLINE Label newLabel() { - Label result(NoInit); - _newLabel(&result); - return result; - } + ASMJIT_INLINE Label newLabel() { return Label(_newLabelId()); } //! Bind label to the current offset. //! @@ -2887,36 +296,28 @@ struct ASMJIT_VCLASS Compiler : public CodeGen { // [Embed] // -------------------------------------------------------------------------- - //! Create a new `EmbedNode`. - ASMJIT_API EmbedNode* newEmbed(const void* data, uint32_t size); - //! Add a new `EmbedNode`. - ASMJIT_API EmbedNode* addEmbed(const void* data, uint32_t size); - //! Embed data. - ASMJIT_INLINE EmbedNode* embed(const void* data, uint32_t size) { - return addEmbed(data, size); - } + ASMJIT_API Error embed(const void* data, uint32_t size); + + //! 
Embed a constant pool data, adding the following in order: + //! 1. Data alignment. + //! 2. Label. + //! 3. Constant pool data. + ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool); // -------------------------------------------------------------------------- // [Comment] // -------------------------------------------------------------------------- - //! Create a new `CommentNode`. - ASMJIT_API CommentNode* newComment(const char* str); - //! Add a new `CommentNode`. - ASMJIT_API CommentNode* addComment(const char* str); - //! Emit a single comment line. - ASMJIT_API CommentNode* comment(const char* fmt, ...); + ASMJIT_API Error comment(const char* fmt, ...); // -------------------------------------------------------------------------- // [Hint] // -------------------------------------------------------------------------- - //! Create a new `HintNode`. - ASMJIT_API HintNode* newHint(Var& var, uint32_t hint, uint32_t value); - //! Add a new `HintNode`. - ASMJIT_API HintNode* addHint(Var& var, uint32_t hint, uint32_t value); + //! Emit a new hint (purely informational node). + ASMJIT_API Error _hint(Var& var, uint32_t hint, uint32_t value); // -------------------------------------------------------------------------- // [Vars] @@ -2957,20 +358,20 @@ struct ASMJIT_VCLASS Compiler : public CodeGen { ASMJIT_API VarData* _newVd(uint32_t type, uint32_t size, uint32_t c, const char* name); //! Create a new `Var`. - virtual Error _newVar(Var* var, uint32_t type, const char* name) = 0; + virtual Error _newVar(Var* var, uint32_t type, const char* name, va_list ap) = 0; //! Alloc variable `var`. - ASMJIT_API void alloc(Var& var); + ASMJIT_API Error alloc(Var& var); //! Alloc variable `var` using `regIndex` as a register index. - ASMJIT_API void alloc(Var& var, uint32_t regIndex); + ASMJIT_API Error alloc(Var& var, uint32_t regIndex); //! Alloc variable `var` using `reg` as a register operand. 
- ASMJIT_API void alloc(Var& var, const Reg& reg); + ASMJIT_API Error alloc(Var& var, const Reg& reg); //! Spill variable `var`. - ASMJIT_API void spill(Var& var); + ASMJIT_API Error spill(Var& var); //! Save variable `var` if the status is `modified` at this point. - ASMJIT_API void save(Var& var); + ASMJIT_API Error save(Var& var); //! Unuse variable `var`. - ASMJIT_API void unuse(Var& var); + ASMJIT_API Error unuse(Var& var); //! Get priority of variable `var`. ASMJIT_API uint32_t getPriority(Var& var) const; @@ -2985,7 +386,7 @@ struct ASMJIT_VCLASS Compiler : public CodeGen { //! Rename variable `var` to `name`. //! //! \note Only new name will appear in the logger. - ASMJIT_API void rename(Var& var, const char* name); + ASMJIT_API void rename(Var& var, const char* fmt, ...); // -------------------------------------------------------------------------- // [Stack] @@ -3005,67 +406,51 @@ struct ASMJIT_VCLASS Compiler : public CodeGen { //! Put data to a constant-pool and get a memory reference to it. virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) = 0; - // -------------------------------------------------------------------------- - // [Assembler] - // -------------------------------------------------------------------------- - - //! Get an assembler instance that is associated with the compiler. - //! - //! \note One instance of `Assembler` is shared and has lifetime same as the - //! compiler, however, each call to `getAssembler()` resets the assembler so - //! new code can be serialized into it. - ASMJIT_API Assembler* getAssembler(); - - //! \internal - //! - //! Create a new `Assembler` instance associated with the compiler. - virtual Assembler* _newAssembler() = 0; - - // -------------------------------------------------------------------------- - // [Serialize] - // -------------------------------------------------------------------------- - - //! Serialize a compiled code to `assembler`. 
- virtual Error serialize(Assembler* assembler) = 0; - // -------------------------------------------------------------------------- // [Members] // -------------------------------------------------------------------------- - //! Internal assembler. - Assembler* _assembler; + //! Code-Generation features, used by \ref hasFeature() and \ref setFeature(). + uint32_t _features; + //! Maximum count of nodes to look ahead when allocating/spilling + //! registers. + uint32_t _maxLookAhead; + + //! Options affecting the next instruction. + uint32_t _instOptions; + //! Processing token generator. + //! + //! Used to get a unique token that is then used to process `HLNode`s. See + //! `Compiler::_getUniqueToken()` for more details. + uint32_t _tokenGenerator; //! Flow id added to each node created (used only by `Context)`. uint32_t _nodeFlowId; //! Flags added to each node created (used only by `Context)`. uint32_t _nodeFlags; - //! Maximum count of nodes to look ahead when allocating/spilling - //! registers. - uint32_t _maxLookAhead; - //! Variable mapping (translates incoming VarType into target). const uint8_t* _targetVarMapping; //! First node. - Node* _firstNode; + HLNode* _firstNode; //! Last node. - Node* _lastNode; + HLNode* _lastNode; //! Current node. - Node* _cursor; + HLNode* _cursor; //! Current function. - FuncNode* _func; + HLFunc* _func; + //! General purpose zone allocator. + Zone _zoneAllocator; //! Variable zone. - Zone _varZone; + Zone _varAllocator; //! String/data zone. - Zone _stringZone; + Zone _stringAllocator; //! Local constant pool zone. - Zone _localConstZone; + Zone _constAllocator; - //! TargetNode list. - PodVector _targetList; //! VarData list. 
PodVector _varList; @@ -3087,16 +472,18 @@ struct ASMJIT_VCLASS Compiler : public CodeGen { // ============================================================================ ASMJIT_INLINE Label::Label(Compiler& c) : Operand(NoInit) { - c._newLabel(this); + reset(); + _label.id = c._newLabelId(); } -ASMJIT_INLINE Node::Node(Compiler* compiler, uint32_t type) { +ASMJIT_INLINE HLNode::HLNode(Compiler* compiler, uint32_t type) { _prev = NULL; _next = NULL; _type = static_cast(type); _opCount = 0; _flags = static_cast(compiler->_nodeFlags); _flowId = compiler->_nodeFlowId; + _tokenId = 0; _comment = NULL; _map = NULL; _liveness = NULL; diff --git a/src/asmjit/base/context.cpp b/src/asmjit/base/compilercontext.cpp similarity index 83% rename from src/asmjit/base/context.cpp rename to src/asmjit/base/compilercontext.cpp index 89bad1d..b588eb7 100644 --- a/src/asmjit/base/context.cpp +++ b/src/asmjit/base/compilercontext.cpp @@ -12,8 +12,8 @@ #if !defined(ASMJIT_DISABLE_COMPILER) // [Dependencies - AsmJit] -#include "../base/context_p.h" -#include "../base/intutil.h" +#include "../base/compilercontext_p.h" +#include "../base/utils.h" // [Api-Begin] #include "../apibegin.h" @@ -26,7 +26,7 @@ namespace asmjit { Context::Context(Compiler* compiler) : _compiler(compiler), - _baseZone(8192 - kZoneOverhead), + _zoneAllocator(8192 - Zone::kZoneOverhead), _varMapToVaListOffset(0) { Context::reset(); @@ -39,7 +39,7 @@ Context::~Context() {} // ============================================================================ void Context::reset(bool releaseMemory) { - _baseZone.reset(releaseMemory); + _zoneAllocator.reset(releaseMemory); _func = NULL; _start = NULL; @@ -94,10 +94,10 @@ static ASMJIT_INLINE uint32_t BaseContext_getDefaultAlignment(uint32_t size) { return 1; } -MemCell* Context::_newVarCell(VarData* vd) { +VarCell* Context::_newVarCell(VarData* vd) { ASMJIT_ASSERT(vd->_memCell == NULL); - MemCell* cell; + VarCell* cell; uint32_t size = vd->getSize(); if (vd->isStack()) { @@ 
-107,7 +107,7 @@ MemCell* Context::_newVarCell(VarData* vd) { return NULL; } else { - cell = static_cast(_baseZone.alloc(sizeof(MemCell))); + cell = static_cast(_zoneAllocator.alloc(sizeof(VarCell))); if (cell == NULL) goto _NoMemory; @@ -118,7 +118,7 @@ MemCell* Context::_newVarCell(VarData* vd) { cell->_size = size; cell->_alignment = size; - _memMaxAlign = IntUtil::iMax(_memMaxAlign, size); + _memMaxAlign = Utils::iMax(_memMaxAlign, size); _memVarTotal += size; switch (size) { @@ -137,12 +137,12 @@ MemCell* Context::_newVarCell(VarData* vd) { return cell; _NoMemory: - _compiler->setError(kErrorNoHeapMemory); + _compiler->setLastError(kErrorNoHeapMemory); return NULL; } -MemCell* Context::_newStackCell(uint32_t size, uint32_t alignment) { - MemCell* cell = static_cast(_baseZone.alloc(sizeof(MemCell))); +VarCell* Context::_newStackCell(uint32_t size, uint32_t alignment) { + VarCell* cell = static_cast(_zoneAllocator.alloc(sizeof(VarCell))); if (cell == NULL) goto _NoMemory; @@ -152,13 +152,13 @@ MemCell* Context::_newStackCell(uint32_t size, uint32_t alignment) { if (alignment > 64) alignment = 64; - ASMJIT_ASSERT(IntUtil::isPowerOf2(alignment)); - size = IntUtil::alignTo(size, alignment); + ASMJIT_ASSERT(Utils::isPowerOf2(alignment)); + size = Utils::alignTo(size, alignment); // Insert it sorted according to the alignment and size. 
{ - MemCell** pPrev = &_memStackCells; - MemCell* cur = *pPrev; + VarCell** pPrev = &_memStackCells; + VarCell* cur = *pPrev; for (cur = *pPrev; cur != NULL; cur = cur->_next) { if (cur->getAlignment() > alignment) @@ -176,20 +176,20 @@ MemCell* Context::_newStackCell(uint32_t size, uint32_t alignment) { *pPrev = cell; _memStackCellsUsed++; - _memMaxAlign = IntUtil::iMax(_memMaxAlign, alignment); + _memMaxAlign = Utils::iMax(_memMaxAlign, alignment); _memStackTotal += size; } return cell; _NoMemory: - _compiler->setError(kErrorNoHeapMemory); + _compiler->setLastError(kErrorNoHeapMemory); return NULL; } Error Context::resolveCellOffsets() { - MemCell* varCell = _memVarCells; - MemCell* stackCell = _memStackCells; + VarCell* varCell = _memVarCells; + VarCell* stackCell = _memStackCells; uint32_t stackAlignment = 0; if (stackCell != NULL) @@ -208,8 +208,9 @@ Error Context::resolveCellOffsets() { uint32_t gapAlignment = stackAlignment; uint32_t gapSize = 0; + // TODO: Not used! if (gapAlignment) - IntUtil::deltaTo(stackPos, gapAlignment); + Utils::alignDiff(stackPos, gapAlignment); stackPos += gapSize; uint32_t gapPos = stackPos; @@ -273,14 +274,14 @@ Error Context::resolveCellOffsets() { Error Context::removeUnreachableCode() { Compiler* compiler = getCompiler(); - PodList::Link* link = _unreachableList.getFirst(); - Node* stop = getStop(); + PodList::Link* link = _unreachableList.getFirst(); + HLNode* stop = getStop(); while (link != NULL) { - Node* node = link->getValue(); + HLNode* node = link->getValue(); if (node != NULL && node->getPrev() != NULL && node != stop) { // Locate all unreachable nodes. - Node* first = node; + HLNode* first = node; do { if (node->isFetched()) break; @@ -289,11 +290,11 @@ Error Context::removeUnreachableCode() { // Remove unreachable nodes that are neither informative nor directives. 
if (node != first) { - Node* end = node; + HLNode* end = node; node = first; do { - Node* next = node->getNext(); - if (!node->isInformative() && node->getType() != kNodeTypeAlign) { + HLNode* next = node->getNext(); + if (!node->isInformative() && node->getType() != kHLNodeTypeAlign) { ASMJIT_TLOG("[%05d] Unreachable\n", node->getFlowId()); compiler->removeNode(node); } @@ -318,32 +319,32 @@ struct LivenessTarget { LivenessTarget* prev; //! Target node. - TargetNode* node; + HLLabel* node; //! Jumped from. - JumpNode* from; + HLJump* from; }; Error Context::livenessAnalysis() { uint32_t bLen = static_cast( - ((_contextVd.getLength() + VarBits::kEntityBits - 1) / VarBits::kEntityBits)); + ((_contextVd.getLength() + BitArray::kEntityBits - 1) / BitArray::kEntityBits)); // No variables. if (bLen == 0) return kErrorOk; - FuncNode* func = getFunc(); - JumpNode* from = NULL; + HLFunc* func = getFunc(); + HLJump* from = NULL; LivenessTarget* ltCur = NULL; LivenessTarget* ltUnused = NULL; - PodList::Link* retPtr = _returningList.getFirst(); + PodList::Link* retPtr = _returningList.getFirst(); ASMJIT_ASSERT(retPtr != NULL); - Node* node = retPtr->getValue(); + HLNode* node = retPtr->getValue(); size_t varMapToVaListOffset = _varMapToVaListOffset; - VarBits* bCur = newBits(bLen); + BitArray* bCur = newBits(bLen); if (bCur == NULL) goto _NoMemory; @@ -358,7 +359,7 @@ _OnVisit: goto _OnDone; } - VarBits* bTmp = copyBits(bCur, bLen); + BitArray* bTmp = copyBits(bCur, bLen); if (bTmp == NULL) goto _NoMemory; @@ -374,9 +375,9 @@ _OnVisit: VarData* vd = va->getVd(); uint32_t flags = va->getFlags(); - uint32_t ctxId = vd->getContextId(); + uint32_t ctxId = vd->getLocalId(); - if ((flags & kVarAttrOutAll) && !(flags & kVarAttrInAll)) { + if ((flags & kVarAttrWAll) && !(flags & kVarAttrRAll)) { // Write-Only. 
bTmp->setBit(ctxId); bCur->delBit(ctxId); @@ -389,7 +390,7 @@ _OnVisit: } } - if (node->getType() == kNodeTypeTarget) + if (node->getType() == kHLNodeTypeLabel) goto _OnTarget; if (node == func) @@ -403,12 +404,12 @@ _OnVisit: _OnPatch: for (;;) { ASMJIT_ASSERT(node->hasLiveness()); - VarBits* bNode = node->getLiveness(); + BitArray* bNode = node->getLiveness(); if (!bNode->_addBitsDelSource(bCur, bLen)) goto _OnDone; - if (node->getType() == kNodeTypeTarget) + if (node->getType() == kHLNodeTypeLabel) goto _OnTarget; if (node == func) @@ -418,7 +419,7 @@ _OnPatch: } _OnTarget: - if (static_cast(node)->getNumRefs() != 0) { + if (static_cast(node)->getNumRefs() != 0) { // Push a new LivenessTarget onto the stack if needed. if (ltCur == NULL || ltCur->node != node) { // Allocate a new LivenessTarget object (from pool or zone). @@ -428,8 +429,8 @@ _OnTarget: ltUnused = ltUnused->prev; } else { - ltTmp = _baseZone.allocT( - sizeof(LivenessTarget) - sizeof(VarBits) + bLen * sizeof(uintptr_t)); + ltTmp = _zoneAllocator.allocT( + sizeof(LivenessTarget) - sizeof(BitArray) + bLen * sizeof(uintptr_t)); if (ltTmp == NULL) goto _NoMemory; @@ -437,10 +438,10 @@ _OnTarget: // Initialize and make current - ltTmp->from will be set later on. 
ltTmp->prev = ltCur; - ltTmp->node = static_cast(node); + ltTmp->node = static_cast(node); ltCur = ltTmp; - from = static_cast(node)->getFrom(); + from = static_cast(node)->getFrom(); ASMJIT_ASSERT(from != NULL); } else { @@ -508,7 +509,7 @@ _OnDone: return kErrorOk; _NoMemory: - return setError(kErrorNoHeapMemory); + return setLastError(kErrorNoHeapMemory); } // ============================================================================ @@ -530,7 +531,7 @@ void Context::cleanup() { for (size_t i = 0; i < length; i++) { VarData* vd = array[i]; - vd->resetContextId(); + vd->resetLocalId(); vd->resetRegIndex(); } @@ -542,9 +543,9 @@ void Context::cleanup() { // [asmjit::Context - CompileFunc] // ============================================================================ -Error Context::compile(FuncNode* func) { - Node* end = func->getEnd(); - Node* stop = end->getNext(); +Error Context::compile(HLFunc* func) { + HLNode* end = func->getEnd(); + HLNode* stop = end->getNext(); _func = func; _stop = stop; @@ -557,13 +558,13 @@ Error Context::compile(FuncNode* func) { Compiler* compiler = getCompiler(); #if !defined(ASMJIT_DISABLE_LOGGER) - if (compiler->hasLogger()) + if (compiler->getAssembler()->hasLogger()) ASMJIT_PROPAGATE_ERROR(annotate()); #endif // !ASMJIT_DISABLE_LOGGER ASMJIT_PROPAGATE_ERROR(translate()); - if (compiler->hasFeature(kCodeGenEnableScheduler)) + if (compiler->hasFeature(kCompilerFeatureEnableScheduler)) ASMJIT_PROPAGATE_ERROR(schedule()); // We alter the compiler cursor, because it doesn't make sense to reference diff --git a/src/asmjit/base/compilercontext_p.h b/src/asmjit/base/compilercontext_p.h new file mode 100644 index 0000000..42b2516 --- /dev/null +++ b/src/asmjit/base/compilercontext_p.h @@ -0,0 +1,914 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
+ +// [Guard] +#ifndef _ASMJIT_BASE_COMPILERCONTEXT_P_H +#define _ASMJIT_BASE_COMPILERCONTEXT_P_H + +#include "../build.h" +#if !defined(ASMJIT_DISABLE_COMPILER) + +// [Dependencies - AsmJit] +#include "../base/compiler.h" +#include "../base/zone.h" + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { + +//! \addtogroup asmjit_base +//! \{ + +// ============================================================================ +// [asmjit::VarFlags] +// ============================================================================ + +//! \internal +//! +//! X86/X64 variable flags. +ASMJIT_ENUM(VarFlags) { + //! Variable contains single-precision floating-point(s). + kVarFlagSp = 0x10, + //! Variable contains double-precision floating-point(s). + kVarFlagDp = 0x20, + //! Variable is packed, i.e. packed floats, doubles, ... + kVarFlagPacked = 0x40 +}; + +// ============================================================================ +// [asmjit::VarAttrFlags] +// ============================================================================ + +//! \internal +//! +//! Variable attribute flags. +ASMJIT_ENUM(VarAttrFlags) { + //! Read from register. + kVarAttrRReg = 0x00000001, + //! Write to register. + kVarAttrWReg = 0x00000002, + //! Read/Write from/to register. + kVarAttrXReg = 0x00000003, + + //! Read from memory. + kVarAttrRMem = 0x00000004, + //! Write to memory. + kVarAttrWMem = 0x00000008, + //! Read/Write from/to memory. + kVarAttrXMem = 0x0000000C, + + //! Register allocator can decide if input will be in register or memory. + kVarAttrRDecide = 0x00000010, + //! Register allocator can decide if output will be in register or memory. + kVarAttrWDecide = 0x00000020, + //! Register allocator can decide if in/out will be in register or memory. + kVarAttrXDecide = 0x00000030, + + //! Variable is converted to other type/class on the input. + kVarAttrRConv = 0x00000040, + //! Variable is converted from other type/class on the output. 
+ kVarAttrWConv = 0x00000080, + //! Combination of `kVarAttrRConv` and `kVarAttrWConv`. + kVarAttrXConv = 0x000000C0, + + //! Variable is a function call operand. + kVarAttrRCall = 0x00000100, + //! Variable is a function argument passed in register. + kVarAttrRFunc = 0x00000200, + //! Variable is a function return value passed in register. + kVarAttrWFunc = 0x00000400, + + //! Variable should be spilled. + kVarAttrSpill = 0x00000800, + //! Variable should be unused at the end of the instruction/node. + kVarAttrUnuse = 0x00001000, + + //! All in-flags. + kVarAttrRAll = kVarAttrRReg | kVarAttrRMem | kVarAttrRDecide | kVarAttrRCall | kVarAttrRFunc, + //! All out-flags. + kVarAttrWAll = kVarAttrWReg | kVarAttrWMem | kVarAttrWDecide | kVarAttrWFunc, + + //! Variable is already allocated on the input. + kVarAttrAllocRDone = 0x00400000, + //! Variable is already allocated on the output. + kVarAttrAllocWDone = 0x00800000, + + kVarAttrX86GpbLo = 0x10000000, + kVarAttrX86GpbHi = 0x20000000, + kVarAttrX86Fld4 = 0x40000000, + kVarAttrX86Fld8 = 0x80000000 +}; + +// ============================================================================ +// [asmjit::VarHint] +// ============================================================================ + +//! \internal +//! +//! Variable hint (used by `Compiler)`. +//! +//! \sa Compiler. +ASMJIT_ENUM(VarHint) { + //! Alloc variable. + kVarHintAlloc = 0, + //! Spill variable. + kVarHintSpill = 1, + //! Save variable if modified. + kVarHintSave = 2, + //! Save variable if modified and mark it as unused. + kVarHintSaveAndUnuse = 3, + //! Mark variable as unused. + kVarHintUnuse = 4 +}; + +// ============================================================================ +// [asmjit::kVarState] +// ============================================================================ + +// TODO: Rename `kVarState` or `VarState`. + +//! \internal +//! +//! State of variable. +//! +//! \note Variable states are used only during register allocation. 
+ASMJIT_ENUM(kVarState) { + //! Variable is currently not used. + kVarStateNone = 0, + //! Variable is currently allocated in register. + kVarStateReg = 1, + //! Variable is currently allocated in memory (or has been spilled). + kVarStateMem = 2 +}; + +// ============================================================================ +// [asmjit::VarCell] +// ============================================================================ + +struct VarCell { + ASMJIT_NO_COPY(VarCell) + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get cell offset. + ASMJIT_INLINE int32_t getOffset() const { return _offset; } + //! Set cell offset. + ASMJIT_INLINE void setOffset(int32_t offset) { _offset = offset; } + + //! Get cell size. + ASMJIT_INLINE uint32_t getSize() const { return _size; } + //! Set cell size. + ASMJIT_INLINE void setSize(uint32_t size) { _size = size; } + + //! Get cell alignment. + ASMJIT_INLINE uint32_t getAlignment() const { return _alignment; } + //! Set cell alignment. + ASMJIT_INLINE void setAlignment(uint32_t alignment) { _alignment = alignment; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Next active cell. + VarCell* _next; + + //! Offset, relative to base-offset. + int32_t _offset; + //! Size. + uint32_t _size; + //! Alignment. + uint32_t _alignment; +}; + +// ============================================================================ +// [asmjit::VarData] +// ============================================================================ + +//! HL variable data (base). +struct VarData { + // -------------------------------------------------------------------------- + // [Accessors - Base] + // -------------------------------------------------------------------------- + + //! 
Get variable name. + ASMJIT_INLINE const char* getName() const { return _name; } + //! Get variable id. + ASMJIT_INLINE uint32_t getId() const { return _id; } + //! Get variable type. + ASMJIT_INLINE uint32_t getType() const { return _type; } + //! Get variable class. + ASMJIT_INLINE uint32_t getClass() const { return _class; } + + // -------------------------------------------------------------------------- + // [Accessors - LocalId] + // -------------------------------------------------------------------------- + + //! Get whether the variable has a local id. + ASMJIT_INLINE bool hasLocalId() const { return _localId != kInvalidValue; } + //! Get a variable's local id. + ASMJIT_INLINE uint32_t getLocalId() const { return _localId; } + //! Set a variable's local id. + ASMJIT_INLINE void setLocalId(uint32_t localId) { _localId = localId; } + //! Reset a variable's local id. + ASMJIT_INLINE void resetLocalId() { _localId = kInvalidValue; } + + // -------------------------------------------------------------------------- + // [Accessors - Priority] + // -------------------------------------------------------------------------- + + //! Get variable priority, used by compiler to decide which variable to spill. + ASMJIT_INLINE uint32_t getPriority() const { return _priority; } + //! Set variable priority. + ASMJIT_INLINE void setPriority(uint32_t priority) { + ASMJIT_ASSERT(priority <= 0xFF); + _priority = static_cast(priority); + } + + // -------------------------------------------------------------------------- + // [Accessors - State] + // -------------------------------------------------------------------------- + + //! Get variable state, only used by `Context`. + ASMJIT_INLINE uint32_t getState() const { return _state; } + //! Set variable state, only used by `Context`. 
+ ASMJIT_INLINE void setState(uint32_t state) { + ASMJIT_ASSERT(state <= 0xFF); + _state = static_cast(state); + } + + // -------------------------------------------------------------------------- + // [Accessors - RegIndex] + // -------------------------------------------------------------------------- + + //! Get register index. + ASMJIT_INLINE uint32_t getRegIndex() const { return _regIndex; } + //! Set register index. + ASMJIT_INLINE void setRegIndex(uint32_t regIndex) { + ASMJIT_ASSERT(regIndex <= kInvalidReg); + _regIndex = static_cast(regIndex); + } + //! Reset register index. + ASMJIT_INLINE void resetRegIndex() { + _regIndex = static_cast(kInvalidReg); + } + + // -------------------------------------------------------------------------- + // [Accessors - HomeIndex/Mask] + // -------------------------------------------------------------------------- + + //! Get home registers mask. + ASMJIT_INLINE uint32_t getHomeMask() const { return _homeMask; } + //! Add a home register index to the home registers mask. + ASMJIT_INLINE void addHomeIndex(uint32_t regIndex) { _homeMask |= Utils::mask(regIndex); } + + // -------------------------------------------------------------------------- + // [Accessors - Flags] + // -------------------------------------------------------------------------- + + //! Get variable flags. + ASMJIT_INLINE uint32_t getFlags() const { return _flags; } + + //! Get whether the VarData is only memory allocated on the stack. + ASMJIT_INLINE bool isStack() const { return static_cast(_isStack); } + //! Get whether the variable is a function argument passed through memory. + ASMJIT_INLINE bool isMemArg() const { return static_cast(_isMemArg); } + + //! Get variable content can be calculated by a simple instruction. + ASMJIT_INLINE bool isCalculated() const { return static_cast(_isCalculated); } + //! Get whether to save variable when it's unused (spill). + ASMJIT_INLINE bool saveOnUnuse() const { return static_cast(_saveOnUnuse); } + + //! 
Get whether the variable was changed. + ASMJIT_INLINE bool isModified() const { return static_cast(_modified); } + //! Set whether the variable was changed. + ASMJIT_INLINE void setModified(bool modified) { _modified = modified; } + + //! Get variable alignment. + ASMJIT_INLINE uint32_t getAlignment() const { return _alignment; } + //! Get variable size. + ASMJIT_INLINE uint32_t getSize() const { return _size; } + + //! Get home memory offset. + ASMJIT_INLINE int32_t getMemOffset() const { return _memOffset; } + //! Set home memory offset. + ASMJIT_INLINE void setMemOffset(int32_t offset) { _memOffset = offset; } + + //! Get home memory cell. + ASMJIT_INLINE VarCell* getMemCell() const { return _memCell; } + //! Set home memory cell. + ASMJIT_INLINE void setMemCell(VarCell* cell) { _memCell = cell; } + + // -------------------------------------------------------------------------- + // [Accessors - Temporary Usage] + // -------------------------------------------------------------------------- + + //! Get temporary VarAttr. + ASMJIT_INLINE VarAttr* getVa() const { return _va; } + //! Set temporary VarAttr. + ASMJIT_INLINE void setVa(VarAttr* va) { _va = va; } + //! Reset temporary VarAttr. + ASMJIT_INLINE void resetVa() { _va = NULL; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Variable name. + const char* _name; + + //! Variable id. + uint32_t _id; + //! Variable's local id (initially `kInvalidValue`). + uint32_t _localId; + + //! Variable type. + uint8_t _type; + //! Variable class. + uint8_t _class; + //! Variable flags. + uint8_t _flags; + //! Variable priority. + uint8_t _priority; + + //! Variable state (connected with actual `VarState)`. + uint8_t _state; + //! Actual register index (only used by `Context)`, during translate. + uint8_t _regIndex; + + //! 
Whether the variable is only used as memory allocated on the stack. + uint8_t _isStack : 1; + //! Whether the variable is a function argument passed through memory. + uint8_t _isMemArg : 1; + //! Whether variable content can be calculated by a simple instruction. + //! + //! This is used mainly by MMX and SSE2 code. This flag indicates that + //! register allocator should never reserve memory for this variable, because + //! the content can be generated by a single instruction (for example PXOR). + uint8_t _isCalculated : 1; + //! Save on unuse (at end of the variable scope). + uint8_t _saveOnUnuse : 1; + //! Whether variable was changed (connected with actual `VarState)`. + uint8_t _modified : 1; + //! \internal + uint8_t _reserved0 : 3; + //! Variable natural alignment. + uint8_t _alignment; + + //! Variable size. + uint32_t _size; + + //! Mask of all registers variable has been allocated to. + uint32_t _homeMask; + + //! Home memory offset. + int32_t _memOffset; + //! Home memory cell, used by `Context` (initially NULL). + VarCell* _memCell; + + //! Register read access statistics. + uint32_t rReadCount; + //! Register write access statistics. + uint32_t rWriteCount; + + //! Memory read statistics. + uint32_t mReadCount; + //! Memory write statistics. + uint32_t mWriteCount; + + // -------------------------------------------------------------------------- + // [Members - Temporary Usage] + // -------------------------------------------------------------------------- + + // These variables are only used during register allocation. They are + // initialized by init() phase and reset by cleanup() phase. + + union { + //! Temporary link to VarAttr* used by the `Context` used in + //! various phases, but always set back to NULL when finished. + //! + //! This temporary data is designed to be used by algorithms that need to + //! store some data into variables themselves during compilation. But it's + //! 
expected that after variable is compiled & translated the data is set + //! back to zero/null. Initial value is NULL. + VarAttr* _va; + + //! \internal + //! + //! Same as `_va` just provided as `uintptr_t`. + uintptr_t _vaUInt; + }; +}; + +// ============================================================================ +// [asmjit::VarAttr] +// ============================================================================ + +struct VarAttr { + // -------------------------------------------------------------------------- + // [Setup] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void setup(VarData* vd, uint32_t flags = 0, uint32_t inRegs = 0, uint32_t allocableRegs = 0) { + _vd = vd; + _flags = flags; + _varCount = 0; + _inRegIndex = kInvalidReg; + _outRegIndex = kInvalidReg; + _reserved = 0; + _inRegs = inRegs; + _allocableRegs = allocableRegs; + } + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get VarData. + ASMJIT_INLINE VarData* getVd() const { return _vd; } + //! Set VarData. + ASMJIT_INLINE void setVd(VarData* vd) { _vd = vd; } + + //! Get flags. + ASMJIT_INLINE uint32_t getFlags() const { return _flags; } + //! Set flags. + ASMJIT_INLINE void setFlags(uint32_t flags) { _flags = flags; } + + //! Get whether `flag` is on. + ASMJIT_INLINE bool hasFlag(uint32_t flag) { return (_flags & flag) != 0; } + //! Add `flags`. + ASMJIT_INLINE void orFlags(uint32_t flags) { _flags |= flags; } + //! Mask `flags`. + ASMJIT_INLINE void andFlags(uint32_t flags) { _flags &= flags; } + //! Clear `flags`. + ASMJIT_INLINE void andNotFlags(uint32_t flags) { _flags &= ~flags; } + + //! Get how many times the variable is used by the instruction/node. + ASMJIT_INLINE uint32_t getVarCount() const { return _varCount; } + //! Set how many times the variable is used by the instruction/node. 
+ ASMJIT_INLINE void setVarCount(uint32_t count) { _varCount = static_cast(count); } + //! Add how many times the variable is used by the instruction/node. + ASMJIT_INLINE void addVarCount(uint32_t count = 1) { _varCount += static_cast(count); } + + //! Get whether the variable has to be allocated in a specific input register. + ASMJIT_INLINE uint32_t hasInRegIndex() const { return _inRegIndex != kInvalidReg; } + //! Get the input register index or `kInvalidReg`. + ASMJIT_INLINE uint32_t getInRegIndex() const { return _inRegIndex; } + //! Set the input register index. + ASMJIT_INLINE void setInRegIndex(uint32_t index) { _inRegIndex = static_cast(index); } + //! Reset the input register index. + ASMJIT_INLINE void resetInRegIndex() { _inRegIndex = kInvalidReg; } + + //! Get whether the variable has to be allocated in a specific output register. + ASMJIT_INLINE uint32_t hasOutRegIndex() const { return _outRegIndex != kInvalidReg; } + //! Get the output register index or `kInvalidReg`. + ASMJIT_INLINE uint32_t getOutRegIndex() const { return _outRegIndex; } + //! Set the output register index. + ASMJIT_INLINE void setOutRegIndex(uint32_t index) { _outRegIndex = static_cast(index); } + //! Reset the output register index. + ASMJIT_INLINE void resetOutRegIndex() { _outRegIndex = kInvalidReg; } + + //! Get whether the mandatory input registers are in used. + ASMJIT_INLINE bool hasInRegs() const { return _inRegs != 0; } + //! Get mandatory input registers (mask). + ASMJIT_INLINE uint32_t getInRegs() const { return _inRegs; } + //! Set mandatory input registers (mask). + ASMJIT_INLINE void setInRegs(uint32_t mask) { _inRegs = mask; } + //! Add mandatory input registers (mask). + ASMJIT_INLINE void addInRegs(uint32_t mask) { _inRegs |= mask; } + //! And mandatory input registers (mask). + ASMJIT_INLINE void andInRegs(uint32_t mask) { _inRegs &= mask; } + //! Clear mandatory input registers (mask). + ASMJIT_INLINE void delInRegs(uint32_t mask) { _inRegs &= ~mask; } + + //! 
Get allocable input registers (mask). + ASMJIT_INLINE uint32_t getAllocableRegs() const { return _allocableRegs; } + //! Set allocable input registers (mask). + ASMJIT_INLINE void setAllocableRegs(uint32_t mask) { _allocableRegs = mask; } + //! Add allocable input registers (mask). + ASMJIT_INLINE void addAllocableRegs(uint32_t mask) { _allocableRegs |= mask; } + //! And allocable input registers (mask). + ASMJIT_INLINE void andAllocableRegs(uint32_t mask) { _allocableRegs &= mask; } + //! Clear allocable input registers (mask). + ASMJIT_INLINE void delAllocableRegs(uint32_t mask) { _allocableRegs &= ~mask; } + + // -------------------------------------------------------------------------- + // [Operator Overload] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE VarAttr& operator=(const VarAttr& other) { + ::memcpy(this, &other, sizeof(VarAttr)); + return *this; + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + VarData* _vd; + //! Flags. + uint32_t _flags; + + union { + struct { + //! How many times the variable is used by the instruction/node. + uint8_t _varCount; + //! Input register index or `kInvalidReg` if it's not given. + //! + //! Even if the input register index is not given (i.e. it may by any + //! register), register allocator should assign an index that will be + //! used to persist a variable into this specific index. It's helpful + //! in situations where one variable has to be allocated in multiple + //! registers to determine the register which will be persistent. + uint8_t _inRegIndex; + //! Output register index or `kInvalidReg` if it's not given. + //! + //! Typically `kInvalidReg` if variable is only used on input. + uint8_t _outRegIndex; + //! \internal + uint8_t _reserved; + }; + + //! \internal + //! + //! Packed data #0. + uint32_t _packed; + }; + + //! 
Mandatory input registers. + //! + //! Mandatory input registers are required by the instruction even if + //! there are duplicates. This schema allows us to allocate one variable + //! in one or more register when needed. Required mostly by instructions + //! that have implicit register operands (imul, cpuid, ...) and function + //! call. + uint32_t _inRegs; + + //! Allocable input registers. + //! + //! Optional input registers is a mask of all allocable registers for a given + //! variable where we have to pick one of them. This mask is usually not used + //! when _inRegs is set. If both masks are used then the register + //! allocator tries first to find an intersection between these and allocates + //! an extra slot if not found. + uint32_t _allocableRegs; +}; + +// ============================================================================ +// [asmjit::VarMap] +// ============================================================================ + +//! Variables' map related to a single node (instruction / other node). +struct VarMap { + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get count of variables (all). + ASMJIT_INLINE uint32_t getVaCount() const { + return _vaCount; + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Variables count. + uint32_t _vaCount; +}; + +// ============================================================================ +// [asmjit::VarState] +// ============================================================================ + +//! Variables' state. +struct VarState {}; + +// ============================================================================ +// [asmjit::Context] +// ============================================================================ + +//! 
\internal +//! +//! Code generation context is the logic behind `Compiler`. The context is +//! used to compile the code stored in `Compiler`. +struct Context { + ASMJIT_NO_COPY(Context) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + Context(Compiler* compiler); + virtual ~Context(); + + // -------------------------------------------------------------------------- + // [Reset] + // -------------------------------------------------------------------------- + + //! Reset the whole context. + virtual void reset(bool releaseMemory = false); + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get compiler. + ASMJIT_INLINE Compiler* getCompiler() const { return _compiler; } + + //! Get function. + ASMJIT_INLINE HLFunc* getFunc() const { return _func; } + //! Get stop node. + ASMJIT_INLINE HLNode* getStop() const { return _stop; } + + //! Get start of the current scope. + ASMJIT_INLINE HLNode* getStart() const { return _start; } + //! Get end of the current scope. + ASMJIT_INLINE HLNode* getEnd() const { return _end; } + + //! Get extra block. + ASMJIT_INLINE HLNode* getExtraBlock() const { return _extraBlock; } + //! Set extra block. + ASMJIT_INLINE void setExtraBlock(HLNode* node) { _extraBlock = node; } + + // -------------------------------------------------------------------------- + // [Error] + // -------------------------------------------------------------------------- + + //! Get the last error code. + ASMJIT_INLINE Error getLastError() const { + return getCompiler()->getLastError(); + } + + //! Set the last error code and propagate it through the error handler. 
+ ASMJIT_INLINE Error setLastError(Error error, const char* message = NULL) { + return getCompiler()->setLastError(error, message); + } + + // -------------------------------------------------------------------------- + // [State] + // -------------------------------------------------------------------------- + + //! Get current state. + ASMJIT_INLINE VarState* getState() const { return _state; } + + //! Load current state from `target` state. + virtual void loadState(VarState* src) = 0; + + //! Save current state, returning new `VarState` instance. + virtual VarState* saveState() = 0; + + //! Change the current state to `target` state. + virtual void switchState(VarState* src) = 0; + + //! Change the current state to the intersection of two states `a` and `b`. + virtual void intersectStates(VarState* a, VarState* b) = 0; + + // -------------------------------------------------------------------------- + // [Context] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE Error _registerContextVar(VarData* vd) { + if (vd->hasLocalId()) + return kErrorOk; + + uint32_t cid = static_cast(_contextVd.getLength()); + ASMJIT_PROPAGATE_ERROR(_contextVd.append(vd)); + + vd->setLocalId(cid); + return kErrorOk; + } + + // -------------------------------------------------------------------------- + // [Mem] + // -------------------------------------------------------------------------- + + VarCell* _newVarCell(VarData* vd); + VarCell* _newStackCell(uint32_t size, uint32_t alignment); + + ASMJIT_INLINE VarCell* getVarCell(VarData* vd) { + VarCell* cell = vd->getMemCell(); + return cell ? 
cell : _newVarCell(vd); + } + + virtual Error resolveCellOffsets(); + + // -------------------------------------------------------------------------- + // [Bits] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE BitArray* newBits(uint32_t len) { + return static_cast( + _zoneAllocator.allocZeroed(static_cast(len) * BitArray::kEntitySize)); + } + + ASMJIT_INLINE BitArray* copyBits(const BitArray* src, uint32_t len) { + return static_cast( + _zoneAllocator.dup(src, static_cast(len) * BitArray::kEntitySize)); + } + + // -------------------------------------------------------------------------- + // [Fetch] + // -------------------------------------------------------------------------- + + //! Fetch. + //! + //! Fetch iterates over all nodes and gathers information about all variables + //! used. The process generates information required by register allocator, + //! variable liveness analysis and translator. + virtual Error fetch() = 0; + + // -------------------------------------------------------------------------- + // [Unreachable Code] + // -------------------------------------------------------------------------- + + //! Add unreachable-flow data to the unreachable flow list. + ASMJIT_INLINE Error addUnreachableNode(HLNode* node) { + PodList::Link* link = _zoneAllocator.allocT::Link>(); + if (link == NULL) + return setLastError(kErrorNoHeapMemory); + + link->setValue(node); + _unreachableList.append(link); + + return kErrorOk; + } + + //! Remove unreachable code. + virtual Error removeUnreachableCode(); + + // -------------------------------------------------------------------------- + // [Code-Flow] + // -------------------------------------------------------------------------- + + //! Add returning node (i.e. node that returns and where liveness analysis + //! should start). 
+ ASMJIT_INLINE Error addReturningNode(HLNode* node) { + PodList::Link* link = _zoneAllocator.allocT::Link>(); + if (link == NULL) + return setLastError(kErrorNoHeapMemory); + + link->setValue(node); + _returningList.append(link); + + return kErrorOk; + } + + //! Add jump-flow data to the jcc flow list. + ASMJIT_INLINE Error addJccNode(HLNode* node) { + PodList::Link* link = _zoneAllocator.allocT::Link>(); + if (link == NULL) + return setLastError(kErrorNoHeapMemory); + + link->setValue(node); + _jccList.append(link); + + return kErrorOk; + } + + // -------------------------------------------------------------------------- + // [Analyze] + // -------------------------------------------------------------------------- + + //! Perform variable liveness analysis. + //! + //! Analysis phase iterates over nodes in reverse order and generates a bit + //! array describing variables that are alive at every node in the function. + //! When the analysis start all variables are assumed dead. When a read or + //! read/write operations of a variable is detected the variable becomes + //! alive; when only write operation is detected the variable becomes dead. + //! + //! When a label is found all jumps to that label are followed and analysis + //! repeats until all variables are resolved. + virtual Error livenessAnalysis(); + + // -------------------------------------------------------------------------- + // [Annotate] + // -------------------------------------------------------------------------- + + virtual Error annotate() = 0; + + // -------------------------------------------------------------------------- + // [Translate] + // -------------------------------------------------------------------------- + + //! Translate code by allocating registers and handling state changes. 
+ virtual Error translate() = 0; + + // -------------------------------------------------------------------------- + // [Schedule] + // -------------------------------------------------------------------------- + + virtual Error schedule(); + + // -------------------------------------------------------------------------- + // [Cleanup] + // -------------------------------------------------------------------------- + + virtual void cleanup(); + + // -------------------------------------------------------------------------- + // [Compile] + // -------------------------------------------------------------------------- + + virtual Error compile(HLFunc* func); + + // -------------------------------------------------------------------------- + // [Serialize] + // -------------------------------------------------------------------------- + + virtual Error serialize(Assembler* assembler, HLNode* start, HLNode* stop) = 0; + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Compiler. + Compiler* _compiler; + //! Function. + HLFunc* _func; + + //! Zone allocator. + Zone _zoneAllocator; + + //! \internal + //! + //! Offset (how many bytes to add) to `VarMap` to get `VarAttr` array. Used + //! by liveness analysis shared across all backends. This is needed because + //! `VarMap` is a base class for a specialized version that liveness analysis + //! doesn't use, it just needs `VarAttr` array. + uint32_t _varMapToVaListOffset; + + //! Start of the current active scope. + HLNode* _start; + //! End of the current active scope. + HLNode* _end; + + //! Node that is used to insert extra code after the function body. + HLNode* _extraBlock; + //! Stop node. + HLNode* _stop; + + //! Unreachable nodes. + PodList _unreachableList; + //! Returning nodes. + PodList _returningList; + //! Jump nodes. + PodList _jccList; + + //! 
All variables used by the current function. + PodVector _contextVd; + + //! Memory used to spill variables. + VarCell* _memVarCells; + //! Memory used to alloc memory on the stack. + VarCell* _memStackCells; + + //! Count of 1-byte cells. + uint32_t _mem1ByteVarsUsed; + //! Count of 2-byte cells. + uint32_t _mem2ByteVarsUsed; + //! Count of 4-byte cells. + uint32_t _mem4ByteVarsUsed; + //! Count of 8-byte cells. + uint32_t _mem8ByteVarsUsed; + //! Count of 16-byte cells. + uint32_t _mem16ByteVarsUsed; + //! Count of 32-byte cells. + uint32_t _mem32ByteVarsUsed; + //! Count of 64-byte cells. + uint32_t _mem64ByteVarsUsed; + //! Count of stack memory cells. + uint32_t _memStackCellsUsed; + + //! Maximum memory alignment used by the function. + uint32_t _memMaxAlign; + //! Count of bytes used by variables. + uint32_t _memVarTotal; + //! Count of bytes used by stack. + uint32_t _memStackTotal; + //! Count of bytes used by variables and stack after alignment. + uint32_t _memAllTotal; + + //! Default lenght of annotated instruction. + uint32_t _annotationLength; + + //! Current state (used by register allocator). + VarState* _state; +}; + +//! \} + +} // asmjit namespace + +// [Api-End] +#include "../apiend.h" + +// [Guard] +#endif // !ASMJIT_DISABLE_COMPILER +#endif // _ASMJIT_BASE_COMPILERCONTEXT_P_H diff --git a/src/asmjit/base/compilerfunc.h b/src/asmjit/base/compilerfunc.h new file mode 100644 index 0000000..4abae0e --- /dev/null +++ b/src/asmjit/base/compilerfunc.h @@ -0,0 +1,1007 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +// [Guard] +#ifndef _ASMJIT_BASE_COMPILERFUNC_H +#define _ASMJIT_BASE_COMPILERFUNC_H + +#include "../build.h" +#if !defined(ASMJIT_DISABLE_COMPILER) + +// [Dependencies - AsmJit] +#include "../base/operand.h" +#include "../base/utils.h" + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { + +//! \addtogroup asmjit_base +//! 
\{ + +// ============================================================================ +// [asmjit::CallConv] +// ============================================================================ + +//! Function calling convention. +//! +//! Calling convention is a scheme that defines how function arguments are +//! passed and how the return value handled. In assembler programming it's +//! always needed to comply with function calling conventions, because even +//! small inconsistency can cause undefined behavior or application's crash. +//! +//! Platform Independent Conventions +//! -------------------------------- +//! +//! - `kCallConvHost` - Should match the current C++ compiler native calling +//! convention. +//! +//! X86/X64 Specific Conventions +//! ---------------------------- +//! +//! List of calling conventions for 32-bit x86 mode: +//! - `kCallConvX86CDecl` - Calling convention for C runtime. +//! - `kCallConvX86StdCall` - Calling convention for WinAPI functions. +//! - `kCallConvX86MsThisCall` - Calling convention for C++ members under +//! Windows (produced by MSVC and all MSVC compatible compilers). +//! - `kCallConvX86MsFastCall` - Fastest calling convention that can be used +//! by MSVC compiler. +//! - `kCallConvX86BorlandFastCall` - Borland fastcall convention. +//! - `kCallConvX86GccFastCall` - GCC fastcall convention (2 register arguments). +//! - `kCallConvX86GccRegParm1` - GCC regparm(1) convention. +//! - `kCallConvX86GccRegParm2` - GCC regparm(2) convention. +//! - `kCallConvX86GccRegParm3` - GCC regparm(3) convention. +//! +//! List of calling conventions for 64-bit x86 mode (x64): +//! - `kCallConvX64Win` - Windows 64-bit calling convention (WIN64 ABI). +//! - `kCallConvX64Unix` - Unix 64-bit calling convention (AMD64 ABI). +ASMJIT_ENUM(CallConv) { + //! Calling convention is invalid (can't be used). 
+ kCallConvNone = 0, + + // -------------------------------------------------------------------------- + // [X86] + // -------------------------------------------------------------------------- + + //! X86 `__cdecl` calling convention (used by C runtime and libraries). + //! + //! Compatible across MSVC and GCC. + //! + //! Arguments direction: + //! - Right to left. + //! + //! Stack is cleaned by: + //! - Caller. + //! + //! Return value: + //! - Integer types - `eax:edx` registers. + //! - Floating point - `fp0` register. + kCallConvX86CDecl = 1, + + //! X86 `__stdcall` calling convention (used mostly by WinAPI). + //! + //! Compatible across MSVC and GCC. + //! + //! Arguments direction: + //! - Right to left. + //! + //! Stack is cleaned by: + //! - Callee. + //! + //! Return value: + //! - Integer types - `eax:edx` registers. + //! - Floating point - `fp0` register. + kCallConvX86StdCall = 2, + + //! X86 `__thiscall` calling convention (MSVC/Intel specific). + //! + //! This is MSVC (and Intel) specific calling convention used when targetting + //! Windows platform for C++ class methods. Implicit `this` pointer (defined + //! as the first argument) is stored in `ecx` register instead of storing it + //! on the stack. + //! + //! This calling convention is implicitly used by MSVC for class functions. + //! + //! C++ class functions that have variable number of arguments use `__cdecl` + //! calling convention instead. + //! + //! Arguments direction: + //! - Right to left (except for the first argument passed in `ecx`). + //! + //! Stack is cleaned by: + //! - Callee. + //! + //! Return value: + //! - Integer types - `eax:edx` registers. + //! - Floating point - `fp0` register. + kCallConvX86MsThisCall = 3, + + //! X86 `__fastcall` convention (MSVC/Intel specific). + //! + //! The first two arguments (evaluated from the left to the right) are passed + //! in `ecx` and `edx` registers, all others on the stack from the right to + //! the left. + //! + //! 
Arguments direction: + //! - Right to left (except for the first two integers passed in `ecx` and `edx`). + //! + //! Stack is cleaned by: + //! - Callee. + //! + //! Return value: + //! - Integer types - `eax:edx` registers. + //! - Floating point - `fp0` register. + //! + //! NOTE: This calling convention differs from GCC's one. + kCallConvX86MsFastCall = 4, + + //! X86 `__fastcall` convention (Borland specific). + //! + //! The first two arguments (evaluated from the left to the right) are passed + //! in `ecx` and `edx` registers, all others on the stack from the left to + //! the right. + //! + //! Arguments direction: + //! - Left to right (except for the first two integers passed in `ecx` and `edx`). + //! + //! Stack is cleaned by: + //! - Callee. + //! + //! Return value: + //! - Integer types - `eax:edx` registers. + //! - Floating point - `fp0` register. + //! + //! NOTE: Arguments on the stack are in passed in left to right order, which + //! is really Borland specific, all other `__fastcall` calling conventions + //! use right to left order. + kCallConvX86BorlandFastCall = 5, + + //! X86 `__fastcall` convention (GCC specific). + //! + //! The first two arguments (evaluated from the left to the right) are passed + //! in `ecx` and `edx` registers, all others on the stack from the right to + //! the left. + //! + //! Arguments direction: + //! - Right to left (except for the first two integers passed in `ecx` and `edx`). + //! + //! Stack is cleaned by: + //! - Callee. + //! + //! Return value: + //! - Integer types - `eax:edx` registers. + //! - Floating point - `fp0` register. + //! + //! NOTE: This calling convention should be compatible with `kCallConvX86MsFastCall`. + kCallConvX86GccFastCall = 6, + + //! X86 `regparm(1)` convention (GCC specific). + //! + //! The first argument (evaluated from the left to the right) is passed in + //! `eax` register, all others on the stack from the right to the left. + //! + //! Arguments direction: + //! 
- Right to left (except for the first integer passed in `eax`). + //! + //! Stack is cleaned by: + //! - Caller. + //! + //! Return value: + //! - Integer types - `eax:edx` registers. + //! - Floating point - `fp0` register. + kCallConvX86GccRegParm1 = 7, + + //! X86 `regparm(2)` convention (GCC specific). + //! + //! The first two arguments (evaluated from the left to the right) are passed + //! in `ecx` and `edx` registers, all others on the stack from the right to + //! the left. + //! + //! Arguments direction: + //! - Right to left (except for the first two integers passed in `ecx` and `edx`). + //! + //! Stack is cleaned by: + //! - Caller. + //! + //! Return value: + //! - Integer types - `eax:edx` registers. + //! - Floating point - `fp0` register. + kCallConvX86GccRegParm2 = 8, + + //! X86 `regparm(3)` convention (GCC specific). + //! + //! Three first parameters (evaluated from left-to-right) are in + //! EAX:EDX:ECX registers, all others on the stack in right-to-left direction. + //! + //! Arguments direction: + //! - Right to left (except for the first three integers passed in `ecx`, + //! `edx`, and `ecx`). + //! + //! Stack is cleaned by: + //! - Caller. + //! + //! Return value: + //! - Integer types - `eax:edx` registers. + //! - Floating point - `fp0` register. + kCallConvX86GccRegParm3 = 9, + + // -------------------------------------------------------------------------- + // [X64] + // -------------------------------------------------------------------------- + + //! X64 calling convention used by Windows platform (WIN64-ABI). + //! + //! The first 4 arguments are passed in the following registers: + //! - 1. 32/64-bit integer in `rcx` and floating point argument in `xmm0` + //! - 2. 32/64-bit integer in `rdx` and floating point argument in `xmm1` + //! - 3. 32/64-bit integer in `r8` and floating point argument in `xmm2` + //! - 4. 32/64-bit integer in `r9` and floating point argument in `xmm3` + //! + //! 
If one or more argument from the first four doesn't match the list above + //! it is simply skipped. WIN64-ABI is very specific about this. + //! + //! All other arguments are pushed on the stack from the right to the left. + //! Stack has to be aligned by 16 bytes, always. There is also a 32-byte + //! shadow space on the stack that can be used to save up to four 64-bit + //! registers. + //! + //! Arguments direction: + //! - Right to left (except for all parameters passed in registers). + //! + //! Stack cleaned by: + //! - Caller. + //! + //! Return value: + //! - Integer types - `rax`. + //! - Floating point - `xmm0`. + //! + //! Stack is always aligned to 16 bytes. + //! + //! More information about this calling convention can be found on MSDN + //! . + kCallConvX64Win = 10, + + //! X64 calling convention used by Unix platforms (AMD64-ABI). + //! + //! First six 32 or 64-bit integer arguments are passed in `rdi`, `rsi`, + //! `rdx`, `rcx`, `r8`, and `r9` registers. First eight floating point or xmm + //! arguments are passed in `xmm0`, `xmm1`, `xmm2`, `xmm3`, `xmm4`, `xmm5`, + //! `xmm6`, and `xmm7` registers. + //! + //! There is also a red zene below the stack pointer that can be used by the + //! function. The red zone is typically from [rsp-128] to [rsp-8], however, + //! red zone can also be disabled. + //! + //! Arguments direction: + //! - Right to left (except for all arguments passed in registers). + //! + //! Stack cleaned by: + //! - Caller. + //! + //! Return value: + //! - Integer types - `rax`. + //! - Floating point - `xmm0`. + //! + //! Stack is always aligned to 16 bytes. + kCallConvX64Unix = 11, + + // -------------------------------------------------------------------------- + // [Internal] + // -------------------------------------------------------------------------- + + //! \internal + _kCallConvX86Start = 1, + //! \internal + _kCallConvX86End = 9, + + //! \internal + _kCallConvX64Start = 10, + //! 
\internal + _kCallConvX64End = 11, + + // -------------------------------------------------------------------------- + // [Host] + // -------------------------------------------------------------------------- + +#if defined(ASMJIT_DOCGEN) + //! Default calling convention based on the current compiler's settings. + //! + //! NOTE: This should be always the same as `kCallConvHostCDecl`, but some + //! compilers allow to override the default calling convention. Overriding + //! is not detected at the moment. + kCallConvHost = DETECTED_AT_COMPILE_TIME, + //! Default C calling convention based on the current compiler's settings. + kCallConvHostCDecl = DETECTED_AT_COMPILE_TIME, + //! Compatibility for `__stdcall` calling convention. + //! + //! NOTE: This enumeration is always set to a value which is compatible with + //! the current compiler's `__stdcall` calling convention. In 64-bit mode + //! there is no such convention and the value is mapped to `kCallConvX64Win` + //! or `kCallConvX64Unix`, depending on the host architecture. + kCallConvHostStdCall = DETECTED_AT_COMPILE_TIME, + //! Compatibility for `__fastcall` calling convention. + //! + //! NOTE: This enumeration is always set to a value which is compatible with + //! the current compiler's `__fastcall` calling convention. In 64-bit mode + //! there is no such convention and the value is mapped to `kCallConvX64Win` + //! or `kCallConvX64Unix`, depending on the host architecture. + kCallConvHostFastCall = DETECTED_AT_COMPILE_TIME +#elif ASMJIT_ARCH_X86 + // X86 Host Support. + kCallConvHost = kCallConvX86CDecl, + kCallConvHostCDecl = kCallConvX86CDecl, + kCallConvHostStdCall = kCallConvX86StdCall, + kCallConvHostFastCall = + ASMJIT_CC_MSC ? kCallConvX86MsFastCall : + ASMJIT_CC_GCC ? kCallConvX86GccFastCall : + ASMJIT_CC_CLANG ? kCallConvX86GccFastCall : + ASMJIT_CC_CODEGEAR ? kCallConvX86BorlandFastCall : kCallConvNone +#elif ASMJIT_ARCH_X64 + // X64 Host Support. + kCallConvHost = ASMJIT_OS_WINDOWS ? 
kCallConvX64Win : kCallConvX64Unix, + // These don't exist in 64-bit mode. + kCallConvHostCDecl = kCallConvHost, + kCallConvHostStdCall = kCallConvHost, + kCallConvHostFastCall = kCallConvHost +#else +# error "[asmjit] Couldn't determine the target's calling convention." +#endif +}; + +// ============================================================================ +// [asmjit::FuncHint] +// ============================================================================ + +//! Function hints. +//! +//! For a platform specific calling conventions, see: +//! - `X86FuncHint` - X86/X64 function hints. +ASMJIT_ENUM(FuncHint) { + //! Generate a naked function by omitting its prolog and epilog (default true). + //! + //! Naked functions should always result in less code required for function's + //! prolog and epilog. In addition, on X86/64 naked functions save one register + //! (ebp or rbp), which can be used by the function instead. + kFuncHintNaked = 0, + + //! Generate a compact function prolog/epilog if possible (default true). + //! + //! X86/X64 Specific + //! ---------------- + //! + //! Use shorter, but possible slower prolog/epilog sequence to save/restore + //! registers. At the moment this only enables emitting `leave` in function's + //! epilog to make the code shorter, however, the counterpart `enter` is not + //! used in function's prolog for performance reasons. + kFuncHintCompact = 1, + + //! Emit `emms` instruction in the function's epilog. + kFuncHintX86Emms = 17, + //! Emit `sfence` instruction in the function's epilog. + kFuncHintX86SFence = 18, + //! Emit `lfence` instruction in the function's epilog. + kFuncHintX86LFence = 19 +}; + +// ============================================================================ +// [asmjit::FuncFlags] +// ============================================================================ + +//! Function flags. +ASMJIT_ENUM(FuncFlags) { + //! Whether the function is using naked (minimal) prolog / epilog. 
+ kFuncFlagIsNaked = 0x00000001, + + //! Whether an another function is called from this function. + kFuncFlagIsCaller = 0x00000002, + + //! Whether the stack is not aligned to the required stack alignment, + //! thus it has to be aligned manually. + kFuncFlagIsStackMisaligned = 0x00000004, + + //! Whether the stack pointer is adjusted by the stack size needed + //! to save registers and function variables. + //! + //! X86/X64 Specific + //! ---------------- + //! + //! Stack pointer (ESP/RSP) is adjusted by 'sub' instruction in prolog and by + //! 'add' instruction in epilog (only if function is not naked). If function + //! needs to perform manual stack alignment more instructions are used to + //! adjust the stack (like "and zsp, -Alignment"). + kFuncFlagIsStackAdjusted = 0x00000008, + + //! Whether the function is finished using `Compiler::endFunc()`. + kFuncFlagIsFinished = 0x80000000, + + //! Whether to emit `leave` instead of two instructions in case that the + //! function saves and restores the frame pointer. + kFuncFlagX86Leave = 0x00010000, + + //! Whether it's required to move arguments to a new stack location, + //! because of manual aligning. + kFuncFlagX86MoveArgs = 0x00040000, + + //! Whether to emit `emms` instruction in epilog (auto-detected). + kFuncFlagX86Emms = 0x01000000, + + //! Whether to emit `sfence` instruction in epilog (auto-detected). + //! + //! `kFuncFlagX86SFence` with `kFuncFlagX86LFence` results in emitting `mfence`. + kFuncFlagX86SFence = 0x02000000, + + //! Whether to emit `lfence` instruction in epilog (auto-detected). + //! + //! `kFuncFlagX86SFence` with `kFuncFlagX86LFence` results in emitting `mfence`. + kFuncFlagX86LFence = 0x04000000 +}; + +// ============================================================================ +// [asmjit::FuncDir] +// ============================================================================ + +//! Function arguments direction. +ASMJIT_ENUM(FuncDir) { + //! Arguments are passed left to right. 
+ //! + //! This arguments direction is unusual in C, however it's used in Pascal. + kFuncDirLTR = 0, + + //! Arguments are passed right ro left + //! + //! This is the default argument direction in C. + kFuncDirRTL = 1 +}; + +// ============================================================================ +// [asmjit::FuncMisc] +// ============================================================================ + +enum { + //! Function doesn't have variable number of arguments (`...`) (default). + kFuncNoVarArgs = 0xFF, + //! Invalid stack offset in function or function parameter. + kFuncStackInvalid = -1 +}; + +// ============================================================================ +// [asmjit::FuncArgIndex] +// ============================================================================ + +//! Function argument index (lo/hi). +ASMJIT_ENUM(FuncArgIndex) { + //! Maxumum number of function arguments supported by AsmJit. + kFuncArgCount = 16, + //! Extended maximum number of arguments (used internally). + kFuncArgCountLoHi = kFuncArgCount * 2, + + //! Index to the LO part of function argument (default). + //! + //! This value is typically omitted and added only if there is HI argument + //! accessed. + kFuncArgLo = 0, + //! Index to the HI part of function argument. + //! + //! HI part of function argument depends on target architecture. On x86 it's + //! typically used to transfer 64-bit integers (they form a pair of 32-bit + //! integers). + kFuncArgHi = kFuncArgCount +}; + +// ============================================================================ +// [asmjit::FuncRet] +// ============================================================================ + +//! Function return value (lo/hi) specification. +ASMJIT_ENUM(FuncRet) { + //! Index to the LO part of function return value. + kFuncRetLo = 0, + //! Index to the HI part of function return value. 
+ kFuncRetHi = 1 +}; + +// ============================================================================ +// [asmjit::TypeId] +// ============================================================================ + +//! Function builder's `void` type. +struct Void {}; + +//! Function builder's `int8_t` type. +struct Int8Type {}; +//! Function builder's `uint8_t` type. +struct UInt8Type {}; + +//! Function builder's `int16_t` type. +struct Int16Type {}; +//! Function builder's `uint16_t` type. +struct UInt16Type {}; + +//! Function builder's `int32_t` type. +struct Int32Type {}; +//! Function builder's `uint32_t` type. +struct UInt32Type {}; + +//! Function builder's `int64_t` type. +struct Int64Type {}; +//! Function builder's `uint64_t` type. +struct UInt64Type {}; + +//! Function builder's `intptr_t` type. +struct IntPtrType {}; +//! Function builder's `uintptr_t` type. +struct UIntPtrType {}; + +//! Function builder's `float` type. +struct FloatType {}; +//! Function builder's `double` type. +struct DoubleType {}; + +#if !defined(ASMJIT_DOCGEN) +template +struct TypeId { + // Let it fail here if `T` was not specialized. +}; + +template +struct TypeId { + enum { kId = kVarTypeIntPtr }; +}; + +template +struct TypeIdOfInt { + enum { kId = (sizeof(T) == 1) ? (int)(IntTraits::kIsSigned ? kVarTypeInt8 : kVarTypeUInt8 ) : + (sizeof(T) == 2) ? (int)(IntTraits::kIsSigned ? kVarTypeInt16 : kVarTypeUInt16) : + (sizeof(T) == 4) ? (int)(IntTraits::kIsSigned ? kVarTypeInt32 : kVarTypeUInt32) : + (sizeof(T) == 8) ? (int)(IntTraits::kIsSigned ? 
kVarTypeInt64 : kVarTypeUInt64) : (int)kInvalidVar + }; +}; + +#define ASMJIT_TYPE_ID(T, ID) \ + template<> struct TypeId { enum { kId = ID }; } + +ASMJIT_TYPE_ID(void , kInvalidVar); +ASMJIT_TYPE_ID(signed char , TypeIdOfInt::kId); +ASMJIT_TYPE_ID(unsigned char , TypeIdOfInt::kId); +ASMJIT_TYPE_ID(short , TypeIdOfInt::kId); +ASMJIT_TYPE_ID(unsigned short , TypeIdOfInt::kId); +ASMJIT_TYPE_ID(int , TypeIdOfInt::kId); +ASMJIT_TYPE_ID(unsigned int , TypeIdOfInt::kId); +ASMJIT_TYPE_ID(long , TypeIdOfInt::kId); +ASMJIT_TYPE_ID(unsigned long , TypeIdOfInt::kId); +ASMJIT_TYPE_ID(float , kVarTypeFp32); +ASMJIT_TYPE_ID(double , kVarTypeFp64); + +#if ASMJIT_CC_HAS_NATIVE_CHAR +ASMJIT_TYPE_ID(char , TypeIdOfInt::kId); +#endif +#if ASMJIT_CC_HAS_NATIVE_WCHAR_T +ASMJIT_TYPE_ID(wchar_t , TypeIdOfInt::kId); +#endif +#if ASMJIT_CC_HAS_NATIVE_CHAR16_T +ASMJIT_TYPE_ID(char16_t , TypeIdOfInt::kId); +#endif +#if ASMJIT_CC_HAS_NATIVE_CHAR32_T +ASMJIT_TYPE_ID(char32_t , TypeIdOfInt::kId); +#endif + +#if ASMJIT_CC_MSC && ASMJIT_CC_MSC < 1600 +ASMJIT_TYPE_ID(__int64 , TypeIdOfInt<__int64>::kId); +ASMJIT_TYPE_ID(unsigned __int64 , TypeIdOfInt::kId); +#else +ASMJIT_TYPE_ID(long long , TypeIdOfInt::kId); +ASMJIT_TYPE_ID(unsigned long long, TypeIdOfInt::kId); +#endif + +ASMJIT_TYPE_ID(Void , kInvalidVar); +ASMJIT_TYPE_ID(Int8Type , kVarTypeInt8); +ASMJIT_TYPE_ID(UInt8Type , kVarTypeUInt8); +ASMJIT_TYPE_ID(Int16Type , kVarTypeInt16); +ASMJIT_TYPE_ID(UInt16Type , kVarTypeUInt16); +ASMJIT_TYPE_ID(Int32Type , kVarTypeInt32); +ASMJIT_TYPE_ID(UInt32Type , kVarTypeUInt32); +ASMJIT_TYPE_ID(Int64Type , kVarTypeInt64); +ASMJIT_TYPE_ID(UInt64Type , kVarTypeUInt64); +ASMJIT_TYPE_ID(IntPtrType , kVarTypeIntPtr); +ASMJIT_TYPE_ID(UIntPtrType , kVarTypeUIntPtr); +ASMJIT_TYPE_ID(FloatType , kVarTypeFp32); +ASMJIT_TYPE_ID(DoubleType , kVarTypeFp64); +#endif // !ASMJIT_DOCGEN + +// ============================================================================ +// [asmjit::FuncInOut] +// 
============================================================================ + +//! Function in/out - argument or return value translated from `FuncPrototype`. +struct FuncInOut { + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE uint32_t getVarType() const { return _varType; } + + ASMJIT_INLINE bool hasRegIndex() const { return _regIndex != kInvalidReg; } + ASMJIT_INLINE uint32_t getRegIndex() const { return _regIndex; } + + ASMJIT_INLINE bool hasStackOffset() const { return _stackOffset != kFuncStackInvalid; } + ASMJIT_INLINE int32_t getStackOffset() const { return static_cast(_stackOffset); } + + //! Get whether the argument / return value is assigned. + ASMJIT_INLINE bool isSet() const { + return (_regIndex != kInvalidReg) | (_stackOffset != kFuncStackInvalid); + } + + // -------------------------------------------------------------------------- + // [Reset] + // -------------------------------------------------------------------------- + + //! Reset the function argument to "unassigned state". + ASMJIT_INLINE void reset() { _packed = 0xFFFFFFFFU; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + union { + struct { + //! Variable type, see \ref VarType. + uint8_t _varType; + //! Register index if argument / return value is a register. + uint8_t _regIndex; + //! Stack offset if argument / return value is on the stack. + int16_t _stackOffset; + }; + + //! All members packed into single 32-bit integer. + uint32_t _packed; + }; +}; + +// ============================================================================ +// [asmjit::FuncPrototype] +// ============================================================================ + +//! Function prototype. +//! +//! 
Function prototype contains information about function return type, count +//! of arguments and their types. Function prototype is a low level structure +//! which doesn't contain platform specific or calling convention specific +//! information. Function prototype is used to create a `FuncDecl`. +struct FuncPrototype { + // -------------------------------------------------------------------------- + // [Setup] + // -------------------------------------------------------------------------- + + //! Setup the prototype. + ASMJIT_INLINE void setup( + uint32_t callConv, + uint32_t ret, + const uint32_t* args, uint32_t numArgs) { + + ASMJIT_ASSERT(callConv <= 0xFF); + ASMJIT_ASSERT(numArgs <= 0xFF); + + _callConv = static_cast(callConv); + _varArgs = kFuncNoVarArgs; + _numArgs = static_cast(numArgs); + _reserved = 0; + + _ret = ret; + _args = args; + } + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get the function's calling convention. + ASMJIT_INLINE uint32_t getCallConv() const { return _callConv; } + //! Get the variable arguments `...` index, `kFuncNoVarArgs` if none. + ASMJIT_INLINE uint32_t getVarArgs() const { return _varArgs; } + //! Get the number of function arguments. + ASMJIT_INLINE uint32_t getNumArgs() const { return _numArgs; } + + //! Get the return value type. + ASMJIT_INLINE uint32_t getRet() const { return _ret; } + //! Get the type of the argument at index `i`. + ASMJIT_INLINE uint32_t getArg(uint32_t i) const { + ASMJIT_ASSERT(i < _numArgs); + return _args[i]; + } + //! Get the array of function arguments' types. 
+ ASMJIT_INLINE const uint32_t* getArgs() const { return _args; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + uint8_t _callConv; + uint8_t _varArgs; + uint8_t _numArgs; + uint8_t _reserved; + + uint32_t _ret; + const uint32_t* _args; +}; + +// ============================================================================ +// [asmjit::FuncBuilderX] +// ============================================================================ + +// TODO: Rename to `DynamicFuncBuilder` +//! Custom function builder for up to 32 function arguments. +struct FuncBuilderX : public FuncPrototype { + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE FuncBuilderX(uint32_t callConv = kCallConvHost) { + setup(callConv, kInvalidVar, _builderArgList, 0); + } + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void setCallConv(uint32_t callConv) { + ASMJIT_ASSERT(callConv <= 0xFF); + _callConv = static_cast(callConv); + } + + //! Set the return type to `retType`. + ASMJIT_INLINE void setRet(uint32_t retType) { + _ret = retType; + } + //! Set the return type based on `T`. + template + ASMJIT_INLINE void setRetT() { setRet(TypeId::kId); } + + //! Set the argument at index `i` to the `type` + ASMJIT_INLINE void setArg(uint32_t i, uint32_t type) { + ASMJIT_ASSERT(i < _numArgs); + _builderArgList[i] = type; + } + //! Set the argument at index `i` to the type based on `T`. + template + ASMJIT_INLINE void setArgT(uint32_t i) { setArg(i, TypeId::kId); } + + //! Append an argument of `type` to the function prototype. 
+ ASMJIT_INLINE void addArg(uint32_t type) { + ASMJIT_ASSERT(_numArgs < kFuncArgCount); + _builderArgList[_numArgs++] = type; + } + //! Append an argument of type based on `T` to the function prototype. + template + ASMJIT_INLINE void addArgT() { addArg(TypeId::kId); } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + uint32_t _builderArgList[kFuncArgCount]; +}; + +//! \internal +#define T(_Type_) TypeId<_Type_>::kId + +//! Function prototype (no args). +template +struct FuncBuilder0 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder0(uint32_t callConv = kCallConvHost) { + setup(callConv, T(RET), NULL, 0); + } +}; + +//! Function prototype (1 argument). +template +struct FuncBuilder1 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder1(uint32_t callConv = kCallConvHost) { + static const uint32_t args[] = { T(P0) }; + setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args)); + } +}; + +//! Function prototype (2 arguments). +template +struct FuncBuilder2 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder2(uint32_t callConv = kCallConvHost) { + static const uint32_t args[] = { T(P0), T(P1) }; + setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args)); + } +}; + +//! Function prototype (3 arguments). +template +struct FuncBuilder3 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder3(uint32_t callConv = kCallConvHost) { + static const uint32_t args[] = { T(P0), T(P1), T(P2) }; + setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args)); + } +}; + +//! Function prototype (4 arguments). +template +struct FuncBuilder4 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder4(uint32_t callConv = kCallConvHost) { + static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3) }; + setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args)); + } +}; + +//! Function prototype (5 arguments). 
+template +struct FuncBuilder5 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder5(uint32_t callConv = kCallConvHost) { + static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4) }; + setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args)); + } +}; + +//! Function prototype (6 arguments). +template +struct FuncBuilder6 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder6(uint32_t callConv = kCallConvHost) { + static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5) }; + setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args)); + } +}; + +//! Function prototype (7 arguments). +template +struct FuncBuilder7 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder7(uint32_t callConv = kCallConvHost) { + static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6) }; + setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args)); + } +}; + +//! Function prototype (8 arguments). +template +struct FuncBuilder8 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder8(uint32_t callConv = kCallConvHost) { + static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7) }; + setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args)); + } +}; + +//! Function prototype (9 arguments). +template +struct FuncBuilder9 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder9(uint32_t callConv = kCallConvHost) { + static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7), T(P8) }; + setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args)); + } +}; + +//! Function prototype (10 arguments). 
+template +struct FuncBuilder10 : public FuncPrototype { + ASMJIT_INLINE FuncBuilder10(uint32_t callConv = kCallConvHost) { + static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7), T(P8), T(P9) }; + setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args)); + } +}; +#undef T + +// ============================================================================ +// [asmjit::FuncDecl] +// ============================================================================ + +//! Function declaration. +struct FuncDecl { + // -------------------------------------------------------------------------- + // [Accessors - Calling Convention] + // -------------------------------------------------------------------------- + + //! Get the function's calling convention, see `CallConv`. + ASMJIT_INLINE uint32_t getCallConv() const { return _callConv; } + + //! Get whether the callee pops the stack. + ASMJIT_INLINE uint32_t getCalleePopsStack() const { return _calleePopsStack; } + + //! Get direction of arguments passed on the stack. + //! + //! Direction should be always `kFuncDirRTL`. + //! + //! \note This is related to used calling convention, it's not affected by + //! number of function arguments or their types. + ASMJIT_INLINE uint32_t getDirection() const { return _direction; } + + //! Get stack size needed for function arguments passed on the stack. + ASMJIT_INLINE uint32_t getArgStackSize() const { return _argStackSize; } + //! Get size of "Red Zone". + ASMJIT_INLINE uint32_t getRedZoneSize() const { return _redZoneSize; } + //! Get size of "Spill Zone". + ASMJIT_INLINE uint32_t getSpillZoneSize() const { return _spillZoneSize; } + + // -------------------------------------------------------------------------- + // [Accessors - Arguments and Return] + // -------------------------------------------------------------------------- + + //! Get whether the function has a return value. + ASMJIT_INLINE bool hasRet() const { return _retCount != 0; } + //! 
Get count of function return values. + ASMJIT_INLINE uint32_t getRetCount() const { return _retCount; } + + //! Get function return value. + ASMJIT_INLINE FuncInOut& getRet(uint32_t index = kFuncRetLo) { return _rets[index]; } + //! Get function return value. + ASMJIT_INLINE const FuncInOut& getRet(uint32_t index = kFuncRetLo) const { return _rets[index]; } + + //! Get the number of function arguments. + ASMJIT_INLINE uint32_t getNumArgs() const { return _numArgs; } + + //! Get function arguments array. + ASMJIT_INLINE FuncInOut* getArgs() { return _args; } + //! Get function arguments array (const). + ASMJIT_INLINE const FuncInOut* getArgs() const { return _args; } + + //! Get function argument at index `index`. + ASMJIT_INLINE FuncInOut& getArg(size_t index) { + ASMJIT_ASSERT(index < kFuncArgCountLoHi); + return _args[index]; + } + + //! Get function argument at index `index`. + ASMJIT_INLINE const FuncInOut& getArg(size_t index) const { + ASMJIT_ASSERT(index < kFuncArgCountLoHi); + return _args[index]; + } + + ASMJIT_INLINE void resetArg(size_t index) { + ASMJIT_ASSERT(index < kFuncArgCountLoHi); + _args[index].reset(); + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Calling convention. + uint8_t _callConv; + //! Whether a callee pops stack. + uint8_t _calleePopsStack : 1; + //! Direction for arguments passed on the stack, see `FuncDir`. + uint8_t _direction : 1; + //! Reserved #0 (alignment). + uint8_t _reserved0 : 6; + + //! Number of function arguments. + uint8_t _numArgs; + //! Number of function return values. + uint8_t _retCount; + + //! Count of bytes consumed by arguments on the stack (aligned). + uint32_t _argStackSize; + + //! Size of "Red Zone". + //! + //! \note Used by AMD64-ABI (128 bytes). + uint16_t _redZoneSize; + + //! Size of "Spill Zone". + //! + //! \note Used by WIN64-ABI (32 bytes). 
+ uint16_t _spillZoneSize; + + //! Function arguments (LO & HI) mapped to physical registers and stack. + FuncInOut _args[kFuncArgCountLoHi]; + + //! Function return value(s). + FuncInOut _rets[2]; +}; + +//! \} + +} // asmjit namespace + +// [Api-End] +#include "../apiend.h" + +// [Guard] +#endif // !ASMJIT_DISABLE_COMPILER +#endif // _ASMJIT_BASE_COMPILERFUNC_H diff --git a/src/asmjit/base/constpool.cpp b/src/asmjit/base/constpool.cpp index cd7e6d7..1e01144 100644 --- a/src/asmjit/base/constpool.cpp +++ b/src/asmjit/base/constpool.cpp @@ -9,7 +9,7 @@ // [Dependencies - AsmJit] #include "../base/constpool.h" -#include "../base/intutil.h" +#include "../base/utils.h" // [Api-Begin] #include "../apibegin.h" @@ -21,14 +21,14 @@ namespace asmjit { // get, insert and traverse. // ============================================================================ -// [asmjit::ConstPoolTree - Ops] +// [asmjit::ConstPool::Tree - Ops] // ============================================================================ //! \internal //! //! Remove left horizontal links. -static ASMJIT_INLINE ConstPoolNode* ConstPoolTree_skewNode(ConstPoolNode* node) { - ConstPoolNode* link = node->_link[0]; +static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* node) { + ConstPool::Node* link = node->_link[0]; uint32_t level = node->_level; if (level != 0 && link != NULL && link->_level == level) { @@ -44,8 +44,8 @@ static ASMJIT_INLINE ConstPoolNode* ConstPoolTree_skewNode(ConstPoolNode* node) //! \internal //! //! Remove consecutive horizontal links. 
-static ASMJIT_INLINE ConstPoolNode* ConstPoolTree_splitNode(ConstPoolNode* node) { - ConstPoolNode* link = node->_link[1]; +static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* node) { + ConstPool::Node* link = node->_link[1]; uint32_t level = node->_level; if (level != 0 && link != NULL && link->_link[1] != NULL && link->_link[1]->_level == level) { @@ -59,8 +59,8 @@ static ASMJIT_INLINE ConstPoolNode* ConstPoolTree_splitNode(ConstPoolNode* node) return node; } -ConstPoolNode* ConstPoolTree::get(const void* data) { - ConstPoolNode* node = _root; +ConstPool::Node* ConstPool::Tree::get(const void* data) { + ConstPool::Node* node = _root; size_t dataSize = _dataSize; while (node != NULL) { @@ -73,7 +73,7 @@ ConstPoolNode* ConstPoolTree::get(const void* data) { return NULL; } -void ConstPoolTree::put(ConstPoolNode* newNode) { +void ConstPool::Tree::put(ConstPool::Node* newNode) { size_t dataSize = _dataSize; _length++; @@ -82,8 +82,8 @@ void ConstPoolTree::put(ConstPoolNode* newNode) { return; } - ConstPoolNode* node = _root; - ConstPoolNode* stack[kHeightLimit]; + ConstPool::Node* node = _root; + ConstPool::Node* stack[kHeightLimit]; unsigned int top = 0; unsigned int dir; @@ -93,7 +93,7 @@ void ConstPoolTree::put(ConstPoolNode* newNode) { stack[top++] = node; dir = ::memcmp(node->getData(), newNode->getData(), dataSize) < 0; - ConstPoolNode* link = node->_link[dir]; + ConstPool::Node* link = node->_link[dir]; if (link == NULL) break; @@ -162,16 +162,16 @@ void ConstPool::reset() { // [asmjit::ConstPool - Ops] // ============================================================================ -static ASMJIT_INLINE ConstPoolGap* ConstPool_allocGap(ConstPool* self) { - ConstPoolGap* gap = self->_gapPool; +static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) { + ConstPool::Gap* gap = self->_gapPool; if (gap == NULL) - return self->_zone->allocT(); + return self->_zone->allocT(); self->_gapPool = gap->_next; return gap; } 
-static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPoolGap* gap) { +static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) { gap->_next = self->_gapPool; self->_gapPool = gap; } @@ -183,19 +183,19 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) { size_t gapIndex; size_t gapLength; - if (length >= 16 && IntUtil::isAligned(offset, 16)) { + if (length >= 16 && Utils::isAligned(offset, 16)) { gapIndex = ConstPool::kIndex16; gapLength = 16; } - else if (length >= 8 && IntUtil::isAligned(offset, 8)) { + else if (length >= 8 && Utils::isAligned(offset, 8)) { gapIndex = ConstPool::kIndex8; gapLength = 8; } - else if (length >= 4 && IntUtil::isAligned(offset, 4)) { + else if (length >= 4 && Utils::isAligned(offset, 4)) { gapIndex = ConstPool::kIndex4; gapLength = 4; } - else if (length >= 2 && IntUtil::isAligned(offset, 2)) { + else if (length >= 2 && Utils::isAligned(offset, 2)) { gapIndex = ConstPool::kIndex2; gapLength = 2; } @@ -207,7 +207,7 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) { // We don't have to check for errors here, if this failed nothing really // happened (just the gap won't be visible) and it will fail again at // place where checking will cause kErrorNoHeapMemory. - ConstPoolGap* gap = ConstPool_allocGap(self); + ConstPool::Gap* gap = ConstPool_allocGap(self); if (gap == NULL) return; @@ -240,7 +240,7 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) { else return kErrorInvalidArgument; - ConstPoolNode* node = _tree[treeIndex].get(data); + ConstPool::Node* node = _tree[treeIndex].get(data); if (node != NULL) { dstOffset = node->_offset; return kErrorOk; @@ -252,7 +252,7 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) { size_t gapIndex = treeIndex; while (gapIndex != kIndexCount - 1) { - ConstPoolGap* gap = _gaps[treeIndex]; + ConstPool::Gap* gap = _gaps[treeIndex]; // Check if there is a gap. 
if (gap != NULL) { @@ -264,7 +264,7 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) { ConstPool_freeGap(this, gap); offset = gapOffset; - ASMJIT_ASSERT(IntUtil::isAligned(offset, size)); + ASMJIT_ASSERT(Utils::isAligned(offset, size)); gapLength -= size; if (gapLength > 0) @@ -277,11 +277,11 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) { if (offset == ~static_cast(0)) { // Get how many bytes have to be skipped so the address is aligned accordingly // to the 'size'. - size_t deltaTo = IntUtil::deltaTo(_size, size); + size_t diff = Utils::alignDiff(_size, size); - if (deltaTo != 0) { - ConstPool_addGap(this, _size, deltaTo); - _size += deltaTo; + if (diff != 0) { + ConstPool_addGap(this, _size, diff); + _size += diff; } offset = _size; @@ -289,12 +289,12 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) { } // Add the initial node to the right index. - node = ConstPoolTree::_newNode(_zone, data, size, offset, false); + node = ConstPool::Tree::_newNode(_zone, data, size, offset, false); if (node == NULL) return kErrorNoHeapMemory; _tree[treeIndex].put(node); - _alignment = IntUtil::iMax(_alignment, size); + _alignment = Utils::iMax(_alignment, size); dstOffset = offset; @@ -316,7 +316,7 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) { if (node != NULL) continue; - node = ConstPoolTree::_newNode(_zone, pData, size, offset + (i * size), true); + node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true); _tree[treeIndex].put(node); } } @@ -333,7 +333,7 @@ struct ConstPoolFill { _dst(dst), _dataSize(dataSize) {} - ASMJIT_INLINE void visit(const ConstPoolNode* node) { + ASMJIT_INLINE void visit(const ConstPool::Node* node) { if (!node->_shared) ::memcpy(_dst + node->_offset, node->getData(), _dataSize); } @@ -342,7 +342,7 @@ struct ConstPoolFill { size_t _dataSize; }; -void ConstPool::fill(void* dst) { +void ConstPool::fill(void* dst) const { // 
Clears possible gaps, asmjit should never emit garbage to the output. ::memset(dst, 0, _size); @@ -359,7 +359,7 @@ void ConstPool::fill(void* dst) { #if defined(ASMJIT_TEST) UNIT(base_constpool) { - Zone zone(32384 - kZoneOverhead); + Zone zone(32384 - Zone::kZoneOverhead); ConstPool pool(&zone); uint32_t i; diff --git a/src/asmjit/base/constpool.h b/src/asmjit/base/constpool.h index 8f66b8b..c04db2f 100644 --- a/src/asmjit/base/constpool.h +++ b/src/asmjit/base/constpool.h @@ -9,7 +9,6 @@ #define _ASMJIT_BASE_CONSTPOOL_H // [Dependencies - AsmJit] -#include "../base/error.h" #include "../base/zone.h" // [Api-Begin] @@ -17,186 +16,9 @@ namespace asmjit { -//! \addtogroup asmjit_base_util +//! \addtogroup asmjit_base //! \{ -// ============================================================================ -// [asmjit::ConstPoolNode] -// ============================================================================ - -//! \internal -//! -//! Zone-allocated constant-pool node. -struct ConstPoolNode { - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void* getData() const { - return static_cast(const_cast(this) + 1); - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Left/Right nodes. - ConstPoolNode* _link[2]; - //! Horizontal level for balance. - uint32_t _level : 31; - //! Whether this constant is shared with another. - uint32_t _shared : 1; - //! Data offset from the beginning of the pool. - uint32_t _offset; -}; - -// ============================================================================ -// [asmjit::ConstPoolTree] -// ============================================================================ - -//! \internal -//! -//! Zone-allocated constant-pool tree. 
-struct ConstPoolTree { - enum { - //! Maximum tree height == log2(1 << 64). - kHeightLimit = 64 - }; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE ConstPoolTree(size_t dataSize = 0) : - _root(NULL), - _length(0), - _dataSize(dataSize) {} - ASMJIT_INLINE ~ConstPoolTree() {} - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void reset() { - _root = NULL; - _length = 0; - } - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool isEmpty() const { - return _length == 0; - } - - ASMJIT_INLINE size_t getLength() const { - return _length; - } - - ASMJIT_INLINE void setDataSize(size_t dataSize) { - ASMJIT_ASSERT(isEmpty()); - _dataSize = dataSize; - } - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - ASMJIT_API ConstPoolNode* get(const void* data); - ASMJIT_API void put(ConstPoolNode* node); - - // -------------------------------------------------------------------------- - // [Iterate] - // -------------------------------------------------------------------------- - - template - ASMJIT_INLINE void iterate(Visitor& visitor) const { - ConstPoolNode* node = const_cast(_root); - ConstPoolNode* link; - - ConstPoolNode* stack[kHeightLimit]; - - if (node == NULL) - return; - - size_t top = 0; - - for (;;) { - link = node->_link[0]; - - if (link != NULL) { - ASMJIT_ASSERT(top != kHeightLimit); - stack[top++] = node; - - node = link; - continue; - } - -_Visit: - visitor.visit(node); - link = node->_link[1]; 
- - if (link != NULL) { - node = link; - continue; - } - - if (top == 0) - break; - - node = stack[--top]; - goto _Visit; - } - } - - // -------------------------------------------------------------------------- - // [Helpers] - // -------------------------------------------------------------------------- - - static ASMJIT_INLINE ConstPoolNode* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) { - ConstPoolNode* node = zone->allocT(sizeof(ConstPoolNode) + size); - if (node == NULL) - return NULL; - - node->_link[0] = NULL; - node->_link[1] = NULL; - node->_level = 1; - node->_shared = shared; - node->_offset = static_cast(offset); - - ::memcpy(node->getData(), data, size); - return node; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Root of the tree - ConstPoolNode* _root; - //! Length of the tree (count of nodes). - size_t _length; - //! Size of the data. - size_t _dataSize; -}; - -// ============================================================================ -// [asmjit::ConstPoolGap] -// ============================================================================ - -//! \internal -//! -//! Zone-allocated constant-pool gap. -struct ConstPoolGap { - //! Link to the next gap - ConstPoolGap* _next; - //! Offset of the gap. - size_t _offset; - //! Remaining bytes of the gap (basically a gap size). - size_t _length; -}; - // ============================================================================ // [asmjit::ConstPool] // ============================================================================ @@ -215,6 +37,178 @@ struct ConstPool { kIndexCount = 6 }; + // -------------------------------------------------------------------------- + // [Gap] + // -------------------------------------------------------------------------- + + //! \internal + //! + //! Zone-allocated const-pool gap. 
+ struct Gap { + //! Link to the next gap + Gap* _next; + //! Offset of the gap. + size_t _offset; + //! Remaining bytes of the gap (basically a gap size). + size_t _length; + }; + + // -------------------------------------------------------------------------- + // [Node] + // -------------------------------------------------------------------------- + + //! \internal + //! + //! Zone-allocated const-pool node. + struct Node { + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void* getData() const { + return static_cast(const_cast(this) + 1); + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Left/Right nodes. + Node* _link[2]; + //! Horizontal level for balance. + uint32_t _level : 31; + //! Whether this constant is shared with another. + uint32_t _shared : 1; + //! Data offset from the beginning of the pool. + uint32_t _offset; + }; + + // -------------------------------------------------------------------------- + // [Tree] + // -------------------------------------------------------------------------- + + //! \internal + //! + //! Zone-allocated const-pool tree. + struct Tree { + enum { + //! Maximum tree height == log2(1 << 64). 
+ kHeightLimit = 64 + }; + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE Tree(size_t dataSize = 0) + : _root(NULL), + _length(0), + _dataSize(dataSize) {} + ASMJIT_INLINE ~Tree() {} + + // -------------------------------------------------------------------------- + // [Reset] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void reset() { + _root = NULL; + _length = 0; + } + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE bool isEmpty() const { return _length == 0; } + ASMJIT_INLINE size_t getLength() const { return _length; } + + ASMJIT_INLINE void setDataSize(size_t dataSize) { + ASMJIT_ASSERT(isEmpty()); + _dataSize = dataSize; + } + + // -------------------------------------------------------------------------- + // [Ops] + // -------------------------------------------------------------------------- + + ASMJIT_API Node* get(const void* data); + ASMJIT_API void put(Node* node); + + // -------------------------------------------------------------------------- + // [Iterate] + // -------------------------------------------------------------------------- + + template + ASMJIT_INLINE void iterate(Visitor& visitor) const { + Node* node = const_cast(_root); + Node* link; + + Node* stack[kHeightLimit]; + + if (node == NULL) + return; + + size_t top = 0; + + for (;;) { + link = node->_link[0]; + + if (link != NULL) { + ASMJIT_ASSERT(top != kHeightLimit); + stack[top++] = node; + + node = link; + continue; + } + + _Visit: + visitor.visit(node); + link = node->_link[1]; + + if (link != NULL) { + node = link; + continue; + } + + if (top == 0) + break; + + node = stack[--top]; + goto _Visit; + } + } + + // 
-------------------------------------------------------------------------- + // [Helpers] + // -------------------------------------------------------------------------- + + static ASMJIT_INLINE Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) { + Node* node = zone->allocT(sizeof(Node) + size); + if (node == NULL) + return NULL; + + node->_link[0] = NULL; + node->_link[1] = NULL; + node->_level = 1; + node->_shared = shared; + node->_offset = static_cast(offset); + + ::memcpy(node->getData(), data, size); + return node; + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Root of the tree + Node* _root; + //! Length of the tree (count of nodes). + size_t _length; + //! Size of the data. + size_t _dataSize; + }; + // -------------------------------------------------------------------------- // [Construction / Destruction] // -------------------------------------------------------------------------- @@ -233,19 +227,11 @@ struct ConstPool { // -------------------------------------------------------------------------- //! Get whether the constant-pool is empty. - ASMJIT_INLINE bool isEmpty() const { - return _size == 0; - } - + ASMJIT_INLINE bool isEmpty() const { return _size == 0; } //! Get the size of the constant-pool in bytes. - ASMJIT_INLINE size_t getSize() const { - return _size; - } - + ASMJIT_INLINE size_t getSize() const { return _size; } //! Get minimum alignment. - ASMJIT_INLINE size_t getAlignment() const { - return _alignment; - } + ASMJIT_INLINE size_t getAlignment() const { return _alignment; } //! Add a constant to the constant pool. //! @@ -271,7 +257,7 @@ struct ConstPool { // -------------------------------------------------------------------------- //! Fill the destination with the constants from the pool. 
- ASMJIT_API void fill(void* dst); + ASMJIT_API void fill(void* dst) const; // -------------------------------------------------------------------------- // [Members] @@ -280,11 +266,11 @@ struct ConstPool { //! Zone allocator. Zone* _zone; //! Tree per size. - ConstPoolTree _tree[kIndexCount]; + Tree _tree[kIndexCount]; //! Gaps per size. - ConstPoolGap* _gaps[kIndexCount]; + Gap* _gaps[kIndexCount]; //! Gaps pool - ConstPoolGap* _gapPool; + Gap* _gapPool; //! Size of the pool (in bytes). size_t _size; diff --git a/src/asmjit/base/containers.cpp b/src/asmjit/base/containers.cpp index b931813..ee7dad8 100644 --- a/src/asmjit/base/containers.cpp +++ b/src/asmjit/base/containers.cpp @@ -9,7 +9,7 @@ // [Dependencies - AsmJit] #include "../base/containers.h" -#include "../base/intutil.h" +#include "../base/utils.h" // [Api-Begin] #include "../apibegin.h" @@ -53,7 +53,7 @@ Error PodVectorBase::_grow(size_t n, size_t sizeOfT) { size_t capacity = d->capacity; size_t after = d->length; - if (IntUtil::maxUInt() - n < after) + if (IntTraits::maxValue() - n < after) return kErrorNoHeapMemory; after += n; @@ -110,6 +110,358 @@ Error PodVectorBase::_reserve(size_t n, size_t sizeOfT) { return kErrorOk; } +// Should be placed in read-only memory. 
+static const char StringBuilder_empty[4] = { 0 }; + +// ============================================================================ +// [asmjit::StringBuilder - Construction / Destruction] +// ============================================================================ + +StringBuilder::StringBuilder() + : _data(const_cast(StringBuilder_empty)), + _length(0), + _capacity(0), + _canFree(false) {} + +StringBuilder::~StringBuilder() { + if (_canFree) + ASMJIT_FREE(_data); +} + +// ============================================================================ +// [asmjit::StringBuilder - Prepare / Reserve] +// ============================================================================ + +char* StringBuilder::prepare(uint32_t op, size_t len) { + // -------------------------------------------------------------------------- + // [Set] + // -------------------------------------------------------------------------- + + if (op == kStringOpSet) { + // We don't care here, but we can't return a NULL pointer since it indicates + // failure in memory allocation. 
+ if (len == 0) { + if (_data != StringBuilder_empty) + _data[0] = 0; + + _length = 0; + return _data; + } + + if (_capacity < len) { + if (len >= IntTraits::maxValue() - sizeof(intptr_t) * 2) + return NULL; + + size_t to = Utils::alignTo(len, sizeof(intptr_t)); + if (to < 256 - sizeof(intptr_t)) + to = 256 - sizeof(intptr_t); + + char* newData = static_cast(ASMJIT_ALLOC(to + sizeof(intptr_t))); + if (newData == NULL) { + clear(); + return NULL; + } + + if (_canFree) + ASMJIT_FREE(_data); + + _data = newData; + _capacity = to + sizeof(intptr_t) - 1; + _canFree = true; + } + + _data[len] = 0; + _length = len; + + ASMJIT_ASSERT(_length <= _capacity); + return _data; + } + + // -------------------------------------------------------------------------- + // [Append] + // -------------------------------------------------------------------------- + + else { + // We don't care here, but we can't return a NULL pointer since it indicates + // failure in memory allocation. + if (len == 0) + return _data + _length; + + // Overflow. 
+ if (IntTraits::maxValue() - sizeof(intptr_t) * 2 - _length < len) + return NULL; + + size_t after = _length + len; + if (_capacity < after) { + size_t to = _capacity; + + if (to < 256) + to = 256; + + while (to < 1024 * 1024 && to < after) + to *= 2; + + if (to < after) { + to = after; + if (to < (IntTraits::maxValue() - 1024 * 32)) + to = Utils::alignTo(to, 1024 * 32); + } + + to = Utils::alignTo(to, sizeof(intptr_t)); + char* newData = static_cast(ASMJIT_ALLOC(to + sizeof(intptr_t))); + + if (newData == NULL) + return NULL; + + ::memcpy(newData, _data, _length); + if (_canFree) + ASMJIT_FREE(_data); + + _data = newData; + _capacity = to + sizeof(intptr_t) - 1; + _canFree = true; + } + + char* ret = _data + _length; + _data[after] = 0; + _length = after; + + ASMJIT_ASSERT(_length <= _capacity); + return ret; + } +} + +bool StringBuilder::reserve(size_t to) { + if (_capacity >= to) + return true; + + if (to >= IntTraits::maxValue() - sizeof(intptr_t) * 2) + return false; + + to = Utils::alignTo(to, sizeof(intptr_t)); + + char* newData = static_cast(ASMJIT_ALLOC(to + sizeof(intptr_t))); + if (newData == NULL) + return false; + + ::memcpy(newData, _data, _length + 1); + if (_canFree) + ASMJIT_FREE(_data); + + _data = newData; + _capacity = to + sizeof(intptr_t) - 1; + _canFree = true; + return true; +} + +// ============================================================================ +// [asmjit::StringBuilder - Clear] +// ============================================================================ + +void StringBuilder::clear() { + if (_data != StringBuilder_empty) + _data[0] = 0; + _length = 0; +} + +// ============================================================================ +// [asmjit::StringBuilder - Methods] +// ============================================================================ + +bool StringBuilder::_opString(uint32_t op, const char* str, size_t len) { + if (len == kInvalidIndex) + len = str != NULL ? 
::strlen(str) : static_cast(0); + + char* p = prepare(op, len); + if (p == NULL) + return false; + + ::memcpy(p, str, len); + return true; +} + +bool StringBuilder::_opChar(uint32_t op, char c) { + char* p = prepare(op, 1); + if (p == NULL) + return false; + + *p = c; + return true; +} + +bool StringBuilder::_opChars(uint32_t op, char c, size_t len) { + char* p = prepare(op, len); + if (p == NULL) + return false; + + ::memset(p, c, len); + return true; +} + +static const char StringBuilder_numbers[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + +bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) { + if (base < 2 || base > 36) + base = 10; + + char buf[128]; + char* p = buf + ASMJIT_ARRAY_SIZE(buf); + + uint64_t orig = i; + char sign = '\0'; + + // -------------------------------------------------------------------------- + // [Sign] + // -------------------------------------------------------------------------- + + if ((flags & kStringFormatSigned) != 0 && static_cast(i) < 0) { + i = static_cast(-static_cast(i)); + sign = '-'; + } + else if ((flags & kStringFormatShowSign) != 0) { + sign = '+'; + } + else if ((flags & kStringFormatShowSpace) != 0) { + sign = ' '; + } + + // -------------------------------------------------------------------------- + // [Number] + // -------------------------------------------------------------------------- + + do { + uint64_t d = i / base; + uint64_t r = i % base; + + *--p = StringBuilder_numbers[r]; + i = d; + } while (i); + + size_t numberLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p); + + // -------------------------------------------------------------------------- + // [Alternate Form] + // -------------------------------------------------------------------------- + + if ((flags & kStringFormatAlternate) != 0) { + if (base == 8) { + if (orig != 0) + *--p = '0'; + } + if (base == 16) { + *--p = 'x'; + *--p = '0'; + } + } + + // 
-------------------------------------------------------------------------- + // [Width] + // -------------------------------------------------------------------------- + + if (sign != 0) + *--p = sign; + + if (width > 256) + width = 256; + + if (width <= numberLength) + width = 0; + else + width -= numberLength; + + // -------------------------------------------------------------------------- + // [Write] + // -------------------------------------------------------------------------- + + size_t prefixLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberLength; + char* data = prepare(op, prefixLength + width + numberLength); + + if (data == NULL) + return false; + + ::memcpy(data, p, prefixLength); + data += prefixLength; + + ::memset(data, '0', width); + data += width; + + ::memcpy(data, p + prefixLength, numberLength); + return true; +} + +bool StringBuilder::_opHex(uint32_t op, const void* data, size_t len) { + if (len >= IntTraits::maxValue() / 2) + return false; + + char* dst = prepare(op, len * 2); + if (dst == NULL) + return false; + + const char* src = static_cast(data); + for (size_t i = 0; i < len; i++, dst += 2, src += 1) + { + dst[0] = StringBuilder_numbers[(src[0] >> 4) & 0xF]; + dst[1] = StringBuilder_numbers[(src[0] ) & 0xF]; + } + + return true; +} + +bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) { + char buf[1024]; + + vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap); + buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0'; + + return _opString(op, buf); +} + +bool StringBuilder::setFormat(const char* fmt, ...) { + bool result; + + va_list ap; + va_start(ap, fmt); + result = _opVFormat(kStringOpSet, fmt, ap); + va_end(ap); + + return result; +} + +bool StringBuilder::appendFormat(const char* fmt, ...) 
{ + bool result; + + va_list ap; + va_start(ap, fmt); + result = _opVFormat(kStringOpAppend, fmt, ap); + va_end(ap); + + return result; +} + +bool StringBuilder::eq(const char* str, size_t len) const { + const char* aData = _data; + const char* bData = str; + + size_t aLength = _length; + size_t bLength = len; + + if (bLength == kInvalidIndex) { + size_t i; + for (i = 0; i < aLength; i++) { + if (aData[i] != bData[i] || bData[i] == 0) + return false; + } + + return bData[i] == 0; + } + else { + if (aLength != bLength) + return false; + + return ::memcmp(aData, bData, aLength) == 0; + } +} + } // asmjit namespace // [Api-End] diff --git a/src/asmjit/base/containers.h b/src/asmjit/base/containers.h index c6dc70d..211db7b 100644 --- a/src/asmjit/base/containers.h +++ b/src/asmjit/base/containers.h @@ -9,7 +9,6 @@ #define _ASMJIT_BASE_CONTAINERS_H // [Dependencies - AsmJit] -#include "../base/error.h" #include "../base/globals.h" // [Api-Begin] @@ -17,9 +16,125 @@ namespace asmjit { -//! \addtogroup asmjit_base_util +//! \addtogroup asmjit_base //! \{ +// ============================================================================ +// [asmjit::BitArray] +// ============================================================================ + +//! Fixed size bit-array. +//! +//! Used by variable liveness analysis. 
+struct BitArray { + // -------------------------------------------------------------------------- + // [Enums] + // -------------------------------------------------------------------------- + + enum { + kEntitySize = static_cast(sizeof(uintptr_t)), + kEntityBits = kEntitySize * 8 + }; + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE uintptr_t getBit(uint32_t index) const { + return (data[index / kEntityBits] >> (index % kEntityBits)) & 1; + } + + ASMJIT_INLINE void setBit(uint32_t index) { + data[index / kEntityBits] |= static_cast(1) << (index % kEntityBits); + } + + ASMJIT_INLINE void delBit(uint32_t index) { + data[index / kEntityBits] &= ~(static_cast(1) << (index % kEntityBits)); + } + + // -------------------------------------------------------------------------- + // [Interface] + // -------------------------------------------------------------------------- + + //! Copy bits from `s0`, returns `true` if at least one bit is set in `s0`. 
+ ASMJIT_INLINE bool copyBits(const BitArray* s0, uint32_t len) { + uintptr_t r = 0; + for (uint32_t i = 0; i < len; i++) { + uintptr_t t = s0->data[i]; + data[i] = t; + r |= t; + } + return r != 0; + } + + ASMJIT_INLINE bool addBits(const BitArray* s0, uint32_t len) { + return addBits(this, s0, len); + } + + ASMJIT_INLINE bool addBits(const BitArray* s0, const BitArray* s1, uint32_t len) { + uintptr_t r = 0; + for (uint32_t i = 0; i < len; i++) { + uintptr_t t = s0->data[i] | s1->data[i]; + data[i] = t; + r |= t; + } + return r != 0; + } + + ASMJIT_INLINE bool andBits(const BitArray* s1, uint32_t len) { + return andBits(this, s1, len); + } + + ASMJIT_INLINE bool andBits(const BitArray* s0, const BitArray* s1, uint32_t len) { + uintptr_t r = 0; + for (uint32_t i = 0; i < len; i++) { + uintptr_t t = s0->data[i] & s1->data[i]; + data[i] = t; + r |= t; + } + return r != 0; + } + + ASMJIT_INLINE bool delBits(const BitArray* s1, uint32_t len) { + return delBits(this, s1, len); + } + + ASMJIT_INLINE bool delBits(const BitArray* s0, const BitArray* s1, uint32_t len) { + uintptr_t r = 0; + for (uint32_t i = 0; i < len; i++) { + uintptr_t t = s0->data[i] & ~s1->data[i]; + data[i] = t; + r |= t; + } + return r != 0; + } + + ASMJIT_INLINE bool _addBitsDelSource(BitArray* s1, uint32_t len) { + return _addBitsDelSource(this, s1, len); + } + + ASMJIT_INLINE bool _addBitsDelSource(const BitArray* s0, BitArray* s1, uint32_t len) { + uintptr_t r = 0; + for (uint32_t i = 0; i < len; i++) { + uintptr_t a = s0->data[i]; + uintptr_t b = s1->data[i]; + + this->data[i] = a | b; + b &= ~a; + + s1->data[i] = b; + r |= b; + } + return r != 0; + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + uintptr_t data[1]; +}; + // ============================================================================ // [asmjit::PodVectorData] // 
============================================================================ @@ -31,9 +146,7 @@ struct PodVectorData { // -------------------------------------------------------------------------- //! Get data. - ASMJIT_INLINE void* getData() const { - return (void*)(this + 1); - } + ASMJIT_INLINE void* getData() const { return (void*)(this + 1); } // -------------------------------------------------------------------------- // [Members] @@ -58,13 +171,9 @@ struct PodVectorBase { // -------------------------------------------------------------------------- //! Create a new instance of `PodVectorBase`. - ASMJIT_INLINE PodVectorBase() : - _d(const_cast(&_nullData)) {} - + ASMJIT_INLINE PodVectorBase() : _d(const_cast(&_nullData)) {} //! Destroy the `PodVectorBase` and data. - ASMJIT_INLINE ~PodVectorBase() { - reset(true); - } + ASMJIT_INLINE ~PodVectorBase() { reset(true); } // -------------------------------------------------------------------------- // [Reset] @@ -121,43 +230,24 @@ struct PodVector : PodVectorBase { // -------------------------------------------------------------------------- //! Get whether the vector is empty. - ASMJIT_INLINE bool isEmpty() const { - return _d->length == 0; - } - + ASMJIT_INLINE bool isEmpty() const { return _d->length == 0; } //! Get length. - ASMJIT_INLINE size_t getLength() const { - return _d->length; - } - + ASMJIT_INLINE size_t getLength() const { return _d->length; } //! Get capacity. - ASMJIT_INLINE size_t getCapacity() const { - return _d->capacity; - } - + ASMJIT_INLINE size_t getCapacity() const { return _d->capacity; } //! Get data. - ASMJIT_INLINE T* getData() { - return static_cast(_d->getData()); - } - + ASMJIT_INLINE T* getData() { return static_cast(_d->getData()); } //! 
\overload - ASMJIT_INLINE const T* getData() const { - return static_cast(_d->getData()); - } + ASMJIT_INLINE const T* getData() const { return static_cast(_d->getData()); } // -------------------------------------------------------------------------- // [Grow / Reserve] // -------------------------------------------------------------------------- //! Called to grow the buffer to fit at least `n` elements more. - ASMJIT_INLINE Error _grow(size_t n) { - return PodVectorBase::_grow(n, sizeof(T)); - } - + ASMJIT_INLINE Error _grow(size_t n) { return PodVectorBase::_grow(n, sizeof(T)); } //! Realloc internal array to fit at least `n` items. - ASMJIT_INLINE Error _reserve(size_t n) { - return PodVectorBase::_reserve(n, sizeof(T)); - } + ASMJIT_INLINE Error _reserve(size_t n) { return PodVectorBase::_reserve(n, sizeof(T)); } // -------------------------------------------------------------------------- // [Ops] // -------------------------------------------------------------------------- @@ -339,6 +429,324 @@ struct PodList { Link* _last; }; +// ============================================================================ +// [asmjit::StringBuilder] +// ============================================================================ + +//! String builder. +//! +//! String builder was designed to be able to build a string using append like +//! operation to append numbers, other strings, or single characters. It can +//! allocate its own buffer or use a buffer created on the stack. +//! +//! String builder contains methods specific to AsmJit functionality, used for +//! logging or HTML output. +struct StringBuilder { + ASMJIT_NO_COPY(StringBuilder) + + // -------------------------------------------------------------------------- + // [Enums] + // -------------------------------------------------------------------------- + + //! \internal + //! + //! String operation. + ASMJIT_ENUM(StringOp) { + //! Replace the current string by a given content. + kStringOpSet = 0, + //! Append a given content to the current string. + kStringOpAppend = 1 + }; + + //! 
\internal + //! + //! String format flags. + ASMJIT_ENUM(StringFormatFlags) { + kStringFormatShowSign = 0x00000001, + kStringFormatShowSpace = 0x00000002, + kStringFormatAlternate = 0x00000004, + kStringFormatSigned = 0x80000000 + }; + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + ASMJIT_API StringBuilder(); + ASMJIT_API ~StringBuilder(); + + ASMJIT_INLINE StringBuilder(const _NoInit&) {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get string builder capacity. + ASMJIT_INLINE size_t getCapacity() const { return _capacity; } + //! Get length. + ASMJIT_INLINE size_t getLength() const { return _length; } + + //! Get null-terminated string data. + ASMJIT_INLINE char* getData() { return _data; } + //! Get null-terminated string data (const). + ASMJIT_INLINE const char* getData() const { return _data; } + + // -------------------------------------------------------------------------- + // [Prepare / Reserve] + // -------------------------------------------------------------------------- + + //! Prepare to set/append. + ASMJIT_API char* prepare(uint32_t op, size_t len); + + //! Reserve `to` bytes in string builder. + ASMJIT_API bool reserve(size_t to); + + // -------------------------------------------------------------------------- + // [Clear] + // -------------------------------------------------------------------------- + + //! Clear the content in String builder. 
+ ASMJIT_API void clear(); + + // -------------------------------------------------------------------------- + // [Op] + // -------------------------------------------------------------------------- + + ASMJIT_API bool _opString(uint32_t op, const char* str, size_t len = kInvalidIndex); + ASMJIT_API bool _opVFormat(uint32_t op, const char* fmt, va_list ap); + ASMJIT_API bool _opChar(uint32_t op, char c); + ASMJIT_API bool _opChars(uint32_t op, char c, size_t len); + ASMJIT_API bool _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0); + ASMJIT_API bool _opHex(uint32_t op, const void* data, size_t len); + + // -------------------------------------------------------------------------- + // [Set] + // -------------------------------------------------------------------------- + + //! Replace the current content by `str` of `len`. + ASMJIT_INLINE bool setString(const char* str, size_t len = kInvalidIndex) { + return _opString(kStringOpSet, str, len); + } + + //! Replace the current content by formatted string `fmt`. + ASMJIT_INLINE bool setVFormat(const char* fmt, va_list ap) { + return _opVFormat(kStringOpSet, fmt, ap); + } + + //! Replace the current content by formatted string `fmt`. + ASMJIT_API bool setFormat(const char* fmt, ...); + + //! Replace the current content by `c` character. + ASMJIT_INLINE bool setChar(char c) { + return _opChar(kStringOpSet, c); + } + + //! Replace the current content by `c` of `len`. + ASMJIT_INLINE bool setChars(char c, size_t len) { + return _opChars(kStringOpSet, c, len); + } + + //! Replace the current content by formatted integer `i`. + ASMJIT_INLINE bool setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { + return _opNumber(kStringOpSet, i, base, width, flags | kStringFormatSigned); + } + + //! Replace the current content by formatted integer `i`. 
+ ASMJIT_INLINE bool setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { + return _opNumber(kStringOpSet, i, base, width, flags); + } + + //! Replace the current content by the given `data` converted to a HEX string. + ASMJIT_INLINE bool setHex(const void* data, size_t len) { + return _opHex(kStringOpSet, data, len); + } + + // -------------------------------------------------------------------------- + // [Append] + // -------------------------------------------------------------------------- + + //! Append `str` of `len`. + ASMJIT_INLINE bool appendString(const char* str, size_t len = kInvalidIndex) { + return _opString(kStringOpAppend, str, len); + } + + //! Append a formatted string `fmt` to the current content. + ASMJIT_INLINE bool appendVFormat(const char* fmt, va_list ap) { + return _opVFormat(kStringOpAppend, fmt, ap); + } + + //! Append a formatted string `fmt` to the current content. + ASMJIT_API bool appendFormat(const char* fmt, ...); + + //! Append `c` character. + ASMJIT_INLINE bool appendChar(char c) { + return _opChar(kStringOpAppend, c); + } + + //! Append `c` of `len`. + ASMJIT_INLINE bool appendChars(char c, size_t len) { + return _opChars(kStringOpAppend, c, len); + } + + //! Append `i`. + ASMJIT_INLINE bool appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { + return _opNumber(kStringOpAppend, static_cast(i), base, width, flags | kStringFormatSigned); + } + + //! Append `i`. + ASMJIT_INLINE bool appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { + return _opNumber(kStringOpAppend, i, base, width, flags); + } + + //! Append the given `data` converted to a HEX string. + ASMJIT_INLINE bool appendHex(const void* data, size_t len) { + return _opHex(kStringOpAppend, data, len); + } + + // -------------------------------------------------------------------------- + // [_Append] + // -------------------------------------------------------------------------- + + //! 
Append `str` of `len`, inlined, without buffer overflow check. + ASMJIT_INLINE void _appendString(const char* str, size_t len = kInvalidIndex) { + // len should be a constant if we are inlining. + if (len == kInvalidIndex) { + char* p = &_data[_length]; + + while (*str) { + ASMJIT_ASSERT(p < _data + _capacity); + *p++ = *str++; + } + + *p = '\0'; + _length = (size_t)(p - _data); + } + else { + ASMJIT_ASSERT(_capacity - _length >= len); + + char* p = &_data[_length]; + char* pEnd = p + len; + + while (p < pEnd) + *p++ = *str++; + + *p = '\0'; + _length += len; + } + } + + //! Append `c` character, inlined, without buffer overflow check. + ASMJIT_INLINE void _appendChar(char c) { + ASMJIT_ASSERT(_capacity - _length >= 1); + + _data[_length] = c; + _length++; + _data[_length] = '\0'; + } + + //! Append `c` of `len`, inlined, without buffer overflow check. + ASMJIT_INLINE void _appendChars(char c, size_t len) { + ASMJIT_ASSERT(_capacity - _length >= len); + + char* p = &_data[_length]; + char* pEnd = p + len; + + while (p < pEnd) + *p++ = c; + + *p = '\0'; + _length += len; + } + + ASMJIT_INLINE void _appendUInt32(uint32_t i) { + char buf_[32]; + + char* pEnd = buf_ + ASMJIT_ARRAY_SIZE(buf_); + char* pBuf = pEnd; + + do { + uint32_t d = i / 10; + uint32_t r = i % 10; + + *--pBuf = static_cast(r + '0'); + i = d; + } while (i); + + ASMJIT_ASSERT(_capacity - _length >= (size_t)(pEnd - pBuf)); + char* p = &_data[_length]; + + do { + *p++ = *pBuf; + } while (++pBuf != pEnd); + + *p = '\0'; + _length = (size_t)(p - _data); + } + + // -------------------------------------------------------------------------- + // [Eq] + // -------------------------------------------------------------------------- + + //! Check for equality with other `str` of `len`. + ASMJIT_API bool eq(const char* str, size_t len = kInvalidIndex) const; + //! Check for equality with `other`. 
+ ASMJIT_INLINE bool eq(const StringBuilder& other) const { return eq(other._data); } + + // -------------------------------------------------------------------------- + // [Operator Overload] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE bool operator==(const StringBuilder& other) const { return eq(other); } + ASMJIT_INLINE bool operator!=(const StringBuilder& other) const { return !eq(other); } + + ASMJIT_INLINE bool operator==(const char* str) const { return eq(str); } + ASMJIT_INLINE bool operator!=(const char* str) const { return !eq(str); } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! String data. + char* _data; + //! Length. + size_t _length; + //! Capacity. + size_t _capacity; + //! Whether the string can be freed. + size_t _canFree; +}; + +// ============================================================================ +// [asmjit::StringBuilderTmp] +// ============================================================================ + +//! Temporary string builder, has statically allocated `N` bytes. +template +struct StringBuilderTmp : public StringBuilder { + ASMJIT_NO_COPY(StringBuilderTmp) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE StringBuilderTmp() : StringBuilder(NoInit) { + _data = _embeddedData; + _data[0] = 0; + + _length = 0; + _capacity = N; + _canFree = false; + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Embedded data. + char _embeddedData[static_cast( + N + 1 + sizeof(intptr_t)) & ~static_cast(sizeof(intptr_t) - 1)]; +}; + //! 
\} } // asmjit namespace diff --git a/src/asmjit/base/context_p.h b/src/asmjit/base/context_p.h deleted file mode 100644 index 5c0ed45..0000000 --- a/src/asmjit/base/context_p.h +++ /dev/null @@ -1,350 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_CONTEXT_P_H -#define _ASMJIT_BASE_CONTEXT_P_H - -#include "../build.h" -#if !defined(ASMJIT_DISABLE_COMPILER) - -// [Dependencies - AsmJit] -#include "../base/compiler.h" -#include "../base/zone.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base_compiler -//! \{ - -// ============================================================================ -// [asmjit::Context] -// ============================================================================ - -//! \internal -//! -//! Code generation context is the logic behind `Compiler`. The context is -//! used to compile the code stored in `Compiler`. -struct Context { - ASMJIT_NO_COPY(Context) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - Context(Compiler* compiler); - virtual ~Context(); - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - //! Reset the whole context. - virtual void reset(bool releaseMemory = false); - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get compiler. - ASMJIT_INLINE Compiler* getCompiler() const { return _compiler; } - - //! Get function. - ASMJIT_INLINE FuncNode* getFunc() const { return _func; } - //! Get stop node. 
- ASMJIT_INLINE Node* getStop() const { return _stop; } - - //! Get start of the current scope. - ASMJIT_INLINE Node* getStart() const { return _start; } - //! Get end of the current scope. - ASMJIT_INLINE Node* getEnd() const { return _end; } - - //! Get extra block. - ASMJIT_INLINE Node* getExtraBlock() const { return _extraBlock; } - //! Set extra block. - ASMJIT_INLINE void setExtraBlock(Node* node) { _extraBlock = node; } - - // -------------------------------------------------------------------------- - // [Error] - // -------------------------------------------------------------------------- - - //! Get the last error code. - ASMJIT_INLINE Error getError() const { - return getCompiler()->getError(); - } - - //! Set the last error code and propagate it through the error handler. - ASMJIT_INLINE Error setError(Error error, const char* message = NULL) { - return getCompiler()->setError(error, message); - } - - // -------------------------------------------------------------------------- - // [State] - // -------------------------------------------------------------------------- - - //! Get current state. - ASMJIT_INLINE VarState* getState() const { - return _state; - } - - //! Load current state from `target` state. - virtual void loadState(VarState* src) = 0; - - //! Save current state, returning new `VarState` instance. - virtual VarState* saveState() = 0; - - //! Change the current state to `target` state. - virtual void switchState(VarState* src) = 0; - - //! Change the current state to the intersection of two states `a` and `b`. 
- virtual void intersectStates(VarState* a, VarState* b) = 0; - - // -------------------------------------------------------------------------- - // [Context] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE Error _registerContextVar(VarData* vd) { - if (vd->hasContextId()) - return kErrorOk; - - uint32_t cid = static_cast(_contextVd.getLength()); - ASMJIT_PROPAGATE_ERROR(_contextVd.append(vd)); - - vd->setContextId(cid); - return kErrorOk; - } - - // -------------------------------------------------------------------------- - // [Mem] - // -------------------------------------------------------------------------- - - MemCell* _newVarCell(VarData* vd); - MemCell* _newStackCell(uint32_t size, uint32_t alignment); - - ASMJIT_INLINE MemCell* getVarCell(VarData* vd) { - MemCell* cell = vd->getMemCell(); - return cell ? cell : _newVarCell(vd); - } - - virtual Error resolveCellOffsets(); - - // -------------------------------------------------------------------------- - // [Bits] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE VarBits* newBits(uint32_t len) { - return static_cast( - _baseZone.allocZeroed(static_cast(len) * VarBits::kEntitySize)); - } - - ASMJIT_INLINE VarBits* copyBits(const VarBits* src, uint32_t len) { - return static_cast( - _baseZone.dup(src, static_cast(len) * VarBits::kEntitySize)); - } - - // -------------------------------------------------------------------------- - // [Fetch] - // -------------------------------------------------------------------------- - - //! Fetch. - //! - //! Fetch iterates over all nodes and gathers information about all variables - //! used. The process generates information required by register allocator, - //! variable liveness analysis and translator. 
- virtual Error fetch() = 0; - - // -------------------------------------------------------------------------- - // [Unreachable Code] - // -------------------------------------------------------------------------- - - //! Add unreachable-flow data to the unreachable flow list. - ASMJIT_INLINE Error addUnreachableNode(Node* node) { - PodList::Link* link = _baseZone.allocT::Link>(); - if (link == NULL) - return setError(kErrorNoHeapMemory); - - link->setValue(node); - _unreachableList.append(link); - - return kErrorOk; - } - - //! Remove unreachable code. - virtual Error removeUnreachableCode(); - - // -------------------------------------------------------------------------- - // [Code-Flow] - // -------------------------------------------------------------------------- - - //! Add returning node (i.e. node that returns and where liveness analysis - //! should start). - ASMJIT_INLINE Error addReturningNode(Node* node) { - PodList::Link* link = _baseZone.allocT::Link>(); - if (link == NULL) - return setError(kErrorNoHeapMemory); - - link->setValue(node); - _returningList.append(link); - - return kErrorOk; - } - - //! Add jump-flow data to the jcc flow list. - ASMJIT_INLINE Error addJccNode(Node* node) { - PodList::Link* link = _baseZone.allocT::Link>(); - if (link == NULL) - return setError(kErrorNoHeapMemory); - - link->setValue(node); - _jccList.append(link); - - return kErrorOk; - } - - // -------------------------------------------------------------------------- - // [Analyze] - // -------------------------------------------------------------------------- - - //! Perform variable liveness analysis. - //! - //! Analysis phase iterates over nodes in reverse order and generates a bit - //! array describing variables that are alive at every node in the function. - //! When the analysis start all variables are assumed dead. When a read or - //! read/write operations of a variable is detected the variable becomes - //! 
alive; when only write operation is detected the variable becomes dead. - //! - //! When a label is found all jumps to that label are followed and analysis - //! repeats until all variables are resolved. - virtual Error livenessAnalysis(); - - // -------------------------------------------------------------------------- - // [Annotate] - // -------------------------------------------------------------------------- - - virtual Error annotate() = 0; - - // -------------------------------------------------------------------------- - // [Translate] - // -------------------------------------------------------------------------- - - //! Translate code by allocating registers and handling state changes. - virtual Error translate() = 0; - - // -------------------------------------------------------------------------- - // [Schedule] - // -------------------------------------------------------------------------- - - virtual Error schedule(); - - // -------------------------------------------------------------------------- - // [Cleanup] - // -------------------------------------------------------------------------- - - virtual void cleanup(); - - // -------------------------------------------------------------------------- - // [Compile] - // -------------------------------------------------------------------------- - - virtual Error compile(FuncNode* func); - - // -------------------------------------------------------------------------- - // [Serialize] - // -------------------------------------------------------------------------- - - virtual Error serialize(Assembler* assembler, Node* start, Node* stop) = 0; - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Compiler. - Compiler* _compiler; - //! Function. - FuncNode* _func; - - //! Zone allocator. - Zone _baseZone; - - //! \internal - //! - //! 
Offset (how many bytes to add) to `VarMap` to get `VarAttr` array. Used - //! by liveness analysis shared across all backends. This is needed because - //! `VarMap` is a base class for a specialized version that liveness analysis - //! doesn't use, it just needs `VarAttr` array. - uint32_t _varMapToVaListOffset; - - //! Start of the current active scope. - Node* _start; - //! End of the current active scope. - Node* _end; - - //! Node that is used to insert extra code after the function body. - Node* _extraBlock; - //! Stop node. - Node* _stop; - - //! Unreachable nodes. - PodList _unreachableList; - //! Returning nodes. - PodList _returningList; - //! Jump nodes. - PodList _jccList; - - //! All variables used by the current function. - PodVector _contextVd; - - //! Memory used to spill variables. - MemCell* _memVarCells; - //! Memory used to alloc memory on the stack. - MemCell* _memStackCells; - - //! Count of 1-byte cells. - uint32_t _mem1ByteVarsUsed; - //! Count of 2-byte cells. - uint32_t _mem2ByteVarsUsed; - //! Count of 4-byte cells. - uint32_t _mem4ByteVarsUsed; - //! Count of 8-byte cells. - uint32_t _mem8ByteVarsUsed; - //! Count of 16-byte cells. - uint32_t _mem16ByteVarsUsed; - //! Count of 32-byte cells. - uint32_t _mem32ByteVarsUsed; - //! Count of 64-byte cells. - uint32_t _mem64ByteVarsUsed; - //! Count of stack memory cells. - uint32_t _memStackCellsUsed; - - //! Maximum memory alignment used by the function. - uint32_t _memMaxAlign; - //! Count of bytes used by variables. - uint32_t _memVarTotal; - //! Count of bytes used by stack. - uint32_t _memStackTotal; - //! Count of bytes used by variables and stack after alignment. - uint32_t _memAllTotal; - - //! Default lenght of annotated instruction. - uint32_t _annotationLength; - - //! Current state (used by register allocator). - VarState* _state; -}; - -//! 
\} - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" - -// [Guard] -#endif // !ASMJIT_DISABLE_COMPILER -#endif // _ASMJIT_BASE_CONTEXT_P_H diff --git a/src/asmjit/base/cpuinfo.cpp b/src/asmjit/base/cpuinfo.cpp index 1a6d775..8e0760d 100644 --- a/src/asmjit/base/cpuinfo.cpp +++ b/src/asmjit/base/cpuinfo.cpp @@ -10,14 +10,14 @@ // [Dependencies - AsmJit] #include "../base/cpuinfo.h" -#if defined(ASMJIT_ARCH_X86) || defined(ASMJIT_ARCH_X64) +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 #include "../x86/x86cpuinfo.h" #else // ? #endif // [Dependencies - Posix] -#if defined(ASMJIT_OS_POSIX) +#if ASMJIT_OS_POSIX # include # include # include @@ -34,11 +34,11 @@ namespace asmjit { // ============================================================================ uint32_t CpuInfo::detectHwThreadsCount() { -#if defined(ASMJIT_OS_WINDOWS) +#if ASMJIT_OS_WINDOWS SYSTEM_INFO info; ::GetSystemInfo(&info); return info.dwNumberOfProcessors; -#elif defined(ASMJIT_OS_POSIX) && defined(_SC_NPROCESSORS_ONLN) +#elif ASMJIT_OS_POSIX && defined(_SC_NPROCESSORS_ONLN) // It seems that sysconf returns the number of "logical" processors on both // mac and linux. So we get the number of "online logical" processors. long res = ::sysconf(_SC_NPROCESSORS_ONLN); @@ -54,21 +54,21 @@ uint32_t CpuInfo::detectHwThreadsCount() { // [asmjit::CpuInfo - GetHost] // ============================================================================ -#if defined(ASMJIT_ARCH_X86) || defined(ASMJIT_ARCH_X64) +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 struct AutoX86CpuInfo : public X86CpuInfo { ASMJIT_INLINE AutoX86CpuInfo() : X86CpuInfo() { X86CpuUtil::detect(this); } }; #else -#error "AsmJit - Unsupported CPU." +#error "[asmjit] Unsupported CPU." #endif const CpuInfo* CpuInfo::getHost() { -#if defined(ASMJIT_ARCH_X86) || defined(ASMJIT_ARCH_X64) +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 static AutoX86CpuInfo cpuInfo; #else -#error "AsmJit - Unsupported CPU." +#error "[asmjit] Unsupported CPU." 
#endif return &cpuInfo; } diff --git a/src/asmjit/base/cpuinfo.h b/src/asmjit/base/cpuinfo.h index 04abb62..0def203 100644 --- a/src/asmjit/base/cpuinfo.h +++ b/src/asmjit/base/cpuinfo.h @@ -16,7 +16,7 @@ namespace asmjit { -//! \addtogroup asmjit_base_general +//! \addtogroup asmjit_base //! \{ // ============================================================================ @@ -37,9 +37,9 @@ ASMJIT_ENUM(CpuVendor) { //! Intel vendor. kCpuVendorIntel = 1, //! AMD vendor. - kCpuVendorAmd = 2, + kCpuVendorAMD = 2, //! VIA vendor. - kCpuVendorVia = 3 + kCpuVendorVIA = 3 }; // ============================================================================ diff --git a/src/asmjit/base/cputicks.cpp b/src/asmjit/base/cputicks.cpp deleted file mode 100644 index d3fb393..0000000 --- a/src/asmjit/base/cputicks.cpp +++ /dev/null @@ -1,131 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies - AsmJit] -#include "../base/cputicks.h" - -// [Dependencies - Posix] -#if defined(ASMJIT_OS_POSIX) -# include -# include -#endif // ASMJIT_OS_POSIX - -// [Dependencies - Mac] -#if defined(ASMJIT_OS_MAC) -# include -#endif // ASMJIT_OS_MAC - -// [Dependencies - Windows] -#if defined(ASMJIT_OS_WINDOWS) -// `_InterlockedCompareExchange` is only available as intrinsic (MS Compiler). 
-# if defined(_MSC_VER) && _MSC_VER >= 1400 -# include -# pragma intrinsic(_InterlockedCompareExchange) -# else -# define _InterlockedCompareExchange InterlockedCompareExchange -# endif // _MSC_VER -#endif // ASMJIT_OS_WINDOWS - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::CpuTicks - Windows] -// ============================================================================ - -#if defined(ASMJIT_OS_WINDOWS) -static volatile uint32_t CpuTicks_hiResOk; -static volatile double CpuTicks_hiResFreq; - -uint32_t CpuTicks::now() { - do { - uint32_t hiResOk = CpuTicks_hiResOk; - - if (hiResOk == 1) { - LARGE_INTEGER now; - if (!::QueryPerformanceCounter(&now)) - break; - return (int64_t)(double(now.QuadPart) / CpuTicks_hiResFreq); - } - - if (hiResOk == 0) { - LARGE_INTEGER qpf; - if (!::QueryPerformanceFrequency(&qpf)) { - _InterlockedCompareExchange((LONG*)&CpuTicks_hiResOk, 0xFFFFFFFF, 0); - break; - } - - LARGE_INTEGER now; - if (!::QueryPerformanceCounter(&now)) { - _InterlockedCompareExchange((LONG*)&CpuTicks_hiResOk, 0xFFFFFFFF, 0); - break; - } - - double freqDouble = double(qpf.QuadPart) / 1000.0; - - CpuTicks_hiResFreq = freqDouble; - _InterlockedCompareExchange((LONG*)&CpuTicks_hiResOk, 1, 0); - - return static_cast( - static_cast(double(now.QuadPart) / freqDouble) & 0xFFFFFFFF); - } - } while (0); - - // Bail to a less precise GetTickCount(). - return ::GetTickCount(); -} - -// ============================================================================ -// [asmjit::CpuTicks - Mac] -// ============================================================================ - -#elif defined(ASMJIT_OS_MAC) -static mach_timebase_info_data_t CpuTicks_machTime; - -uint32_t CpuTicks::now() { - // Initialize the first time CpuTicks::now() is called (See Apple's QA1398). 
- if (CpuTicks_machTime.denom == 0) { - if (mach_timebase_info(&CpuTicks_machTime) != KERN_SUCCESS) - return 0; - } - - // mach_absolute_time() returns nanoseconds, we need just milliseconds. - uint64_t t = mach_absolute_time() / 1000000; - - t = t * CpuTicks_machTime.numer / CpuTicks_machTime.denom; - return static_cast(t & 0xFFFFFFFFU); -} - -// ============================================================================ -// [asmjit::CpuTicks - Posix] -// ============================================================================ - -#else -uint32_t CpuTicks::now() { -#if defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0 - struct timespec ts; - - if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) - return 0; - - uint64_t t = (uint64_t(ts.tv_sec ) * 1000) + (uint64_t(ts.tv_nsec) / 1000000); - return static_cast(t & 0xFFFFFFFFU); -#else // _POSIX_MONOTONIC_CLOCK -#error "AsmJit - Unsupported OS." - return 0; -#endif // _POSIX_MONOTONIC_CLOCK -} -#endif // ASMJIT_OS - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" diff --git a/src/asmjit/base/cputicks.h b/src/asmjit/base/cputicks.h deleted file mode 100644 index 1732c2e..0000000 --- a/src/asmjit/base/cputicks.h +++ /dev/null @@ -1,40 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_CPUTICKS_H -#define _ASMJIT_BASE_CPUTICKS_H - -// [Dependencies - AsmJit] -#include "../base/globals.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base_util -//! \{ - -// ============================================================================ -// [asmjit::CpuTicks] -// ============================================================================ - -//! CPU ticks utilities. -struct CpuTicks { - //! Get the current CPU ticks for benchmarking (1ms resolution). - static ASMJIT_API uint32_t now(); -}; - -//! 
\} - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_CPUTICKS_H diff --git a/src/asmjit/base/error.cpp b/src/asmjit/base/error.cpp deleted file mode 100644 index 51516fb..0000000 --- a/src/asmjit/base/error.cpp +++ /dev/null @@ -1,83 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies - AsmJit] -#include "../base/error.h" -#include "../base/intutil.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::ErrorHandler - Construction / Destruction] -// ============================================================================ - -ErrorHandler::ErrorHandler() {} -ErrorHandler::~ErrorHandler() {} - -// ============================================================================ -// [asmjit::ErrorHandler - Interface] -// ============================================================================ - -ErrorHandler* ErrorHandler::addRef() const { - return const_cast(this); -} - -void ErrorHandler::release() {} - -// ============================================================================ -// [asmjit::ErrorUtil - AsString] -// ============================================================================ - -#if !defined(ASMJIT_DISABLE_NAMES) -static const char errorMessages[] = { - "Ok\0" - "No heap memory\0" - "No virtual memory\0" - "Invalid argument\0" - "Invalid state\0" - "No code generated\0" - "Code too large\0" - "Label already bound\0" - "Unknown instruction\0" - "Illegal instruction\0" - "Illegal addressing\0" - "Illegal displacement\0" - "Overlapped arguments\0" - "Unknown error\0" -}; - -static const char* findPackedString(const char* p, uint32_t id, uint32_t maxId) { - uint32_t i = 0; - - if (id > maxId) - id = maxId; - - while (i < id) { - while (p[0]) - 
p++; - - p++; - i++; - } - - return p; -} - -const char* ErrorUtil::asString(Error e) { - return findPackedString(errorMessages, e, kErrorCount); -} -#endif // ASMJIT_DISABLE_NAMES - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" diff --git a/src/asmjit/base/error.h b/src/asmjit/base/error.h deleted file mode 100644 index 6f76934..0000000 --- a/src/asmjit/base/error.h +++ /dev/null @@ -1,218 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_ERROR_H -#define _ASMJIT_BASE_ERROR_H - -// [Dependencies - AsmJit] -#include "../base/globals.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base_general -//! \{ - -// ============================================================================ -// [asmjit::ErrorCode] -// ============================================================================ - -//! AsmJit error codes. -ASMJIT_ENUM(ErrorCode) { - //! No error (success). - //! - //! This is default state and state you want. - kErrorOk = 0, - - //! Heap memory allocation failed. - kErrorNoHeapMemory = 1, - - //! Virtual memory allocation failed. - kErrorNoVirtualMemory = 2, - - //! Invalid argument. - kErrorInvalidArgument = 3, - - //! Invalid state. - kErrorInvalidState = 4, - - //! No code generated. - //! - //! Returned by runtime if the code-generator contains no code. - kErrorNoCodeGenerated = 5, - - //! Code generated is too large to fit in memory reserved. - //! - //! Returned by `StaticRuntime` in case that the code generated is too large - //! to fit in the memory already reserved for it. - kErrorCodeTooLarge = 6, - - //! Label is already bound. - kErrorLabelAlreadyBound = 7, - - //! Unknown instruction (an instruction ID is out of bounds or instruction - //! name is invalid). - kErrorUnknownInst = 8, - - //! Illegal instruction. - //! - //! 
This status code can also be returned in X64 mode if AH, BH, CH or DH - //! registers have been used together with a REX prefix. The instruction - //! is not encodable in such case. - //! - //! Example of raising `kErrorIllegalInst` error. - //! - //! ~~~ - //! // Invalid address size. - //! a.mov(dword_ptr(eax), al); - //! - //! // Undecodable instruction - AH used with R10, however R10 can only be - //! // encoded by using REX prefix, which conflicts with AH. - //! a.mov(byte_ptr(r10), ah); - //! ~~~ - //! - //! \note In debug mode assertion is raised instead of returning an error. - kErrorIllegalInst = 9, - - //! Illegal (unencodable) addressing used. - kErrorIllegalAddresing = 10, - - //! Illegal (unencodable) displacement used. - //! - //! X86/X64 - //! ------- - //! - //! Short form of jump instruction has been used, but the displacement is out - //! of bounds. - kErrorIllegalDisplacement = 11, - - //! A variable has been assigned more than once to a function argument (Compiler). - kErrorOverlappedArgs = 12, - - //! Count of AsmJit error codes. - kErrorCount = 13 -}; - -// ============================================================================ -// [asmjit::Error] -// ============================================================================ - -//! AsmJit error type (unsigned integer). -typedef uint32_t Error; - -// ============================================================================ -// [asmjit::ErrorHandler] -// ============================================================================ - -//! Error handler. -//! -//! Error handler can be used to override the default behavior of `CodeGen` -//! error handling and propagation. See `handleError` on how to override it. -//! -//! Please note that `addRef` and `release` functions are used, but there is -//! no reference counting implemented by default, reimplement to change the -//! default behavior. 
-struct ASMJIT_VCLASS ErrorHandler { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `ErrorHandler` instance. - ASMJIT_API ErrorHandler(); - //! Destroy the `ErrorHandler` instance. - ASMJIT_API virtual ~ErrorHandler(); - - // -------------------------------------------------------------------------- - // [Interface] - // -------------------------------------------------------------------------- - - //! Reference this error handler. - //! - //! \note This member function is provided for convenience. The default - //! implementation does nothing. If you are working in environment where - //! multiple `ErrorHandler` instances are used by a different code generators - //! you may provide your own functionality for reference counting. In that - //! case `addRef()` and `release()` functions should be overridden. - ASMJIT_API virtual ErrorHandler* addRef() const; - - //! Release this error handler. - //! - //! \note This member function is provided for convenience. See `addRef()` - //! for more detailed information related to reference counting. - ASMJIT_API virtual void release(); - - //! Error handler (pure). - //! - //! Error handler is called when an error happened. An error can happen in - //! many places, but error handler is mostly used by `Assembler` and - //! `Compiler` classes to report anything that may cause incorrect code - //! generation. There are multiple ways how the error handler can be used - //! and each has it's pros/cons. - //! - //! AsmJit library doesn't use exceptions and can be compiled with or without - //! exception handling support. Even if the AsmJit library is compiled without - //! exceptions it is exception-safe and handleError() can report an incoming - //! error by throwing an exception of any type. It's guaranteed that the - //! 
exception won't be catched by AsmJit and will be propagated to the code - //! calling AsmJit `Assembler` or `Compiler` methods. Alternative to - //! throwing an exception is using `setjmp()` and `longjmp()` pair available - //! in the standard C library. - //! - //! If the exception or setjmp() / longjmp() mechanism is used, the state of - //! the `BaseAssember` or `Compiler` is unchanged and if it's possible the - //! execution (instruction serialization) can continue. However if the error - //! happened during any phase that translates or modifies the stored code - //! (for example relocation done by `Assembler` or analysis/translation - //! done by `Compiler`) the execution can't continue and the error will - //! be also stored in `Assembler` or `Compiler`. - //! - //! Finally, if no exceptions nor setjmp() / longjmp() mechanisms were used, - //! you can still implement a compatible handling by returning from your - //! error handler. Returning `true` means that error was reported and AsmJit - //! should continue execution, but `false` sets the rror immediately to the - //! `Assembler` or `Compiler` and execution shouldn't continue (this - //! is the default behavior in case no error handler is used). - virtual bool handleError(Error code, const char* message) = 0; -}; - -// ============================================================================ -// [asmjit::ErrorUtil] -// ============================================================================ - -//! Error utilities. -struct ErrorUtil { -#if !defined(ASMJIT_DISABLE_NAMES) - //! Get a printable version of AsmJit `Error` code. - static ASMJIT_API const char* asString(Error code); -#endif // ASMJIT_DISABLE_NAMES -}; - -//! \} - -// ============================================================================ -// [ASMJIT_PROPAGATE_ERROR] -// ============================================================================ - -//! \internal -//! -//! Used by AsmJit to return the `_Exp_` result if it's an error. 
-#define ASMJIT_PROPAGATE_ERROR(_Exp_) \ - do { \ - ::asmjit::Error errval_ = (_Exp_); \ - if (errval_ != ::asmjit::kErrorOk) \ - return errval_; \ - } while (0) - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_ERROR_H diff --git a/src/asmjit/base/globals.cpp b/src/asmjit/base/globals.cpp index 2971e00..b442901 100644 --- a/src/asmjit/base/globals.cpp +++ b/src/asmjit/base/globals.cpp @@ -16,11 +16,75 @@ namespace asmjit { // ============================================================================ -// [asmjit::Assert] +// [asmjit::DebugUtils] // ============================================================================ -void assertionFailed(const char* exp, const char* file, int line) { - ::fprintf(stderr, "Assertion failed: %s\n, file %s, line %d\n", exp, file, line); +#if !defined(ASMJIT_DISABLE_NAMES) +static const char errorMessages[] = { + "Ok\0" + "No heap memory\0" + "No virtual memory\0" + "Invalid argument\0" + "Invalid state\0" + "Invalid architecture\0" + "Not initialized\0" + "No code generated\0" + "Code too large\0" + "Label already bound\0" + "Unknown instruction\0" + "Illegal instruction\0" + "Illegal addressing\0" + "Illegal displacement\0" + "Overlapped arguments\0" + "Unknown error\0" +}; + +static const char* findPackedString(const char* p, uint32_t id, uint32_t maxId) { + uint32_t i = 0; + + if (id > maxId) + id = maxId; + + while (i < id) { + while (p[0]) + p++; + + p++; + i++; + } + + return p; +} +#endif // ASMJIT_DISABLE_NAMES + +const char* DebugUtils::errorAsString(Error e) { +#if !defined(ASMJIT_DISABLE_NAMES) + return findPackedString(errorMessages, e, kErrorCount); +#else + static const char noMessage[] = ""; + return noMessage; +#endif +} + +void DebugUtils::debugOutput(const char* str) { +#if ASMJIT_OS_WINDOWS + ::OutputDebugStringA(str); +#else + ::fputs(str, stderr); +#endif +} + +void DebugUtils::assertionFailed(const char* file, int line, const char* msg) { + char 
str[1024]; + + snprintf(str, 1024, + "[asmjit] Assertion failed at %s (line %d):\n" + "[asmjit] %s\n", file, line, msg); + + // Support buggy `snprintf` implementations. + str[1023] = '\0'; + + debugOutput(str); ::abort(); } diff --git a/src/asmjit/base/globals.h b/src/asmjit/base/globals.h index 5c1bcae..84ef188 100644 --- a/src/asmjit/base/globals.h +++ b/src/asmjit/base/globals.h @@ -16,21 +16,24 @@ namespace asmjit { -//! \addtogroup asmjit_base_general +//! \addtogroup asmjit_base //! \{ // ============================================================================ -// [asmjit::Ptr / SignedPtr] +// [asmjit::TypeDefs] // ============================================================================ +//! AsmJit error code (unsigned integer). +typedef uint32_t Error; + //! 64-bit unsigned pointer, compatible with JIT and non-JIT generators. //! //! This is the preferred pointer type to use with AsmJit library. It has a //! capability to hold any pointer for any architecture making it an ideal -//! candidate for cross-platform code generation. +//! candidate for a cross-platform code generator. typedef uint64_t Ptr; -//! 64-bit signed pointer, like \ref Ptr, but made signed. +//! like \ref Ptr, but signed. typedef int64_t SignedPtr; // ============================================================================ @@ -60,8 +63,8 @@ ASMJIT_ENUM(GlobalDefs) { //! Host memory allocator overhead. //! //! The overhead is decremented from all zone allocators so the operating - //! system doesn't have allocate extra virtual page to keep tract of the - //! requested memory block. + //! system doesn't have to allocate one extra virtual page to keep track of + //! the requested memory block. //! //! The number is actually a guess. kMemAllocOverhead = sizeof(intptr_t) * 4, @@ -82,30 +85,119 @@ ASMJIT_ENUM(ArchId) { //! No/Unknown architecture. kArchNone = 0, - //! X86 architecture. + //! X86 architecture (32-bit). kArchX86 = 1, - //! X64 architecture, also called AMD64. + //!
X64 architecture (64-bit), also called AMD64. kArchX64 = 2, - //! Arm architecture. + //! X32 architecture (64-bit with 32-bit pointers) (NOT USED ATM). + kArchX32 = 3, + + //! Arm architecture (32-bit). kArchArm = 4, + //! Arm64 architecture (64-bit). + kArchArm64 = 5, -#if defined(ASMJIT_ARCH_X86) +#if ASMJIT_ARCH_X86 kArchHost = kArchX86, -#endif // ASMJIT_ARCH_X86 - -#if defined(ASMJIT_ARCH_X64) +#elif ASMJIT_ARCH_X64 kArchHost = kArchX64, -#endif // ASMJIT_ARCH_X64 - -#if defined(ASMJIT_ARCH_ARM) +#elif ASMJIT_ARCH_ARM kArchHost = kArchArm, -#endif // ASMJIT_ARCH_ARM +#elif ASMJIT_ARCH_ARM64 + kArchHost = kArchArm64, +#endif //! Whether the host is 64-bit. kArchHost64Bit = sizeof(intptr_t) >= 8 }; +// ============================================================================ +// [asmjit::ErrorCode] +// ============================================================================ + +//! AsmJit error codes. +ASMJIT_ENUM(ErrorCode) { + //! No error (success). + //! + //! This is default state and state you want. + kErrorOk = 0, + + //! Heap memory allocation failed. + kErrorNoHeapMemory, + + //! Virtual memory allocation failed. + kErrorNoVirtualMemory, + + //! Invalid argument. + kErrorInvalidArgument, + + //! Invalid state. + kErrorInvalidState, + + //! Invalid architecture. + kErrorInvalidArch, + + //! The object is not initialized. + kErrorNotInitialized, + + //! No code generated. + //! + //! Returned by runtime if the code-generator contains no code. + kErrorNoCodeGenerated, + + //! Code generated is too large to fit in memory reserved. + //! + //! Returned by `StaticRuntime` in case that the code generated is too large + //! to fit in the memory already reserved for it. + kErrorCodeTooLarge, + + //! Label is already bound. + kErrorLabelAlreadyBound, + + //! Unknown instruction (an instruction ID is out of bounds or instruction + //! name is invalid). + kErrorUnknownInst, + + //! Illegal instruction. + //! + //! 
This status code can also be returned in X64 mode if AH, BH, CH or DH + //! registers have been used together with a REX prefix. The instruction + //! is not encodable in such case. + //! + //! Example of raising `kErrorIllegalInst` error. + //! + //! ~~~ + //! // Invalid address size. + //! a.mov(dword_ptr(eax), al); + //! + //! // Undecodable instruction - AH used with R10, however R10 can only be + //! // encoded by using REX prefix, which conflicts with AH. + //! a.mov(byte_ptr(r10), ah); + //! ~~~ + //! + //! \note In debug mode assertion is raised instead of returning an error. + kErrorIllegalInst, + + //! Illegal (unencodable) addressing used. + kErrorIllegalAddresing, + + //! Illegal (unencodable) displacement used. + //! + //! X86/X64 + //! ------- + //! + //! Short form of jump instruction has been used, but the displacement is out + //! of bounds. + kErrorIllegalDisplacement, + + //! A variable has been assigned more than once to a function argument (Compiler). + kErrorOverlappedArgs, + + //! Count of AsmJit error codes. + kErrorCount +}; + //! \} // ============================================================================ @@ -121,41 +213,74 @@ static const _NoInit NoInit = {}; #endif // !ASMJIT_DOCGEN // ============================================================================ -// [asmjit::Assert] +// [asmjit::DebugUtils] // ============================================================================ -//! \addtogroup asmjit_base_general +namespace DebugUtils { + +//! Get a printable version of AsmJit `Error` code. +ASMJIT_API const char* errorAsString(Error code); + +//! \addtogroup asmjit_base //! \{ +//! Called in debug build to output a debugging message caused by assertion +//! failure or tracing. +ASMJIT_API void debugOutput(const char* str); + //! Called in debug build on assertion failure. //! -//! \param exp Expression that failed. //! \param file Source file name where it happened. //! \param line Line in the source file. +//! 
\param msg Message to display. //! //! If you have problems with assertions put a breakpoint at assertionFailed() //! function (asmjit/base/globals.cpp) and check the call stack to locate the //! failing code. -ASMJIT_API void assertionFailed(const char* exp, const char* file, int line); - -#if defined(ASMJIT_DEBUG) -#define ASMJIT_ASSERT(_Exp_) \ - do { \ - if (!(_Exp_)) ::asmjit::assertionFailed(#_Exp_, __FILE__, __LINE__); \ - } while (0) -#else -#define ASMJIT_ASSERT(_Exp_) ASMJIT_NOP() -#endif // DEBUG +ASMJIT_API void assertionFailed(const char* file, int line, const char* msg); //! \} +} // DebugUtils namespace } // asmjit namespace +// ============================================================================ +// [ASMJIT_ASSERT] +// ============================================================================ + +#if defined(ASMJIT_DEBUG) +# define ASMJIT_ASSERT(exp) \ + do { \ + if (!(exp)) { \ + ::asmjit::DebugUtils::assertionFailed( \ + __FILE__ + ::asmjit::DebugUtils::kSourceRelativePathOffset, \ + __LINE__, \ + #exp); \ + } \ + } while (0) +#else +# define ASMJIT_ASSERT(exp) ASMJIT_NOP +#endif // DEBUG + +// ============================================================================ +// [ASMJIT_PROPAGATE_ERROR] +// ============================================================================ + +//! \internal +//! +//! Used by AsmJit to return the `_Exp_` result if it's an error. +#define ASMJIT_PROPAGATE_ERROR(_Exp_) \ + do { \ + ::asmjit::Error _errval = (_Exp_); \ + if (_errval != ::asmjit::kErrorOk) \ + return _errval; \ + } while (0) + // ============================================================================ // [asmjit_cast<>] // ============================================================================ -//! \addtogroup asmjit_base_util +//! \addtogroup asmjit_base //! \{ //! Cast used to cast pointer to function. 
It's like reinterpret_cast<>, diff --git a/src/asmjit/base/hlstream.cpp b/src/asmjit/base/hlstream.cpp new file mode 100644 index 0000000..66c774f --- /dev/null +++ b/src/asmjit/base/hlstream.cpp @@ -0,0 +1,20 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +// [Export] +#define ASMJIT_EXPORTS + +// [Dependencies - AsmJit] +#include "../base/hlstream.h" + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { +} // asmjit namespace + +// [Api-End] +#include "../apiend.h" diff --git a/src/asmjit/base/hlstream.h b/src/asmjit/base/hlstream.h new file mode 100644 index 0000000..d0aa549 --- /dev/null +++ b/src/asmjit/base/hlstream.h @@ -0,0 +1,1135 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +// [Guard] +#ifndef _ASMJIT_BASE_HLSTREAM_H +#define _ASMJIT_BASE_HLSTREAM_H + +#include "../build.h" +#if !defined(ASMJIT_DISABLE_COMPILER) + +// [Dependencies - AsmJit] +#include "../base/assembler.h" +#include "../base/operand.h" + +// TODO: Cannot depend on it. +#include "../base/compilerfunc.h" + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { + +// ============================================================================ +// [Forward Declarations] +// ============================================================================ + +struct Compiler; +struct VarData; +struct VarState; +struct VarMap; + +struct HLInst; +struct HLJump; +struct HLLabel; +struct HLSentinel; + +//! \addtogroup asmjit_base +//! \{ + +// ============================================================================ +// [asmjit::HLNodeType] +// ============================================================================ + +//! Type of \ref HLNode. +ASMJIT_ENUM(HLNodeType) { + //! Invalid node (internal, don't use). 
+ kHLNodeTypeNone = 0, + + // -------------------------------------------------------------------------- + // [Low-Level - Assembler / Compiler] + // -------------------------------------------------------------------------- + + //! Node is \ref HLInst or \ref HLJump. + kHLNodeTypeInst, + //! Node is \ref HLData. + kHLNodeTypeData, + //! Node is \ref HLAlign. + kHLNodeTypeAlign, + //! Node is \ref HLLabel. + kHLNodeTypeLabel, + //! Node is \ref HLComment. + kHLNodeTypeComment, + //! Node is \ref HLSentinel. + kHLNodeTypeSentinel, + + // -------------------------------------------------------------------------- + // [High-Level - Compiler-Only] + // -------------------------------------------------------------------------- + + //! Node is \ref HLHint. + kHLNodeTypeHint, + //! Node is \ref HLFunc. + kHLNodeTypeFunc, + //! Node is \ref HLRet. + kHLNodeTypeRet, + //! Node is \ref HLCall. + kHLNodeTypeCall, + //! Node is \ref HLCallArg. + kHLNodeTypeCallArg +}; + +// ============================================================================ +// [asmjit::HLNodeFlags] +// ============================================================================ + +ASMJIT_ENUM(HLNodeFlags) { + //! Whether the node has been translated, thus contains only registers. + kHLNodeFlagIsTranslated = 0x0001, + + //! Whether the node was scheduled - possibly reordered, but basically this + //! is a mark that is set by scheduler after the node has been visited. + kHLNodeFlagIsScheduled = 0x0002, + + //! Whether the node is informative only and can be safely removed. + kHLNodeFlagIsInformative = 0x0004, + + //! Whether the `HLInst` is a jump. + kHLNodeFlagIsJmp = 0x0008, + //! Whether the `HLInst` is a conditional jump. + kHLNodeFlagIsJcc = 0x0010, + + //! Whether the `HLInst` is an unconditinal jump or conditional jump that is + //! likely to be taken. + kHLNodeFlagIsTaken = 0x0020, + + //! Whether the `HLNode` will return from a function. + //! + //! 
This flag is used by both `HLSentinel` and `HLRet`. + kHLNodeFlagIsRet = 0x0040, + + //! Whether the instruction is special. + kHLNodeFlagIsSpecial = 0x0080, + + //! Whether the instruction is an FPU instruction. + kHLNodeFlagIsFp = 0x0100 +}; + +// ============================================================================ +// [asmjit::HLNode] +// ============================================================================ + +//! Assembler stream (AS) node. +//! +//! Every node represents an abstract instruction, directive, label, or +//! macro-instruction generated by the `Compiler` or other code generator. +struct HLNode { + ASMJIT_NO_COPY(HLNode) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLNode`. + //! + //! \note Always use compiler to create nodes. + ASMJIT_INLINE HLNode(Compiler* compiler, uint32_t type); // Defined-Later. + + //! Destroy the `HLNode`. + //! + //! NOTE: Nodes are zone allocated, there should be no code in the destructor. + ASMJIT_INLINE ~HLNode() {} + + // -------------------------------------------------------------------------- + // [Accessors - List] + // -------------------------------------------------------------------------- + + //! Get previous node in the compiler stream. + ASMJIT_INLINE HLNode* getPrev() const { return _prev; } + //! Get next node in the compiler stream. + ASMJIT_INLINE HLNode* getNext() const { return _next; } + + // -------------------------------------------------------------------------- + // [Accessors - Comment] + // -------------------------------------------------------------------------- + + //! Get an inline comment string. + ASMJIT_INLINE const char* getComment() const { return _comment; } + //! Set an inline comment string to `comment`. 
+ ASMJIT_INLINE void setComment(const char* comment) { _comment = comment; } + + // -------------------------------------------------------------------------- + // [Accessors - Type and Flags] + // -------------------------------------------------------------------------- + + //! Get the node type, see \ref HLNodeType. + ASMJIT_INLINE uint32_t getType() const { return _type; } + //! Get the node flags. + ASMJIT_INLINE uint32_t getFlags() const { return _flags; } + + //! Get whether the instruction has flag `flag`. + ASMJIT_INLINE bool hasFlag(uint32_t flag) const { return (static_cast(_flags) & flag) != 0; } + //! Set node flags to `flags`. + ASMJIT_INLINE void setFlags(uint32_t flags) { _flags = static_cast(flags); } + //! Add instruction `flags`. + ASMJIT_INLINE void orFlags(uint32_t flags) { _flags |= static_cast(flags); } + //! And instruction `flags`. + ASMJIT_INLINE void andFlags(uint32_t flags) { _flags &= static_cast(flags); } + //! Clear instruction `flags`. + ASMJIT_INLINE void andNotFlags(uint32_t flags) { _flags &= ~static_cast(flags); } + + //! Get whether the node has beed fetched. + ASMJIT_INLINE bool isFetched() const { return _flowId != 0; } + //! Get whether the node has been translated. + ASMJIT_INLINE bool isTranslated() const { return hasFlag(kHLNodeFlagIsTranslated); } + //! Get whether the node has been translated. + ASMJIT_INLINE bool isScheduled() const { return hasFlag(kHLNodeFlagIsScheduled); } + //! Get whether the node is informative only (comment, hint). + ASMJIT_INLINE bool isInformative() const { return hasFlag(kHLNodeFlagIsInformative); } + + //! Whether the `HLInst` node is an unconditional jump. + ASMJIT_INLINE bool isJmp() const { return hasFlag(kHLNodeFlagIsJmp); } + //! Whether the `HLInst` node is a conditional jump. + ASMJIT_INLINE bool isJcc() const { return hasFlag(kHLNodeFlagIsJcc); } + //! Whether the `HLInst` node is a conditional/unconditional jump. 
+ ASMJIT_INLINE bool isJmpOrJcc() const { return hasFlag(kHLNodeFlagIsJmp | kHLNodeFlagIsJcc); } + //! Whether the `HLInst` node is a return. + ASMJIT_INLINE bool isRet() const { return hasFlag(kHLNodeFlagIsRet); } + + //! Get whether the node is `HLInst` and the instruction is special. + ASMJIT_INLINE bool isSpecial() const { return hasFlag(kHLNodeFlagIsSpecial); } + //! Get whether the node is `HLInst` and the instruction uses x87-FPU. + ASMJIT_INLINE bool isFp() const { return hasFlag(kHLNodeFlagIsFp); } + + // -------------------------------------------------------------------------- + // [Accessors - FlowId] + // -------------------------------------------------------------------------- + + //! Get flow index. + ASMJIT_INLINE uint32_t getFlowId() const { return _flowId; } + //! Set flow index. + ASMJIT_INLINE void setFlowId(uint32_t flowId) { _flowId = flowId; } + + // -------------------------------------------------------------------------- + // [Accessors - TokenId] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE bool hasTokenId(uint32_t id) const { return _tokenId == id; } + ASMJIT_INLINE uint32_t getTokenId() const { return _tokenId; } + ASMJIT_INLINE void setTokenId(uint32_t id) { _tokenId = id; } + + // -------------------------------------------------------------------------- + // [Accessors - VarMap] + // -------------------------------------------------------------------------- + + //! Get whether node contains variable allocation instructions. + ASMJIT_INLINE bool hasMap() const { return _map != NULL; } + //! Get variable allocation instructions. + ASMJIT_INLINE VarMap* getMap() const { return _map; } + //! Get variable allocation instructions casted to `T*`. + template + ASMJIT_INLINE T* getMap() const { return static_cast(_map); } + //! Set variable allocation instructions. 
+ ASMJIT_INLINE void setMap(VarMap* map) { _map = map; } + + // -------------------------------------------------------------------------- + // [Accessors - VarState] + // -------------------------------------------------------------------------- + + //! Get whether the node has an associated `VarState`. + ASMJIT_INLINE bool hasState() const { return _state != NULL; } + //! Get node state. + ASMJIT_INLINE VarState* getState() const { return _state; } + //! Get node state casted to `T*`. + template + ASMJIT_INLINE T* getState() const { return static_cast(_state); } + //! Set node state. + ASMJIT_INLINE void setState(VarState* state) { _state = state; } + + // -------------------------------------------------------------------------- + // [Accessors - Liveness] + // -------------------------------------------------------------------------- + + //! Get whether the node has variable liveness bits. + ASMJIT_INLINE bool hasLiveness() const { return _liveness != NULL; } + //! Get variable liveness bits. + ASMJIT_INLINE BitArray* getLiveness() const { return _liveness; } + //! Set variable liveness bits. + ASMJIT_INLINE void setLiveness(BitArray* liveness) { _liveness = liveness; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Previous node. + HLNode* _prev; + //! Next node. + HLNode* _next; + + //! Node type, see \ref HLNodeType. + uint8_t _type; + //! Count of operands (if the node has operands, otherwise zero). + uint8_t _opCount; + //! Node flags, different meaning for every type of the node. + uint16_t _flags; + + //! Flow index. + uint32_t _flowId; + + //! Processing token ID. + //! + //! Used by some algorithms to mark nodes as visited. If the token is + //! generated in an incrementing way the visitor can just mark nodes it + //! visits and them compare the `HLNode`s token with it's local token. + //! 
If they match the node has been visited already. Then the visitor + //! doesn't need to clean things up as the next time the token will be + //! different. + uint32_t _tokenId; + + // TODO: 32-bit gap + + //! Inline comment string, initially set to NULL. + const char* _comment; + + //! Variable mapping (VarAttr to VarData), initially NULL, filled during + //! fetch phase. + VarMap* _map; + + //! Variable liveness bits (initially NULL, filled by analysis phase). + BitArray* _liveness; + + //! Saved state. + //! + //! Initially NULL, not all nodes have saved state, only branch/flow control + //! nodes. + VarState* _state; +}; + +// ============================================================================ +// [asmjit::HLInst] +// ============================================================================ + +//! Instruction node (HL). +//! +//! Wraps an instruction with its options and operands. +struct HLInst : public HLNode { + ASMJIT_NO_COPY(HLInst) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLInst` instance. + ASMJIT_INLINE HLInst(Compiler* compiler, uint32_t instId, uint32_t instOptions, Operand* opList, uint32_t opCount) + : HLNode(compiler, kHLNodeTypeInst) { + + _instId = static_cast(instId); + _reserved = 0; + _instOptions = instOptions; + + _opCount = static_cast(opCount); + _opList = opList; + + _updateMemOp(); + } + + //! Destroy the `HLInst` instance. + ASMJIT_INLINE ~HLInst() {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get the instruction id, see `X86InstId`. + ASMJIT_INLINE uint32_t getInstId() const { return _instId; } + //! Set the instruction id to `instId`. + //! + //! 
NOTE: Please do not modify instruction code if you don't know what you + //! are doing. Incorrect instruction code and/or operands can cause random + //! errors in production builds and will most probably trigger assertion + //! failures in debug builds. + ASMJIT_INLINE void setInstId(uint32_t instId) { _instId = static_cast(instId); } + + //! Whether the instruction is either a jump or a conditional jump likely to + //! be taken. + ASMJIT_INLINE bool isTaken() const { return hasFlag(kHLNodeFlagIsTaken); } + + //! Get emit options. + ASMJIT_INLINE uint32_t getOptions() const { return _instOptions; } + //! Set emit options. + ASMJIT_INLINE void setOptions(uint32_t options) { _instOptions = options; } + //! Add emit options. + ASMJIT_INLINE void addOptions(uint32_t options) { _instOptions |= options; } + //! Mask emit options. + ASMJIT_INLINE void andOptions(uint32_t options) { _instOptions &= options; } + //! Clear emit options. + ASMJIT_INLINE void delOptions(uint32_t options) { _instOptions &= ~options; } + + //! Get operands count. + ASMJIT_INLINE uint32_t getOpCount() const { return _opCount; } + //! Get operands list. + ASMJIT_INLINE Operand* getOpList() { return _opList; } + //! \overload + ASMJIT_INLINE const Operand* getOpList() const { return _opList; } + + //! Get whether the instruction contains a memory operand. + ASMJIT_INLINE bool hasMemOp() const { return _memOpIndex != 0xFF; } + //! Get memory operand. + //! + //! NOTE: Can only be called if the instruction has such operand, + //! see `hasMemOp()`. + ASMJIT_INLINE BaseMem* getMemOp() const { + ASMJIT_ASSERT(hasMemOp()); + return static_cast(&_opList[_memOpIndex]); + } + //! \overload + template + ASMJIT_INLINE T* getMemOp() const { + ASMJIT_ASSERT(hasMemOp()); + return static_cast(&_opList[_memOpIndex]); + } + + //! Set memory operand index, `0xFF` means no memory operand. + ASMJIT_INLINE void setMemOpIndex(uint32_t index) { _memOpIndex = static_cast(index); } + //! 
Reset memory operand index to `0xFF` (no operand). + ASMJIT_INLINE void resetMemOpIndex() { _memOpIndex = 0xFF; } + + // -------------------------------------------------------------------------- + // [Utils] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void _updateMemOp() { + Operand* opList = getOpList(); + uint32_t opCount = getOpCount(); + + uint32_t i; + for (i = 0; i < opCount; i++) + if (opList[i].isMem()) + goto _Update; + i = 0xFF; + +_Update: + setMemOpIndex(i); + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Instruction ID, see `InstId`. + uint16_t _instId; + //! \internal + uint8_t _memOpIndex; + //! \internal + uint8_t _reserved; + //! Instruction options, see `InstOptions`. + uint32_t _instOptions; + + //! Operands list. + Operand* _opList; +}; + +// ============================================================================ +// [asmjit::HLJump] +// ============================================================================ + +//! Jump node (HL). +//! +//! Extension of `HLInst` node, which stores more information about the jump. 
+struct HLJump : public HLInst { + ASMJIT_NO_COPY(HLJump) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE HLJump(Compiler* compiler, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) : + HLInst(compiler, code, options, opList, opCount) {} + ASMJIT_INLINE ~HLJump() {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE HLLabel* getTarget() const { return _target; } + ASMJIT_INLINE HLJump* getJumpNext() const { return _jumpNext; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Target node. + HLLabel* _target; + //! Next jump to the same target in a single linked-list. + HLJump* _jumpNext; +}; + +// ============================================================================ +// [asmjit::HLData] +// ============================================================================ + +//! Data node (HL). +//! +//! Wraps `.data` directive. The node contains data that will be placed at the +//! node's position in the assembler stream. The data is considered to be RAW; +//! no analysis nor byte-order conversion is performed on RAW data. +struct HLData : public HLNode { + ASMJIT_NO_COPY(HLData) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + enum { kInlineBufferSize = 12 }; + + //! Create a new `HLData` instance. 
+ ASMJIT_INLINE HLData(Compiler* compiler, void* data, uint32_t size) + : HLNode(compiler, kHLNodeTypeData) { + + _size = size; + if (size <= kInlineBufferSize) { + if (data != NULL) + ::memcpy(_data.buf, data, size); + } + else { + _data.ptr = static_cast(data); + } + } + + //! Destroy the `HLData` instance. + ASMJIT_INLINE ~HLData() {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get size of the data. + uint32_t getSize() const { return _size; } + //! Get pointer to the data. + uint8_t* getData() const { return _size <= kInlineBufferSize ? const_cast(_data.buf) : _data.ptr; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + union { + //! data buffer. + uint8_t buf[kInlineBufferSize]; + //! Data buffer. + uint8_t* ptr; + } _data; + + //! Size of the data. + uint32_t _size; +}; + +// ============================================================================ +// [asmjit::HLAlign] +// ============================================================================ + +//! Align node (HL). +//! +//! Wraps `.align` directive. +struct HLAlign : public HLNode { + ASMJIT_NO_COPY(HLAlign) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLAlign` instance. + ASMJIT_INLINE HLAlign(Compiler* compiler, uint32_t alignMode, uint32_t offset) + : HLNode(compiler, kHLNodeTypeAlign) { + + _alignMode = alignMode; + _offset = offset; + } + + //! Destroy the `HLAlign` instance. 
+ ASMJIT_INLINE ~HLAlign() {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get align mode. + ASMJIT_INLINE uint32_t getAlignMode() const { return _alignMode; } + //! Set align mode. + ASMJIT_INLINE void setAlignMode(uint32_t alignMode) { _alignMode = alignMode; } + + //! Get align offset in bytes. + ASMJIT_INLINE uint32_t getOffset() const { return _offset; } + //! Set align offset in bytes to `offset`. + ASMJIT_INLINE void setOffset(uint32_t offset) { _offset = offset; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Align mode, see \ref AlignMode. + uint32_t _alignMode; + //! Align offset (in bytes). + uint32_t _offset; +}; + +// ============================================================================ +// [asmjit::HLLabel] +// ============================================================================ + +//! label node (HL). +struct HLLabel : public HLNode { + ASMJIT_NO_COPY(HLLabel) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLLabel` instance. + ASMJIT_INLINE HLLabel(Compiler* compiler, uint32_t labelId) + : HLNode(compiler, kHLNodeTypeLabel) { + + _id = labelId; + _numRefs = 0; + _from = NULL; + } + + //! Destroy the `HLLabel` instance. + ASMJIT_INLINE ~HLLabel() {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get target label. + ASMJIT_INLINE Label getLabel() const { return Label(_id); } + //! Get target label id. 
+ ASMJIT_INLINE uint32_t getLabelId() const { return _id; } + + //! Get first jmp instruction. + ASMJIT_INLINE HLJump* getFrom() const { return _from; } + + //! Get whether the node has assigned state. + ASMJIT_INLINE bool hasState() const { return _state != NULL; } + //! Get state for this target. + ASMJIT_INLINE VarState* getState() const { return _state; } + //! Set state for this target. + ASMJIT_INLINE void setState(VarState* state) { _state = state; } + + //! Get number of jumps to this target. + ASMJIT_INLINE uint32_t getNumRefs() const { return _numRefs; } + //! Set number of jumps to this target. + ASMJIT_INLINE void setNumRefs(uint32_t i) { _numRefs = i; } + + //! Add number of jumps to this target. + ASMJIT_INLINE void addNumRefs(uint32_t i = 1) { _numRefs += i; } + //! Subtract number of jumps to this target. + ASMJIT_INLINE void subNumRefs(uint32_t i = 1) { _numRefs -= i; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Label id. + uint32_t _id; + //! Count of jumps here. + uint32_t _numRefs; + + //! First jump instruction that points to this target (label). + HLJump* _from; +}; + +// ============================================================================ +// [asmjit::HLComment] +// ============================================================================ + +//! Comment node (HL). +struct HLComment : public HLNode { + ASMJIT_NO_COPY(HLComment) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLComment` instance. + ASMJIT_INLINE HLComment(Compiler* compiler, const char* comment) + : HLNode(compiler, kHLNodeTypeComment) { + + orFlags(kHLNodeFlagIsInformative); + _comment = comment; + } + + //! Destroy the `HLComment` instance. 
+ ASMJIT_INLINE ~HLComment() {} +}; + +// ============================================================================ +// [asmjit::HLSentinel] +// ============================================================================ + +//! Sentinel node (HL). +struct HLSentinel : public HLNode { + ASMJIT_NO_COPY(HLSentinel) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLSentinel` instance. + ASMJIT_INLINE HLSentinel(Compiler* compiler) + : HLNode(compiler, kHLNodeTypeSentinel) { + _flags |= kHLNodeFlagIsRet; + } + + //! Destroy the `HLSentinel` instance. + ASMJIT_INLINE ~HLSentinel() {} +}; + +// ============================================================================ +// [asmjit::HLHint] +// ============================================================================ + +//! Hint node. +struct HLHint : public HLNode { + ASMJIT_NO_COPY(HLHint) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLHint` instance. + ASMJIT_INLINE HLHint(Compiler* compiler, VarData* vd, uint32_t hint, uint32_t value) + : HLNode(compiler, kHLNodeTypeHint) { + + orFlags(kHLNodeFlagIsInformative); + _vd = vd; + _hint = hint; + _value = value; + } + + //! Destroy the `HLHint` instance. + ASMJIT_INLINE ~HLHint() {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get variable. + ASMJIT_INLINE VarData* getVd() const { return _vd; } + + //! Get hint it (see `kVarHint)`. + ASMJIT_INLINE uint32_t getHint() const{ return _hint; } + //! Set hint it (see `kVarHint)`. 
+ ASMJIT_INLINE void setHint(uint32_t hint) { _hint = hint; } + + //! Get hint value. + ASMJIT_INLINE uint32_t getValue() const { return _value; } + //! Set hint value. + ASMJIT_INLINE void setValue(uint32_t value) { _value = value; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Variable. + VarData* _vd; + //! Hint id. + uint32_t _hint; + //! Value. + uint32_t _value; +}; + +// ============================================================================ +// [asmjit::HLFunc] +// ============================================================================ + +//! Function node (HL). +struct HLFunc : public HLNode { + ASMJIT_NO_COPY(HLFunc) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLFunc` instance. + //! + //! Always use `Compiler::addFunc()` to create an `HLFunc` instance. + ASMJIT_INLINE HLFunc(Compiler* compiler) + : HLNode(compiler, kHLNodeTypeFunc), + _entryNode(NULL), + _exitNode(NULL), + _decl(NULL), + _end(NULL), + _args(NULL), + _funcHints(Utils::mask(kFuncHintNaked)), + _funcFlags(0), + _expectedStackAlignment(0), + _requiredStackAlignment(0), + _redZoneSize(0), + _spillZoneSize(0), + _argStackSize(0), + _memStackSize(0), + _callStackSize(0) {} + + //! Destroy the `HLFunc` instance. + ASMJIT_INLINE ~HLFunc() {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get function entry `HLLabel`. + ASMJIT_INLINE HLLabel* getEntryNode() const { return _entryNode; } + //! Get function exit `HLLabel`. + ASMJIT_INLINE HLLabel* getExitNode() const { return _exitNode; } + + //! Get function entry label. 
+ ASMJIT_INLINE Label getEntryLabel() const { return _entryNode->getLabel(); } + //! Get function exit label. + ASMJIT_INLINE Label getExitLabel() const { return _exitNode->getLabel(); } + + //! Get the function end sentinel. + ASMJIT_INLINE HLSentinel* getEnd() const { return _end; } + //! Get function declaration. + ASMJIT_INLINE FuncDecl* getDecl() const { return _decl; } + + //! Get arguments count. + ASMJIT_INLINE uint32_t getNumArgs() const { return _decl->getNumArgs(); } + //! Get arguments list. + ASMJIT_INLINE VarData** getArgs() const { return _args; } + + //! Get argument at `i`. + ASMJIT_INLINE VarData* getArg(uint32_t i) const { + ASMJIT_ASSERT(i < getNumArgs()); + return _args[i]; + } + + //! Set argument at `i`. + ASMJIT_INLINE void setArg(uint32_t i, VarData* vd) { + ASMJIT_ASSERT(i < getNumArgs()); + _args[i] = vd; + } + + //! Reset argument at `i`. + ASMJIT_INLINE void resetArg(uint32_t i) { + ASMJIT_ASSERT(i < getNumArgs()); + _args[i] = NULL; + } + + //! Get function hints. + ASMJIT_INLINE uint32_t getFuncHints() const { return _funcHints; } + //! Get function flags. + ASMJIT_INLINE uint32_t getFuncFlags() const { return _funcFlags; } + + //! Get whether the _funcFlags has `flag` + ASMJIT_INLINE bool hasFuncFlag(uint32_t flag) const { return (_funcFlags & flag) != 0; } + //! Set function `flag`. + ASMJIT_INLINE void addFuncFlags(uint32_t flags) { _funcFlags |= flags; } + //! Clear function `flag`. + ASMJIT_INLINE void clearFuncFlags(uint32_t flags) { _funcFlags &= ~flags; } + + //! Get whether the function is naked. + ASMJIT_INLINE bool isNaked() const { return hasFuncFlag(kFuncFlagIsNaked); } + //! Get whether the function is also a caller. + ASMJIT_INLINE bool isCaller() const { return hasFuncFlag(kFuncFlagIsCaller); } + //! Get whether the required stack alignment is lower than expected one, + //! thus it has to be aligned manually. + ASMJIT_INLINE bool isStackMisaligned() const { return hasFuncFlag(kFuncFlagIsStackMisaligned); } + //! 
Get whether the stack pointer is adjusted inside function prolog/epilog. + ASMJIT_INLINE bool isStackAdjusted() const { return hasFuncFlag(kFuncFlagIsStackAdjusted); } + + //! Get whether the function is finished. + ASMJIT_INLINE bool isFinished() const { return hasFuncFlag(kFuncFlagIsFinished); } + + //! Get expected stack alignment. + ASMJIT_INLINE uint32_t getExpectedStackAlignment() const { return _expectedStackAlignment; } + //! Set expected stack alignment. + ASMJIT_INLINE void setExpectedStackAlignment(uint32_t alignment) { _expectedStackAlignment = alignment; } + + //! Get required stack alignment. + ASMJIT_INLINE uint32_t getRequiredStackAlignment() const { return _requiredStackAlignment; } + //! Set required stack alignment. + ASMJIT_INLINE void setRequiredStackAlignment(uint32_t alignment) { _requiredStackAlignment = alignment; } + + //! Update required stack alignment so it's not lower than expected + //! stack alignment. + ASMJIT_INLINE void updateRequiredStackAlignment() { + if (_requiredStackAlignment <= _expectedStackAlignment) { + _requiredStackAlignment = _expectedStackAlignment; + clearFuncFlags(kFuncFlagIsStackMisaligned); + } + else { + addFuncFlags(kFuncFlagIsStackMisaligned); + } + } + + //! Set stack "Red Zone" size. + ASMJIT_INLINE uint32_t getRedZoneSize() const { return _redZoneSize; } + //! Get stack "Red Zone" size. + ASMJIT_INLINE void setRedZoneSize(uint32_t s) { _redZoneSize = static_cast(s); } + + //! Set stack "Spill Zone" size. + ASMJIT_INLINE uint32_t getSpillZoneSize() const { return _spillZoneSize; } + //! Get stack "Spill Zone" size. + ASMJIT_INLINE void setSpillZoneSize(uint32_t s) { _spillZoneSize = static_cast(s); } + + //! Get stack size used by function arguments. + ASMJIT_INLINE uint32_t getArgStackSize() const { return _argStackSize; } + + //! Get stack size used by variables and memory allocated on the stack. + ASMJIT_INLINE uint32_t getMemStackSize() const { return _memStackSize; } + + //! 
Get stack size used by function calls. + ASMJIT_INLINE uint32_t getCallStackSize() const { return _callStackSize; } + //! Merge stack size used by function call with `s`. + ASMJIT_INLINE void mergeCallStackSize(uint32_t s) { if (_callStackSize < s) _callStackSize = s; } + + // -------------------------------------------------------------------------- + // [Hints] + // -------------------------------------------------------------------------- + + //! Set function hint. + ASMJIT_INLINE void setHint(uint32_t hint, uint32_t value) { + ASMJIT_ASSERT(hint <= 31); + ASMJIT_ASSERT(value <= 1); + + _funcHints &= ~(1 << hint); + _funcHints |= (value << hint); + } + + //! Get function hint. + ASMJIT_INLINE uint32_t getHint(uint32_t hint) const { + ASMJIT_ASSERT(hint <= 31); + return (_funcHints >> hint) & 0x1; + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Function entry. + HLLabel* _entryNode; + //! Function exit. + HLLabel* _exitNode; + + //! Function declaration. + FuncDecl* _decl; + //! Function end. + HLSentinel* _end; + + //! Arguments list as `VarData`. + VarData** _args; + + //! Function hints; + uint32_t _funcHints; + //! Function flags. + uint32_t _funcFlags; + + //! Expected stack alignment (we depend on this value). + //! + //! \note It can be global alignment given by the OS or described by the + //! target platform ABI. + uint32_t _expectedStackAlignment; + //! Required stack alignment (required by SIMD instructions). + uint32_t _requiredStackAlignment; + + //! The "Red Zone" size - count of bytes which might be accessed by a left + //! function without adjusting the stack pointer (`esp` or `rsp`) (AMD64 ABI). + uint16_t _redZoneSize; + + //! The "Spill Zone" size - count of bytes after the function return address + //! that can be used by the function to spill variables in (WIN64 ABI). + uint16_t _spillZoneSize; + + //! 
Stack size needed for function arguments. + uint32_t _argStackSize; + //! Stack size needed for all variables and memory allocated on the stack. + uint32_t _memStackSize; + //! Stack size needed to call other functions. + uint32_t _callStackSize; +}; + +// ============================================================================ +// [asmjit::HLRet] +// ============================================================================ + +//! Return node (HL). +struct HLRet : public HLNode { + ASMJIT_NO_COPY(HLRet) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLRet` instance. + ASMJIT_INLINE HLRet(Compiler* compiler, const Operand& o0, const Operand& o1) + : HLNode(compiler, kHLNodeTypeRet) { + + _flags |= kHLNodeFlagIsRet; + _ret[0] = o0; + _ret[1] = o1; + } + + //! Destroy the `HLRet` instance. + ASMJIT_INLINE ~HLRet() {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get the first return operand. + ASMJIT_INLINE Operand& getFirst() { return _ret[0]; } + //! \overload + ASMJIT_INLINE const Operand& getFirst() const { return _ret[0]; } + + //! Get the second return operand. + ASMJIT_INLINE Operand& getSecond() { return _ret[1]; } + //! \overload + ASMJIT_INLINE const Operand& getSecond() const { return _ret[1]; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Ret operand(s). + Operand _ret[2]; +}; + +// ============================================================================ +// [asmjit::HLCall] +// ============================================================================ + +//! Call node (HL). 
+struct HLCall : public HLNode { + ASMJIT_NO_COPY(HLCall) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLCall` instance. + ASMJIT_INLINE HLCall(Compiler* compiler, const Operand& target) + : HLNode(compiler, kHLNodeTypeCall), + _decl(NULL), + _target(target), + _args(NULL) {} + + //! Destroy the `HLCall` instance. + ASMJIT_INLINE ~HLCall() {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get function declaration. + ASMJIT_INLINE FuncDecl* getDecl() const { return _decl; } + + //! Get target operand. + ASMJIT_INLINE Operand& getTarget() { return _target; } + //! \overload + ASMJIT_INLINE const Operand& getTarget() const { return _target; } + + //! Get return at `i`. + ASMJIT_INLINE Operand& getRet(uint32_t i = 0) { + ASMJIT_ASSERT(i < 2); + return _ret[i]; + } + //! \overload + ASMJIT_INLINE const Operand& getRet(uint32_t i = 0) const { + ASMJIT_ASSERT(i < 2); + return _ret[i]; + } + + //! Get argument at `i`. + ASMJIT_INLINE Operand& getArg(uint32_t i) { + ASMJIT_ASSERT(i < kFuncArgCountLoHi); + return _args[i]; + } + //! \overload + ASMJIT_INLINE const Operand& getArg(uint32_t i) const { + ASMJIT_ASSERT(i < kFuncArgCountLoHi); + return _args[i]; + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Function declaration. + FuncDecl* _decl; + + //! Target (address of function, register, label, ...). + Operand _target; + //! Return. + Operand _ret[2]; + //! Arguments. 
+ Operand* _args; +}; + +// ============================================================================ +// [asmjit::HLCallArg] +// ============================================================================ + +//! Function call's argument node (HL). +struct HLCallArg : public HLNode { + ASMJIT_NO_COPY(HLCallArg) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new `HLCallArg` instance. + ASMJIT_INLINE HLCallArg(Compiler* compiler, HLCall* call, VarData* sVd, VarData* cVd) + : HLNode(compiler, kHLNodeTypeCallArg), + _call(call), + _sVd(sVd), + _cVd(cVd), + _args(0) {} + + //! Destroy the `HLCallArg` instance. + ASMJIT_INLINE ~HLCallArg() {} + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get the associated function-call. + ASMJIT_INLINE HLCall* getCall() const { return _call; } + //! Get source variable. + ASMJIT_INLINE VarData* getSVd() const { return _sVd; } + //! Get conversion variable. + ASMJIT_INLINE VarData* getCVd() const { return _cVd; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Associated `HLCall`. + HLCall* _call; + //! Source variable. + VarData* _sVd; + //! Temporary variable used for conversion (or NULL). + VarData* _cVd; + + //! Affected arguments bit-array. + uint32_t _args; +}; + +// ============================================================================ +// [asmjit::Stream] +// ============================================================================ + + + +//! 
\} + +} // asmjit namespace + +// [Api-End] +#include "../apiend.h" + +// [Guard] +#endif // !ASMJIT_DISABLE_COMPILER +#endif // _ASMJIT_BASE_HLSTREAM_H diff --git a/src/asmjit/base/intutil.cpp b/src/asmjit/base/intutil.cpp deleted file mode 100644 index 371a2c4..0000000 --- a/src/asmjit/base/intutil.cpp +++ /dev/null @@ -1,217 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies - AsmJit] -#include "../base/intutil.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -#if defined(ASMJIT_TEST) -UNIT(base_intutil) { - uint32_t i; - - INFO("IntTraits<>."); - EXPECT(IntTraits::kIsSigned, - "IntTraits should report signed."); - EXPECT(IntTraits::kIsUnsigned, - "IntTraits should report unsigned."); - - EXPECT(IntTraits::kIsSigned, - "IntTraits should report signed."); - EXPECT(IntTraits::kIsUnsigned, - "IntTraits should report unsigned."); - - EXPECT(IntTraits::kIsSigned, - "IntTraits should report signed."); - EXPECT(IntTraits::kIsUnsigned, - "IntTraits should report unsigned."); - - EXPECT(IntTraits::kIsSigned, - "IntTraits should report signed."); - EXPECT(IntTraits::kIsUnsigned, - "IntTraits should report unsigned."); - - EXPECT(IntTraits::kIsSigned, - "IntTraits should report signed."); - EXPECT(IntTraits::kIsUnsigned, - "IntTraits should report unsigned."); - - EXPECT(IntTraits::kIsIntPtr, - "IntTraits should report intptr_t type."); - EXPECT(IntTraits::kIsIntPtr, - "IntTraits should report intptr_t type."); - - INFO("IntUtil::iMin()/iMax()."); - EXPECT(IntUtil::iMin(0, -1) == -1, - "IntUtil::iMin should return a minimum value."); - EXPECT(IntUtil::iMin(-1, -2) == -2, - "IntUtil::iMin should return a minimum value."); - EXPECT(IntUtil::iMin(1, 2) == 1, - "IntUtil::iMin should return a minimum value."); - - EXPECT(IntUtil::iMax(0, -1) == 0, - "IntUtil::iMax should return a maximum value."); - 
EXPECT(IntUtil::iMax(-1, -2) == -1, - "IntUtil::iMax should return a maximum value."); - EXPECT(IntUtil::iMax(1, 2) == 2, - "IntUtil::iMax should return a maximum value."); - - INFO("IntUtil::inInterval()."); - EXPECT(IntUtil::inInterval(11, 10, 20) == true, - "IntUtil::inInterval should return true if inside."); - EXPECT(IntUtil::inInterval(101, 10, 20) == false, - "IntUtil::inInterval should return false if outside."); - - INFO("IntUtil::isInt8()."); - EXPECT(IntUtil::isInt8(-128) == true, - "IntUtil::isInt8<> should return true if inside."); - EXPECT(IntUtil::isInt8(127) == true, - "IntUtil::isInt8<> should return true if inside."); - EXPECT(IntUtil::isInt8(-129) == false, - "IntUtil::isInt8<> should return false if outside."); - EXPECT(IntUtil::isInt8(128) == false, - "IntUtil::isInt8<> should return false if outside."); - - INFO("IntUtil::isUInt8()."); - EXPECT(IntUtil::isUInt8(255) == true, - "IntUtil::isUInt8<> should return true if inside."); - EXPECT(IntUtil::isUInt8(256) == false, - "IntUtil::isUInt8<> should return false if outside."); - EXPECT(IntUtil::isUInt8(-1) == false, - "IntUtil::isUInt8<> should return false if negative."); - - INFO("IntUtil::isInt16()."); - EXPECT(IntUtil::isInt16(-32768) == true, - "IntUtil::isInt16<> should return true if inside."); - EXPECT(IntUtil::isInt16(32767) == true, - "IntUtil::isInt16<> should return true if inside."); - EXPECT(IntUtil::isInt16(-32769) == false, - "IntUtil::isInt16<> should return false if outside."); - EXPECT(IntUtil::isInt16(32768) == false, - "IntUtil::isInt16<> should return false if outside."); - - INFO("IntUtil::isUInt16()."); - EXPECT(IntUtil::isUInt16(65535) == true, - "IntUtil::isUInt16<> should return true if inside."); - EXPECT(IntUtil::isUInt16(65536) == false, - "IntUtil::isUInt16<> should return false if outside."); - EXPECT(IntUtil::isUInt16(-1) == false, - "IntUtil::isUInt16<> should return false if negative."); - - INFO("IntUtil::isInt32()."); - EXPECT(IntUtil::isInt32(2147483647) == 
true, - "IntUtil::isInt32 should return true if inside."); - EXPECT(IntUtil::isInt32(-2147483647 - 1) == true, - "IntUtil::isInt32 should return true if inside."); - EXPECT(IntUtil::isInt32(ASMJIT_UINT64_C(2147483648)) == false, - "IntUtil::isInt32 should return false if outside."); - EXPECT(IntUtil::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == false, - "IntUtil::isInt32 should return false if outside."); - EXPECT(IntUtil::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, - "IntUtil::isInt32 should return false if outside."); - - INFO("IntUtil::isUInt32()."); - EXPECT(IntUtil::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == true, - "IntUtil::isUInt32 should return true if inside."); - EXPECT(IntUtil::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, - "IntUtil::isUInt32 should return false if outside."); - EXPECT(IntUtil::isUInt32(-1) == false, - "IntUtil::isUInt32 should return false if negative."); - - INFO("IntUtil::isPower2()."); - for (i = 0; i < 64; i++) { - EXPECT(IntUtil::isPowerOf2(static_cast(1) << i) == true, - "IntUtil::isPower2() didn't report power of 2."); - EXPECT(IntUtil::isPowerOf2((static_cast(1) << i) ^ 0x001101) == false, - "IntUtil::isPower2() didn't report not power of 2."); - } - - INFO("IntUtil::mask()."); - for (i = 0; i < 32; i++) { - EXPECT(IntUtil::mask(i) == (1 << i), - "IntUtil::mask(%u) should return %X.", i, (1 << i)); - } - - INFO("IntUtil::bits()."); - for (i = 0; i < 32; i++) { - uint32_t expectedBits = 0; - - for (uint32_t b = 0; b < i; b++) - expectedBits |= static_cast(1) << b; - - EXPECT(IntUtil::bits(i) == expectedBits, - "IntUtil::bits(%u) should return %X.", i, expectedBits); - } - - INFO("IntUtil::hasBit()."); - for (i = 0; i < 32; i++) { - EXPECT(IntUtil::hasBit((1 << i), i) == true, - "IntUtil::hasBit(%X, %u) should return true.", (1 << i), i); - } - - INFO("IntUtil::bitCount()."); - for (i = 0; i < 32; i++) { - EXPECT(IntUtil::bitCount((1 << i)) == 1, - "IntUtil::bitCount(%X) should return true.", (1 << i)); - } - 
EXPECT(IntUtil::bitCount(0x000000F0) == 4, ""); - EXPECT(IntUtil::bitCount(0x10101010) == 4, ""); - EXPECT(IntUtil::bitCount(0xFF000000) == 8, ""); - EXPECT(IntUtil::bitCount(0xFFFFFFF7) == 31, ""); - EXPECT(IntUtil::bitCount(0x7FFFFFFF) == 31, ""); - - INFO("IntUtil::findFirstBit()."); - for (i = 0; i < 32; i++) { - EXPECT(IntUtil::findFirstBit((1 << i)) == i, - "IntUtil::findFirstBit(%X) should return %u.", (1 << i), i); - } - - INFO("IntUtil::keepNOnesFromRight()."); - EXPECT(IntUtil::keepNOnesFromRight(0xF, 1) == 0x1, ""); - EXPECT(IntUtil::keepNOnesFromRight(0xF, 2) == 0x3, ""); - EXPECT(IntUtil::keepNOnesFromRight(0xF, 3) == 0x7, ""); - EXPECT(IntUtil::keepNOnesFromRight(0x5, 2) == 0x5, ""); - EXPECT(IntUtil::keepNOnesFromRight(0xD, 2) == 0x5, ""); - - INFO("IntUtil::isAligned()."); - EXPECT(IntUtil::isAligned(0xFFFF, 4) == false, ""); - EXPECT(IntUtil::isAligned(0xFFF4, 4) == true , ""); - EXPECT(IntUtil::isAligned(0xFFF8, 8) == true , ""); - EXPECT(IntUtil::isAligned(0xFFF0, 16) == true , ""); - - INFO("IntUtil::alignTo()."); - EXPECT(IntUtil::alignTo(0xFFFF, 4) == 0x10000, ""); - EXPECT(IntUtil::alignTo(0xFFF4, 4) == 0x0FFF4, ""); - EXPECT(IntUtil::alignTo(0xFFF8, 8) == 0x0FFF8, ""); - EXPECT(IntUtil::alignTo(0xFFF0, 16) == 0x0FFF0, ""); - EXPECT(IntUtil::alignTo(0xFFF0, 32) == 0x10000, ""); - - INFO("IntUtil::alignToPowerOf2()."); - EXPECT(IntUtil::alignToPowerOf2(0xFFFF) == 0x10000, ""); - EXPECT(IntUtil::alignToPowerOf2(0xF123) == 0x10000, ""); - EXPECT(IntUtil::alignToPowerOf2(0x0F00) == 0x01000, ""); - EXPECT(IntUtil::alignToPowerOf2(0x0100) == 0x00100, ""); - EXPECT(IntUtil::alignToPowerOf2(0x1001) == 0x02000, ""); - - INFO("IntUtil::deltaTo()."); - EXPECT(IntUtil::deltaTo(0xFFFF, 4) == 1, ""); - EXPECT(IntUtil::deltaTo(0xFFF4, 4) == 0, ""); - EXPECT(IntUtil::deltaTo(0xFFF8, 8) == 0, ""); - EXPECT(IntUtil::deltaTo(0xFFF0, 16) == 0, ""); - EXPECT(IntUtil::deltaTo(0xFFF0, 32) == 16, ""); -} -#endif // ASMJIT_TEST - -} // asmjit namespace - -// 
[Api-End] -#include "../apiend.h" diff --git a/src/asmjit/base/lock.h b/src/asmjit/base/lock.h deleted file mode 100644 index f2e2042..0000000 --- a/src/asmjit/base/lock.h +++ /dev/null @@ -1,131 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_LOCK_H -#define _ASMJIT_BASE_LOCK_H - -// [Dependencies - AsmJit] -#include "../build.h" - -// [Dependencies - Posix] -#if defined(ASMJIT_OS_POSIX) -# include -#endif // ASMJIT_OS_POSIX - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base_util -//! \{ - -// ============================================================================ -// [asmjit::Lock] -// ============================================================================ - -//! Lock - used in thread-safe code for locking. -struct Lock { - ASMJIT_NO_COPY(Lock) - - // -------------------------------------------------------------------------- - // [Windows] - // -------------------------------------------------------------------------- - -#if defined(ASMJIT_OS_WINDOWS) - typedef CRITICAL_SECTION Handle; - - //! Create a new `Lock` instance. - ASMJIT_INLINE Lock() { InitializeCriticalSection(&_handle); } - //! Destroy the `Lock` instance. - ASMJIT_INLINE ~Lock() { DeleteCriticalSection(&_handle); } - - //! Lock. - ASMJIT_INLINE void lock() { EnterCriticalSection(&_handle); } - //! Unlock. - ASMJIT_INLINE void unlock() { LeaveCriticalSection(&_handle); } - -#endif // ASMJIT_OS_WINDOWS - - // -------------------------------------------------------------------------- - // [Posix] - // -------------------------------------------------------------------------- - -#if defined(ASMJIT_OS_POSIX) - typedef pthread_mutex_t Handle; - - //! Create a new `Lock` instance. - ASMJIT_INLINE Lock() { pthread_mutex_init(&_handle, NULL); } - //! Destroy the `Lock` instance. 
- ASMJIT_INLINE ~Lock() { pthread_mutex_destroy(&_handle); } - - //! Lock. - ASMJIT_INLINE void lock() { pthread_mutex_lock(&_handle); } - //! Unlock. - ASMJIT_INLINE void unlock() { pthread_mutex_unlock(&_handle); } -#endif // ASMJIT_OS_POSIX - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get handle. - ASMJIT_INLINE Handle& getHandle() { - return _handle; - } - //! \overload - ASMJIT_INLINE const Handle& getHandle() const { - return _handle; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Handle. - Handle _handle; -}; - -// ============================================================================ -// [asmjit::AutoLock] -// ============================================================================ - -//! Scoped lock. -struct AutoLock { - ASMJIT_NO_COPY(AutoLock) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Autolock `target`, scoped. - ASMJIT_INLINE AutoLock(Lock& target) : _target(target) { - _target.lock(); - } - - //! Autounlock `target`. - ASMJIT_INLINE ~AutoLock() { - _target.unlock(); - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Pointer to target (lock). - Lock& _target; -}; - -//! 
\} - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_LOCK_H diff --git a/src/asmjit/base/logger.cpp b/src/asmjit/base/logger.cpp index b2df1f5..cac2bf5 100644 --- a/src/asmjit/base/logger.cpp +++ b/src/asmjit/base/logger.cpp @@ -12,9 +12,9 @@ #if !defined(ASMJIT_DISABLE_LOGGER) // [Dependencies - AsmJit] -#include "../base/intutil.h" +#include "../base/containers.h" #include "../base/logger.h" -#include "../base/string.h" +#include "../base/utils.h" // [Dependencies - C] #include @@ -30,7 +30,7 @@ namespace asmjit { bool LogUtil::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) { size_t currentLen = sb.getLength(); - size_t commentLen = comment ? StringUtil::nlen(comment, kMaxCommentLength) : 0; + size_t commentLen = comment ? Utils::strLen(comment, kMaxCommentLength) : 0; ASMJIT_ASSERT(binLen >= dispLen); @@ -119,7 +119,7 @@ void Logger::logBinary(uint32_t style, const void* data, size_t size) { ::memcpy(buffer, prefix, ASMJIT_ARRAY_SIZE(prefix) - 1); while (i) { - uint32_t n = static_cast(IntUtil::iMin(i, 16)); + uint32_t n = static_cast(Utils::iMin(i, 16)); char* p = buffer + ASMJIT_ARRAY_SIZE(prefix) - 1; i -= n; @@ -163,7 +163,7 @@ void Logger::setIndentation(const char* indentation) { if (!indentation) return; - size_t length = StringUtil::nlen(indentation, ASMJIT_ARRAY_SIZE(_indentation) - 1); + size_t length = Utils::strLen(indentation, ASMJIT_ARRAY_SIZE(_indentation) - 1); ::memcpy(_indentation, indentation, length); } diff --git a/src/asmjit/base/logger.h b/src/asmjit/base/logger.h index 2a5b69e..b6aa7c3 100644 --- a/src/asmjit/base/logger.h +++ b/src/asmjit/base/logger.h @@ -12,7 +12,7 @@ #if !defined(ASMJIT_DISABLE_LOGGER) // [Dependencies - AsmJit] -#include "../base/string.h" +#include "../base/containers.h" // [Dependencies - C] #include @@ -22,7 +22,7 @@ namespace asmjit { -//! \addtogroup asmjit_base_util +//! 
\addtogroup asmjit_base //! \{ // ============================================================================ @@ -87,7 +87,7 @@ struct LogUtil { //! //! This class also contain `_enabled` member that can be used to enable //! or disable logging. -struct ASMJIT_VCLASS Logger { +struct ASMJIT_VIRTAPI Logger { ASMJIT_NO_COPY(Logger) // -------------------------------------------------------------------------- @@ -162,7 +162,7 @@ struct ASMJIT_VCLASS Logger { // ============================================================================ //! Logger that can log to standard C `FILE*` stream. -struct ASMJIT_VCLASS FileLogger : public Logger { +struct ASMJIT_VIRTAPI FileLogger : public Logger { ASMJIT_NO_COPY(FileLogger) // -------------------------------------------------------------------------- @@ -211,7 +211,7 @@ struct ASMJIT_VCLASS FileLogger : public Logger { // ============================================================================ //! String logger. -struct ASMJIT_VCLASS StringLogger : public Logger { +struct ASMJIT_VIRTAPI StringLogger : public Logger { ASMJIT_NO_COPY(StringLogger) // -------------------------------------------------------------------------- diff --git a/src/asmjit/base/operand.cpp b/src/asmjit/base/operand.cpp index 04a8493..7313dd6 100644 --- a/src/asmjit/base/operand.cpp +++ b/src/asmjit/base/operand.cpp @@ -21,16 +21,29 @@ namespace asmjit { // Prevent static initialization. struct Operand { - uint8_t op; - uint8_t size; - uint8_t reserved_2_1; - uint8_t reserved_3_1; - uint32_t id; - uint64_t reserved_8_8; + struct BaseOp { + uint8_t op; + uint8_t size; + uint8_t reserved_2_1; + uint8_t reserved_3_1; + + uint32_t id; + + uint32_t reserved_8_4; + uint32_t reserved_12_4; + }; + + // Kept in union to prevent LTO warnings. + union { + BaseOp _base; + + // Required to properly align this _fake_ `Operand`, not used. 
+ uint64_t _data[2]; + }; }; -ASMJIT_VAR const Operand noOperand; -const Operand noOperand = { 0, 0, 0, 0, kInvalidValue, 0 }; +ASMJIT_VARAPI const Operand noOperand; +const Operand noOperand = {{ 0, 0, 0, 0, kInvalidValue, 0, 0 }}; } // asmjit namespace diff --git a/src/asmjit/base/operand.h b/src/asmjit/base/operand.h index 153e220..d0e96fc 100644 --- a/src/asmjit/base/operand.h +++ b/src/asmjit/base/operand.h @@ -9,7 +9,7 @@ #define _ASMJIT_BASE_OPERAND_H // [Dependencies - AsmJit] -#include "../base/intutil.h" +#include "../base/utils.h" // [Api-Begin] #include "../apibegin.h" @@ -23,7 +23,7 @@ namespace asmjit { struct Assembler; struct Compiler; -//! \addtogroup asmjit_base_general +//! \addtogroup asmjit_base //! \{ // ============================================================================ @@ -125,6 +125,49 @@ ASMJIT_ENUM(MemType) { kMemTypeRip = 4 }; +// ============================================================================ +// [asmjit::VarType] +// ============================================================================ + +ASMJIT_ENUM(VarType) { + //! Variable is 8-bit signed integer. + kVarTypeInt8 = 0, + //! Variable is 8-bit unsigned integer. + kVarTypeUInt8 = 1, + //! Variable is 16-bit signed integer. + kVarTypeInt16 = 2, + //! Variable is 16-bit unsigned integer. + kVarTypeUInt16 = 3, + //! Variable is 32-bit signed integer. + kVarTypeInt32 = 4, + //! Variable is 32-bit unsigned integer. + kVarTypeUInt32 = 5, + //! Variable is 64-bit signed integer. + kVarTypeInt64 = 6, + //! Variable is 64-bit unsigned integer. + kVarTypeUInt64 = 7, + + //! Variable is target `intptr_t`, compatible with the target's `intptr_t` (not hosts). + kVarTypeIntPtr = 8, + //! Variable is target `uintptr_t`, compatible with the target's `uintptr_t` (not hosts). + kVarTypeUIntPtr = 9, + + //! Variable is 32-bit floating point (single precision). + kVarTypeFp32 = 10, + //! Variable is 64-bit floating point (double precision). + kVarTypeFp64 = 11, + + //! 
\internal + _kVarTypeIntStart = kVarTypeInt8, + //! \internal + _kVarTypeIntEnd = kVarTypeUIntPtr, + + //! \internal + _kVarTypeFpStart = kVarTypeFp32, + //! \internal + _kVarTypeFpEnd = kVarTypeFp64 +}; + // ============================================================================ // [asmjit::Operand] // ============================================================================ @@ -174,7 +217,7 @@ struct Operand { //! Register type and index access. struct { -#if defined(ASMJIT_ARCH_LE) +#if ASMJIT_ARCH_LE //! Register index. uint8_t index; //! Register type. @@ -202,8 +245,7 @@ struct Operand { //! \internal //! //! This is not needed or used, it's just to force compiler to always - //! align this struct to 8-bytes (so the struct is compatible to others - //! when it comes to alignment). It should fix VS linker warning as well. + //! align this struct to 8-bytes (it should fix LTO warning as well). uint64_t reserved8_8; }; }; @@ -319,10 +361,8 @@ struct Operand { // [Operand] // -------------------------------------------------------------------------- - //! Clone `Operand`. - ASMJIT_INLINE Operand clone() const { - return Operand(*this); - } + //! Clone the `Operand`. + ASMJIT_INLINE Operand clone() const { return Operand(*this); } // -------------------------------------------------------------------------- // [Init & Copy] @@ -342,11 +382,11 @@ struct Operand { // write. Because the 'a', 'b', 'c' and 'd' variables are usually compile // time constants the compiler can do a really nice job if they are joined // by using bitwise operations. 
- _packed[0].setPacked_2x32(IntUtil::pack32_4x8(op, sz, r0, r1), id); + _packed[0].setPacked_2x32(Utils::pack32_4x8(op, sz, r0, r1), id); } ASMJIT_INLINE void _init_packed_op_sz_w0_id(uint32_t op, uint32_t sz, uint32_t w0, uint32_t id) { - _packed[0].setPacked_2x32(IntUtil::pack32_2x8_1x16(op, sz, w0), id); + _packed[0].setPacked_2x32(Utils::pack32_2x8_1x16(op, sz, w0), id); } ASMJIT_INLINE void _init_packed_d0_d1(uint32_t u0, uint32_t u1) { @@ -369,14 +409,10 @@ struct Operand { // -------------------------------------------------------------------------- template - ASMJIT_INLINE T& getData() { - return reinterpret_cast(_base); - } + ASMJIT_INLINE T& getData() { return reinterpret_cast(_base); } template - ASMJIT_INLINE const T& getData() const { - return reinterpret_cast(_base); - } + ASMJIT_INLINE const T& getData() const { return reinterpret_cast(_base); } // -------------------------------------------------------------------------- // [Type] @@ -403,23 +439,18 @@ struct Operand { // -------------------------------------------------------------------------- //! Get register type. - ASMJIT_INLINE uint32_t getRegType() const { - return _vreg.type; - } - + ASMJIT_INLINE uint32_t getRegType() const { return _vreg.type; } //! Get register index. - ASMJIT_INLINE uint32_t getRegIndex() const { - return _vreg.index; - } + ASMJIT_INLINE uint32_t getRegIndex() const { return _vreg.index; } //! Get whether the operand is register of `type`. ASMJIT_INLINE bool isRegType(uint32_t type) const { - return (_packed[0].u32[0] & IntUtil::pack32_2x8_1x16(0xFF, 0, 0xFF00)) == IntUtil::pack32_2x8_1x16(kOperandTypeReg, 0, (type << 8)); + return (_packed[0].u32[0] & Utils::pack32_2x8_1x16(0xFF, 0, 0xFF00)) == Utils::pack32_2x8_1x16(kOperandTypeReg, 0, (type << 8)); } //! Get whether the operand is register and of `type` and `index`. 
ASMJIT_INLINE bool isRegCode(uint32_t type, uint32_t index) const { - return (_packed[0].u32[0] & IntUtil::pack32_2x8_1x16(0xFF, 0, 0xFFFF)) == IntUtil::pack32_2x8_1x16(kOperandTypeReg, 0, (type << 8) + index); + return (_packed[0].u32[0] & Utils::pack32_2x8_1x16(0xFF, 0, 0xFFFF)) == Utils::pack32_2x8_1x16(kOperandTypeReg, 0, (type << 8) + index); } //! Get whether the operand is a register or memory. @@ -441,9 +472,7 @@ struct Operand { // -------------------------------------------------------------------------- //! Get size of the operand in bytes. - ASMJIT_INLINE uint32_t getSize() const { - return _base.size; - } + ASMJIT_INLINE uint32_t getSize() const { return _base.size; } // -------------------------------------------------------------------------- // [Id] @@ -455,9 +484,7 @@ struct Operand { //! //! There is no way to change or remove operand id. Unneeded operands can be //! simply reassigned by `operator=`. - ASMJIT_INLINE uint32_t getId() const { - return _base.id; - } + ASMJIT_INLINE uint32_t getId() const { return _base.id; } // -------------------------------------------------------------------------- // [Members] @@ -763,19 +790,19 @@ struct Imm : public Operand { } //! Get whether the immediate can be casted to 8-bit signed integer. - ASMJIT_INLINE bool isInt8() const { return IntUtil::isInt8(_imm.value._i64[0]); } + ASMJIT_INLINE bool isInt8() const { return Utils::isInt8(_imm.value._i64[0]); } //! Get whether the immediate can be casted to 8-bit unsigned integer. - ASMJIT_INLINE bool isUInt8() const { return IntUtil::isUInt8(_imm.value._i64[0]); } + ASMJIT_INLINE bool isUInt8() const { return Utils::isUInt8(_imm.value._i64[0]); } //! Get whether the immediate can be casted to 16-bit signed integer. - ASMJIT_INLINE bool isInt16() const { return IntUtil::isInt16(_imm.value._i64[0]); } + ASMJIT_INLINE bool isInt16() const { return Utils::isInt16(_imm.value._i64[0]); } //! Get whether the immediate can be casted to 16-bit unsigned integer. 
- ASMJIT_INLINE bool isUInt16() const { return IntUtil::isUInt16(_imm.value._i64[0]); } + ASMJIT_INLINE bool isUInt16() const { return Utils::isUInt16(_imm.value._i64[0]); } //! Get whether the immediate can be casted to 32-bit signed integer. - ASMJIT_INLINE bool isInt32() const { return IntUtil::isInt32(_imm.value._i64[0]); } + ASMJIT_INLINE bool isInt32() const { return Utils::isInt32(_imm.value._i64[0]); } //! Get whether the immediate can be casted to 32-bit unsigned integer. - ASMJIT_INLINE bool isUInt32() const { return IntUtil::isUInt32(_imm.value._i64[0]); } + ASMJIT_INLINE bool isUInt32() const { return Utils::isUInt32(_imm.value._i64[0]); } //! Get immediate value as 8-bit signed integer. ASMJIT_INLINE int8_t getInt8() const { return _imm.value._i8[_ASMJIT_ARCH_INDEX(8, 0)]; } @@ -1046,9 +1073,7 @@ struct Label : public Operand { // -------------------------------------------------------------------------- //! Get whether the label has been initialized by `Assembler` or `Compiler`. - ASMJIT_INLINE bool isInitialized() const { - return _label.id != kInvalidValue; - } + ASMJIT_INLINE bool isInitialized() const { return _label.id != kInvalidValue; } // -------------------------------------------------------------------------- // [Operator Overload] @@ -1060,29 +1085,70 @@ struct Label : public Operand { ASMJIT_INLINE bool operator!=(const Label& other) const { return _base.id != other._base.id; } }; +// ============================================================================ +// [asmjit::Var] +// ============================================================================ + +#if !defined(ASMJIT_DISABLE_COMPILER) +//! Base class for all variables. 
+struct Var : public Operand { + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE Var() : Operand(NoInit) { + _init_packed_op_sz_b0_b1_id(kOperandTypeVar, 0, 0, 0, kInvalidValue); + _init_packed_d2_d3(kInvalidValue, kInvalidValue); + } + + ASMJIT_INLINE Var(const Var& other) : Operand(other) {} + + explicit ASMJIT_INLINE Var(const _NoInit&) : Operand(NoInit) {} + + // -------------------------------------------------------------------------- + // [Var Specific] + // -------------------------------------------------------------------------- + + //! Clone `Var` operand. + ASMJIT_INLINE Var clone() const { return Var(*this); } + + //! Reset Var operand. + ASMJIT_INLINE void reset() { + _init_packed_op_sz_b0_b1_id(kOperandTypeVar, 0, kInvalidReg, kInvalidReg, kInvalidValue); + _init_packed_d2_d3(kInvalidValue, kInvalidValue); + } + + //! Get whether the variable has been initialized by `Compiler`. + ASMJIT_INLINE bool isInitialized() const { return _vreg.id != kInvalidValue; } + //! Get variable type. + ASMJIT_INLINE uint32_t getVarType() const { return _vreg.vType; } + + // -------------------------------------------------------------------------- + // [Operator Overload] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE Var& operator=(const Var& other) { _copy(other); return *this; } + + ASMJIT_INLINE bool operator==(const Var& other) const { return _packed[0] == other._packed[0]; } + ASMJIT_INLINE bool operator!=(const Var& other) const { return !operator==(other); } +}; +#endif // !ASMJIT_DISABLE_COMPILER + // ============================================================================ // [asmjit::Operand - Globals] // ============================================================================ //! 
No operand, can be used to reset an operand by assignment or to refer to an //! operand that doesn't exist. -ASMJIT_VAR const Operand noOperand; +ASMJIT_VARAPI const Operand noOperand; -//! Create signed immediate value operand. -static ASMJIT_INLINE Imm imm(int64_t val) { - return Imm(val); -} - -//! Create unsigned immediate value operand. -static ASMJIT_INLINE Imm imm_u(uint64_t val) { - return Imm(static_cast<int64_t>(val)); -} - -//! Create a `void*` immediate value operand. +//! Create a signed immediate operand. +static ASMJIT_INLINE Imm imm(int64_t val) { return Imm(val); } +//! Create an unsigned immediate operand. +static ASMJIT_INLINE Imm imm_u(uint64_t val) { return Imm(static_cast<int64_t>(val)); } +//! Create a `void*` immediate operand. template<typename T> -static ASMJIT_INLINE Imm imm_ptr(T p) { - return Imm(static_cast<int64_t>((intptr_t)p)); -} +static ASMJIT_INLINE Imm imm_ptr(T p) { return Imm(static_cast<int64_t>((intptr_t)p)); } //! \} diff --git a/src/asmjit/base/runtime.cpp b/src/asmjit/base/runtime.cpp index eae4ef6..9d48417 100644 --- a/src/asmjit/base/runtime.cpp +++ b/src/asmjit/base/runtime.cpp @@ -10,7 +10,6 @@ // [Dependencies - AsmJit] #include "../base/assembler.h" #include "../base/cpuinfo.h" -#include "../base/error.h" #include "../base/runtime.h" // [Api-Begin] @@ -24,24 +23,19 @@ namespace asmjit { Runtime::Runtime() { _sizeLimit = 0; + _baseAddress = kNoBaseAddress; _runtimeType = kRuntimeTypeNone; _allocType = kVMemAllocFreeable; ::memset(_reserved, 0, sizeof(_reserved)); - - _baseAddress = kNoBaseAddress; } - Runtime::~Runtime() {} // ============================================================================ // [asmjit::HostRuntime - Construction / Destruction] // ============================================================================ -HostRuntime::HostRuntime() { - _runtimeType = kRuntimeTypeJit; -} - +HostRuntime::HostRuntime() { _runtimeType = kRuntimeTypeJit; } HostRuntime::~HostRuntime() {} // 
============================================================================ @@ -55,21 +49,11 @@ const CpuInfo* HostRuntime::getCpuInfo() { uint32_t HostRuntime::getStackAlignment() { uint32_t alignment = sizeof(intptr_t); -#if defined(ASMJIT_ARCH_X86) // Modern Linux, APPLE and UNIX guarantees 16-byte stack alignment, but I'm // not sure about all other UNIX operating systems, because 16-byte alignment // is addition to an older specification. -# if (defined(__linux__) || \ - defined(__linux) || \ - defined(__unix__) || \ - defined(__FreeBSD__) || \ - defined(__NetBSD__) || \ - defined(__OpenBSD__) || \ - defined(__DARWIN__) || \ - defined(__APPLE__) ) - alignment = 16; -# endif -#elif defined(ASMJIT_ARCH_X64) +#if (ASMJIT_ARCH_X64) || \ + (ASMJIT_ARCH_X86 && (ASMJIT_OS_LINUX || ASMJIT_OS_BSD || ASMJIT_OS_MAC)) alignment = 16; #endif @@ -78,10 +62,10 @@ uint32_t HostRuntime::getStackAlignment() { void HostRuntime::flush(void* p, size_t size) { // Only useful on non-x86 architectures. -#if !defined(ASMJIT_ARCH_X86) && !defined(ASMJIT_ARCH_X64) +#if !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64 // Windows has built-in support in kernel32.dll. -#if defined(ASMJIT_OS_WINDOWS) +#if ASMJIT_OS_WINDOWS ::FlushInstructionCache(_memMgr.getProcessHandle(), p, size); #endif // ASMJIT_OS_WINDOWS @@ -96,7 +80,6 @@ StaticRuntime::StaticRuntime(void* baseAddress, size_t sizeLimit) { _sizeLimit = sizeLimit; _baseAddress = static_cast((uintptr_t)baseAddress); } - StaticRuntime::~StaticRuntime() {} // ============================================================================ diff --git a/src/asmjit/base/runtime.h b/src/asmjit/base/runtime.h index c484e2c..8bf2ae4 100644 --- a/src/asmjit/base/runtime.h +++ b/src/asmjit/base/runtime.h @@ -9,7 +9,6 @@ #define _ASMJIT_BASE_RUNTIME_H // [Dependencies - AsmJit] -#include "../base/error.h" #include "../base/vmem.h" // [Api-Begin] @@ -24,7 +23,7 @@ namespace asmjit { struct Assembler; struct CpuInfo; -//! \addtogroup asmjit_base_general +//! 
\addtogroup asmjit_base //! \{ // ============================================================================ @@ -42,7 +41,7 @@ ASMJIT_ENUM(RuntimeType) { // ============================================================================ //! Base runtime. -struct ASMJIT_VCLASS Runtime { +struct ASMJIT_VIRTAPI Runtime { ASMJIT_NO_COPY(Runtime) // -------------------------------------------------------------------------- @@ -59,21 +58,12 @@ struct ASMJIT_VCLASS Runtime { // -------------------------------------------------------------------------- //! Get runtime type. - ASMJIT_INLINE uint32_t getRuntimeType() const { - return _runtimeType; - } + ASMJIT_INLINE uint32_t getRuntimeType() const { return _runtimeType; } //! Get whether the runtime has a base address. - //! - //! \sa \ref getBaseAddress() - ASMJIT_INLINE bool hasBaseAddress() const { - return _baseAddress == kNoBaseAddress; - } - + ASMJIT_INLINE bool hasBaseAddress() const { return _baseAddress != kNoBaseAddress; } //! Get the base address. - ASMJIT_INLINE Ptr getBaseAddress() const { - return _baseAddress; - } + ASMJIT_INLINE Ptr getBaseAddress() const { return _baseAddress; } // -------------------------------------------------------------------------- // [Interface] @@ -118,7 +108,7 @@ struct ASMJIT_VCLASS Runtime { // ============================================================================ //! Base runtime for JIT code generation. -struct ASMJIT_VCLASS HostRuntime : public Runtime { +struct ASMJIT_VIRTAPI HostRuntime : public Runtime { ASMJIT_NO_COPY(HostRuntime) // -------------------------------------------------------------------------- @@ -159,7 +149,7 @@ struct ASMJIT_VCLASS HostRuntime : public Runtime { //! //! JIT static runtime can be used to generate code to a memory location that //! is known. 
-struct ASMJIT_VCLASS StaticRuntime : public HostRuntime { +struct ASMJIT_VIRTAPI StaticRuntime : public HostRuntime { ASMJIT_NO_COPY(StaticRuntime) // -------------------------------------------------------------------------- @@ -181,15 +171,12 @@ struct ASMJIT_VCLASS StaticRuntime : public HostRuntime { // -------------------------------------------------------------------------- //! Get the base address. - ASMJIT_INLINE Ptr getBaseAddress() const { - return _baseAddress; - } + ASMJIT_INLINE Ptr getBaseAddress() const { return _baseAddress; } - //! Get the maximum size of the code that can be relocated to the target - //! address or zero if unlimited. - ASMJIT_INLINE size_t getSizeLimit() const { - return _sizeLimit; - } + //! Get the maximum size of the code that can be relocated/stored in the target. + //! + //! Returns zero if unlimited. + ASMJIT_INLINE size_t getSizeLimit() const { return _sizeLimit; } // -------------------------------------------------------------------------- // [Interface] @@ -204,7 +191,7 @@ struct ASMJIT_VCLASS StaticRuntime : public HostRuntime { // ============================================================================ //! JIT runtime. -struct ASMJIT_VCLASS JitRuntime : public HostRuntime { +struct ASMJIT_VIRTAPI JitRuntime : public HostRuntime { ASMJIT_NO_COPY(JitRuntime) // -------------------------------------------------------------------------- @@ -221,19 +208,12 @@ struct ASMJIT_VCLASS JitRuntime : public HostRuntime { // -------------------------------------------------------------------------- //! Get the type of allocation. - ASMJIT_INLINE uint32_t getAllocType() const { - return _allocType; - } - + ASMJIT_INLINE uint32_t getAllocType() const { return _allocType; } //! Set the type of allocation. - ASMJIT_INLINE void setAllocType(uint32_t allocType) { - _allocType = allocType; - } + ASMJIT_INLINE void setAllocType(uint32_t allocType) { _allocType = allocType; } //! Get the virtual memory manager. 
- ASMJIT_INLINE VMemMgr* getMemMgr() const { - return const_cast<VMemMgr*>(&_memMgr); - } + ASMJIT_INLINE VMemMgr* getMemMgr() const { return const_cast<VMemMgr*>(&_memMgr); } // -------------------------------------------------------------------------- // [Interface] diff --git a/src/asmjit/base/string.cpp b/src/asmjit/base/string.cpp deleted file mode 100644 index aa65dbe..0000000 --- a/src/asmjit/base/string.cpp +++ /dev/null @@ -1,374 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Dependencies - AsmJit] -#include "../base/intutil.h" -#include "../base/string.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -// Should be placed in read-only memory. -static const char StringBuilder_empty[4] = { 0 }; - -// ============================================================================ -// [asmjit::StringBuilder - Construction / Destruction] -// ============================================================================ - -StringBuilder::StringBuilder() : - _data(const_cast<char*>(StringBuilder_empty)), - _length(0), - _capacity(0), - _canFree(false) {} - -StringBuilder::~StringBuilder() { - if (_canFree) - ASMJIT_FREE(_data); -} - -// ============================================================================ -// [asmjit::StringBuilder - Prepare / Reserve] -// ============================================================================ - -char* StringBuilder::prepare(uint32_t op, size_t len) { - // -------------------------------------------------------------------------- - // [Set] - // -------------------------------------------------------------------------- - - if (op == kStringOpSet) { - // We don't care here, but we can't return a NULL pointer since it indicates - // failure in memory allocation. 
- if (len == 0) { - if (_data != StringBuilder_empty) - _data[0] = 0; - - _length = 0; - return _data; - } - - if (_capacity < len) { - if (len >= IntUtil::maxUInt() - sizeof(intptr_t) * 2) - return NULL; - - size_t to = IntUtil::alignTo(len, sizeof(intptr_t)); - if (to < 256 - sizeof(intptr_t)) - to = 256 - sizeof(intptr_t); - - char* newData = static_cast(ASMJIT_ALLOC(to + sizeof(intptr_t))); - if (newData == NULL) { - clear(); - return NULL; - } - - if (_canFree) - ASMJIT_FREE(_data); - - _data = newData; - _capacity = to + sizeof(intptr_t) - 1; - _canFree = true; - } - - _data[len] = 0; - _length = len; - - ASMJIT_ASSERT(_length <= _capacity); - return _data; - } - - // -------------------------------------------------------------------------- - // [Append] - // -------------------------------------------------------------------------- - - else { - // We don't care here, but we can't return a NULL pointer since it indicates - // failure in memory allocation. - if (len == 0) - return _data + _length; - - // Overflow. 
- if (IntUtil::maxUInt() - sizeof(intptr_t) * 2 - _length < len) - return NULL; - - size_t after = _length + len; - if (_capacity < after) { - size_t to = _capacity; - - if (to < 256) - to = 256; - - while (to < 1024 * 1024 && to < after) - to *= 2; - - if (to < after) { - to = after; - if (to < (IntUtil::maxUInt() - 1024 * 32)) - to = IntUtil::alignTo(to, 1024 * 32); - } - - to = IntUtil::alignTo(to, sizeof(intptr_t)); - char* newData = static_cast(ASMJIT_ALLOC(to + sizeof(intptr_t))); - - if (newData == NULL) - return NULL; - - ::memcpy(newData, _data, _length); - if (_canFree) - ASMJIT_FREE(_data); - - _data = newData; - _capacity = to + sizeof(intptr_t) - 1; - _canFree = true; - } - - char* ret = _data + _length; - _data[after] = 0; - _length = after; - - ASMJIT_ASSERT(_length <= _capacity); - return ret; - } -} - -bool StringBuilder::reserve(size_t to) { - if (_capacity >= to) - return true; - - if (to >= IntUtil::maxUInt() - sizeof(intptr_t) * 2) - return false; - - to = IntUtil::alignTo(to, sizeof(intptr_t)); - - char* newData = static_cast(ASMJIT_ALLOC(to + sizeof(intptr_t))); - if (newData == NULL) - return false; - - ::memcpy(newData, _data, _length + 1); - if (_canFree) - ASMJIT_FREE(_data); - - _data = newData; - _capacity = to + sizeof(intptr_t) - 1; - _canFree = true; - return true; -} - -// ============================================================================ -// [asmjit::StringBuilder - Clear] -// ============================================================================ - -void StringBuilder::clear() { - if (_data != StringBuilder_empty) - _data[0] = 0; - _length = 0; -} - -// ============================================================================ -// [asmjit::StringBuilder - Methods] -// ============================================================================ - -bool StringBuilder::_opString(uint32_t op, const char* str, size_t len) { - if (len == kInvalidIndex) - len = str != NULL ? 
::strlen(str) : static_cast(0); - - char* p = prepare(op, len); - if (p == NULL) - return false; - - ::memcpy(p, str, len); - return true; -} - -bool StringBuilder::_opChar(uint32_t op, char c) { - char* p = prepare(op, 1); - if (p == NULL) - return false; - - *p = c; - return true; -} - -bool StringBuilder::_opChars(uint32_t op, char c, size_t len) { - char* p = prepare(op, len); - if (p == NULL) - return false; - - ::memset(p, c, len); - return true; -} - -static const char StringBuilder_numbers[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - -bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) { - if (base < 2 || base > 36) - base = 10; - - char buf[128]; - char* p = buf + ASMJIT_ARRAY_SIZE(buf); - - uint64_t orig = i; - char sign = '\0'; - - // -------------------------------------------------------------------------- - // [Sign] - // -------------------------------------------------------------------------- - - if ((flags & kStringFormatSigned) != 0 && static_cast(i) < 0) { - i = static_cast(-static_cast(i)); - sign = '-'; - } - else if ((flags & kStringFormatShowSign) != 0) { - sign = '+'; - } - else if ((flags & kStringFormatShowSpace) != 0) { - sign = ' '; - } - - // -------------------------------------------------------------------------- - // [Number] - // -------------------------------------------------------------------------- - - do { - uint64_t d = i / base; - uint64_t r = i % base; - - *--p = StringBuilder_numbers[r]; - i = d; - } while (i); - - size_t numberLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p); - - // -------------------------------------------------------------------------- - // [Alternate Form] - // -------------------------------------------------------------------------- - - if ((flags & kStringFormatAlternate) != 0) { - if (base == 8) { - if (orig != 0) - *--p = '0'; - } - if (base == 16) { - *--p = 'x'; - *--p = '0'; - } - } - - // 
-------------------------------------------------------------------------- - // [Width] - // -------------------------------------------------------------------------- - - if (sign != 0) - *--p = sign; - - if (width > 256) - width = 256; - - if (width <= numberLength) - width = 0; - else - width -= numberLength; - - // -------------------------------------------------------------------------- - // Write] - // -------------------------------------------------------------------------- - - size_t prefixLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberLength; - char* data = prepare(op, prefixLength + width + numberLength); - - if (data == NULL) - return false; - - ::memcpy(data, p, prefixLength); - data += prefixLength; - - ::memset(data, '0', width); - data += width; - - ::memcpy(data, p + prefixLength, numberLength); - return true; -} - -bool StringBuilder::_opHex(uint32_t op, const void* data, size_t len) { - if (len >= IntUtil::maxUInt() / 2) - return false; - - char* dst = prepare(op, len * 2); - if (dst == NULL) - return false; - - const char* src = static_cast(data); - for (size_t i = 0; i < len; i++, dst += 2, src += 1) - { - dst[0] = StringBuilder_numbers[(src[0] >> 4) & 0xF]; - dst[1] = StringBuilder_numbers[(src[0] ) & 0xF]; - } - - return true; -} - -bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) { - char buf[1024]; - - vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap); - buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0'; - - return _opString(op, buf); -} - -bool StringBuilder::setFormat(const char* fmt, ...) { - bool result; - - va_list ap; - va_start(ap, fmt); - result = _opVFormat(kStringOpSet, fmt, ap); - va_end(ap); - - return result; -} - -bool StringBuilder::appendFormat(const char* fmt, ...) 
{ - bool result; - - va_list ap; - va_start(ap, fmt); - result = _opVFormat(kStringOpAppend, fmt, ap); - va_end(ap); - - return result; -} - -bool StringBuilder::eq(const char* str, size_t len) const { - const char* aData = _data; - const char* bData = str; - - size_t aLength = _length; - size_t bLength = len; - - if (bLength == kInvalidIndex) { - size_t i; - for (i = 0; i < aLength; i++) { - if (aData[i] != bData[i] || bData[i] == 0) - return false; - } - - return bData[i] == 0; - } - else { - if (aLength != bLength) - return false; - - return ::memcmp(aData, bData, aLength) == 0; - } -} - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" diff --git a/src/asmjit/base/string.h b/src/asmjit/base/string.h deleted file mode 100644 index 1429a89..0000000 --- a/src/asmjit/base/string.h +++ /dev/null @@ -1,372 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_BASE_STRING_H -#define _ASMJIT_BASE_STRING_H - -// [Dependencies - AsmJit] -#include "../base/globals.h" - -// [Dependencies - C] -#include - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -//! \addtogroup asmjit_base_util -//! \{ - -// ============================================================================ -// [asmjit::StringOp] -// ============================================================================ - -//! \internal -//! -//! String operation. -ASMJIT_ENUM(StringOp) { - //! Replace the current string by a given content. - kStringOpSet = 0, - //! Append a given content to the current string. - kStringOpAppend = 1 -}; - -// ============================================================================ -// [asmjit::StringFormatFlags] -// ============================================================================ - -//! \internal -//! -//! String format flags. 
-ASMJIT_ENUM(StringFormatFlags) { - kStringFormatShowSign = 0x00000001, - kStringFormatShowSpace = 0x00000002, - kStringFormatAlternate = 0x00000004, - kStringFormatSigned = 0x80000000 -}; - -// ============================================================================ -// [asmjit::StringUtil] -// ============================================================================ - -//! String utilities. -struct StringUtil { - static ASMJIT_INLINE size_t nlen(const char* s, size_t maxlen) { - size_t i; - for (i = 0; i < maxlen; i++) - if (!s[i]) - break; - return i; - } -}; - -// ============================================================================ -// [asmjit::StringBuilder] -// ============================================================================ - -//! String builder. -//! -//! String builder was designed to be able to build a string using append like -//! operation to append numbers, other strings, or signle characters. It can -//! allocate it's own buffer or use a buffer created on the stack. -//! -//! String builder contains method specific to AsmJit functionality, used for -//! logging or HTML output. -struct StringBuilder { - ASMJIT_NO_COPY(StringBuilder) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_API StringBuilder(); - ASMJIT_API ~StringBuilder(); - - ASMJIT_INLINE StringBuilder(const _NoInit&) {} - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get string builder capacity. - ASMJIT_INLINE size_t getCapacity() const { return _capacity; } - //! Get length. - ASMJIT_INLINE size_t getLength() const { return _length; } - - //! Get null-terminated string data. - ASMJIT_INLINE char* getData() { return _data; } - //! Get null-terminated string data (const). 
- ASMJIT_INLINE const char* getData() const { return _data; } - - // -------------------------------------------------------------------------- - // [Prepare / Reserve] - // -------------------------------------------------------------------------- - - //! Prepare to set/append. - ASMJIT_API char* prepare(uint32_t op, size_t len); - - //! Reserve `to` bytes in string builder. - ASMJIT_API bool reserve(size_t to); - - // -------------------------------------------------------------------------- - // [Clear] - // -------------------------------------------------------------------------- - - //! Clear the content in String builder. - ASMJIT_API void clear(); - - // -------------------------------------------------------------------------- - // [Op] - // -------------------------------------------------------------------------- - - ASMJIT_API bool _opString(uint32_t op, const char* str, size_t len = kInvalidIndex); - ASMJIT_API bool _opVFormat(uint32_t op, const char* fmt, va_list ap); - ASMJIT_API bool _opChar(uint32_t op, char c); - ASMJIT_API bool _opChars(uint32_t op, char c, size_t len); - ASMJIT_API bool _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0); - ASMJIT_API bool _opHex(uint32_t op, const void* data, size_t len); - - // -------------------------------------------------------------------------- - // [Set] - // -------------------------------------------------------------------------- - - //! Replace the current content by `str` of `len`. - ASMJIT_INLINE bool setString(const char* str, size_t len = kInvalidIndex) { - return _opString(kStringOpSet, str, len); - } - - //! Replace the current content by formatted string `fmt`. - ASMJIT_INLINE bool setVFormat(const char* fmt, va_list ap) { - return _opVFormat(kStringOpSet, fmt, ap); - } - - //! Replace the current content by formatted string `fmt`. - ASMJIT_API bool setFormat(const char* fmt, ...); - - //! Replace the current content by `c` character. 
- ASMJIT_INLINE bool setChar(char c) { - return _opChar(kStringOpSet, c); - } - - //! Replace the current content by `c` of `len`. - ASMJIT_INLINE bool setChars(char c, size_t len) { - return _opChars(kStringOpSet, c, len); - } - - //! Replace the current content by formatted integer `i`. - ASMJIT_INLINE bool setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { - return _opNumber(kStringOpSet, i, base, width, flags | kStringFormatSigned); - } - - //! Replace the current content by formatted integer `i`. - ASMJIT_INLINE bool setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { - return _opNumber(kStringOpSet, i, base, width, flags); - } - - //! Replace the current content by the given `data` converted to a HEX string. - ASMJIT_INLINE bool setHex(const void* data, size_t len) { - return _opHex(kStringOpSet, data, len); - } - - // -------------------------------------------------------------------------- - // [Append] - // -------------------------------------------------------------------------- - - //! Append `str` of `len`. - ASMJIT_INLINE bool appendString(const char* str, size_t len = kInvalidIndex) { - return _opString(kStringOpAppend, str, len); - } - - //! Append a formatted string `fmt` to the current content. - ASMJIT_INLINE bool appendVFormat(const char* fmt, va_list ap) { - return _opVFormat(kStringOpAppend, fmt, ap); - } - - //! Append a formatted string `fmt` to the current content. - ASMJIT_API bool appendFormat(const char* fmt, ...); - - //! Append `c` character. - ASMJIT_INLINE bool appendChar(char c) { - return _opChar(kStringOpAppend, c); - } - - //! Append `c` of `len`. - ASMJIT_INLINE bool appendChars(char c, size_t len) { - return _opChars(kStringOpAppend, c, len); - } - - //! Append `i`. - ASMJIT_INLINE bool appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { - return _opNumber(kStringOpAppend, static_cast(i), base, width, flags | kStringFormatSigned); - } - - //! 
Append `i`. - ASMJIT_INLINE bool appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { - return _opNumber(kStringOpAppend, i, base, width, flags); - } - - //! Append the given `data` converted to a HEX string. - ASMJIT_INLINE bool appendHex(const void* data, size_t len) { - return _opHex(kStringOpAppend, data, len); - } - - // -------------------------------------------------------------------------- - // [_Append] - // -------------------------------------------------------------------------- - - //! Append `str` of `len`, inlined, without buffer overflow check. - ASMJIT_INLINE void _appendString(const char* str, size_t len = kInvalidIndex) { - // len should be a constant if we are inlining. - if (len == kInvalidIndex) { - char* p = &_data[_length]; - - while (*str) { - ASMJIT_ASSERT(p < _data + _capacity); - *p++ = *str++; - } - - *p = '\0'; - _length = (size_t)(p - _data); - } - else { - ASMJIT_ASSERT(_capacity - _length >= len); - - char* p = &_data[_length]; - char* pEnd = p + len; - - while (p < pEnd) - *p++ = *str++; - - *p = '\0'; - _length += len; - } - } - - //! Append `c` character, inlined, without buffer overflow check. - ASMJIT_INLINE void _appendChar(char c) { - ASMJIT_ASSERT(_capacity - _length >= 1); - - _data[_length] = c; - _length++; - _data[_length] = '\0'; - } - - //! Append `c` of `len`, inlined, without buffer overflow check. 
- ASMJIT_INLINE void _appendChars(char c, size_t len) { - ASMJIT_ASSERT(_capacity - _length >= len); - - char* p = &_data[_length]; - char* pEnd = p + len; - - while (p < pEnd) - *p++ = c; - - *p = '\0'; - _length += len; - } - - ASMJIT_INLINE void _appendUInt32(uint32_t i) { - char buf_[32]; - - char* pEnd = buf_ + ASMJIT_ARRAY_SIZE(buf_); - char* pBuf = pEnd; - - do { - uint32_t d = i / 10; - uint32_t r = i % 10; - - *--pBuf = static_cast(r + '0'); - i = d; - } while (i); - - ASMJIT_ASSERT(_capacity - _length >= (size_t)(pEnd - pBuf)); - char* p = &_data[_length]; - - do { - *p++ = *pBuf; - } while (++pBuf != pEnd); - - *p = '\0'; - _length = (size_t)(p - _data); - } - - // -------------------------------------------------------------------------- - // [Eq] - // -------------------------------------------------------------------------- - - //! Check for equality with other `str` of `len`. - ASMJIT_API bool eq(const char* str, size_t len = kInvalidIndex) const; - //! Check for equality with `other`. - ASMJIT_INLINE bool eq(const StringBuilder& other) const { - return eq(other._data); - } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE bool operator==(const StringBuilder& other) const { return eq(other); } - ASMJIT_INLINE bool operator!=(const StringBuilder& other) const { return !eq(other); } - - ASMJIT_INLINE bool operator==(const char* str) const { return eq(str); } - ASMJIT_INLINE bool operator!=(const char* str) const { return !eq(str); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! String data. - char* _data; - //! Length. - size_t _length; - //! Capacity. - size_t _capacity; - //! Whether the string can be freed. 
- size_t _canFree; -}; - -// ============================================================================ -// [asmjit::StringBuilderT] -// ============================================================================ - -//! \internal -template -struct StringBuilderT : public StringBuilder { - ASMJIT_NO_COPY(StringBuilderT) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE StringBuilderT() : StringBuilder(NoInit) { - _data = _embeddedData; - _data[0] = 0; - - _length = 0; - _capacity = N; - _canFree = false; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Embedded data. - char _embeddedData[static_cast( - N + 1 + sizeof(intptr_t)) & ~static_cast(sizeof(intptr_t) - 1)]; -}; - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" - -// [Guard] -#endif // _ASMJIT_BASE_STRING_H diff --git a/src/asmjit/base/utils.cpp b/src/asmjit/base/utils.cpp new file mode 100644 index 0000000..0bc92d6 --- /dev/null +++ b/src/asmjit/base/utils.cpp @@ -0,0 +1,291 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +// [Export] +#define ASMJIT_EXPORTS + +// [Dependencies - AsmJit] +#include "../base/utils.h" + +// [Dependencies - Posix] +#if ASMJIT_OS_POSIX +# include +# include +#endif // ASMJIT_OS_POSIX + +// [Dependencies - Mac] +#if ASMJIT_OS_MAC +# include +#endif // ASMJIT_OS_MAC + +// [Dependencies - Windows] +#if ASMJIT_OS_WINDOWS +// `_InterlockedCompareExchange` is only available as intrinsic (MS Compiler). 
+# if defined(_MSC_VER) && _MSC_VER >= 1400 +# include +# pragma intrinsic(_InterlockedCompareExchange) +# else +# define _InterlockedCompareExchange InterlockedCompareExchange +# endif // _MSC_VER +#endif // ASMJIT_OS_WINDOWS + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { + +// ============================================================================ +// [asmjit::CpuTicks - Windows] +// ============================================================================ + +#if ASMJIT_OS_WINDOWS +static volatile uint32_t Utils_hiResTicks; +static volatile double Utils_hiResFreq; + +uint32_t Utils::getTickCount() { + do { + uint32_t hiResOk = Utils_hiResTicks; + + if (hiResOk == 1) { + LARGE_INTEGER now; + if (!::QueryPerformanceCounter(&now)) + break; + return (int64_t)(double(now.QuadPart) / Utils_hiResFreq); + } + + if (hiResOk == 0) { + LARGE_INTEGER qpf; + if (!::QueryPerformanceFrequency(&qpf)) { + _InterlockedCompareExchange((LONG*)&Utils_hiResTicks, 0xFFFFFFFF, 0); + break; + } + + LARGE_INTEGER now; + if (!::QueryPerformanceCounter(&now)) { + _InterlockedCompareExchange((LONG*)&Utils_hiResTicks, 0xFFFFFFFF, 0); + break; + } + + double freqDouble = double(qpf.QuadPart) / 1000.0; + + Utils_hiResFreq = freqDouble; + _InterlockedCompareExchange((LONG*)&Utils_hiResTicks, 1, 0); + + return static_cast( + static_cast(double(now.QuadPart) / freqDouble) & 0xFFFFFFFF); + } + } while (0); + + // Bail to a less precise GetTickCount(). + return ::GetTickCount(); +} + +// ============================================================================ +// [asmjit::CpuTicks - Mac] +// ============================================================================ + +#elif ASMJIT_OS_MAC +static mach_timebase_info_data_t CpuTicks_machTime; + +uint32_t Utils::getTickCount() { + // Initialize the first time CpuTicks::now() is called (See Apple's QA1398). 
+ if (CpuTicks_machTime.denom == 0) { + if (mach_timebase_info(&CpuTicks_machTime) != KERN_SUCCESS) + return 0; + } + + // mach_absolute_time() returns nanoseconds, we need just milliseconds. + uint64_t t = mach_absolute_time() / 1000000; + + t = t * CpuTicks_machTime.numer / CpuTicks_machTime.denom; + return static_cast(t & 0xFFFFFFFFU); +} + +// ============================================================================ +// [asmjit::CpuTicks - Posix] +// ============================================================================ + +#else +uint32_t Utils::getTickCount() { +#if defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0 + struct timespec ts; + + if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) + return 0; + + uint64_t t = (uint64_t(ts.tv_sec ) * 1000) + (uint64_t(ts.tv_nsec) / 1000000); + return static_cast(t & 0xFFFFFFFFU); +#else // _POSIX_MONOTONIC_CLOCK +#error "[asmjit] Utils::getTickCount() is not implemented for your target OS." + return 0; +#endif // _POSIX_MONOTONIC_CLOCK +} +#endif // ASMJIT_OS + +#if defined(ASMJIT_TEST) +UNIT(base_utils) { + uint32_t i; + + INFO("IntTraits<>."); + EXPECT(IntTraits::kIsSigned,"IntTraits should report signed."); + EXPECT(IntTraits::kIsSigned, "IntTraits should report signed."); + EXPECT(IntTraits::kIsSigned, "IntTraits should report signed."); + EXPECT(IntTraits::kIsSigned, "IntTraits should report signed."); + + EXPECT(IntTraits::kIsUnsigned, "IntTraits should report unsigned."); + EXPECT(IntTraits::kIsUnsigned, "IntTraits should report unsigned."); + EXPECT(IntTraits::kIsUnsigned, "IntTraits should report unsigned."); + EXPECT(IntTraits::kIsUnsigned, "IntTraits should report unsigned."); + + EXPECT(IntTraits::kIsSigned, "IntTraits should report signed."); + EXPECT(IntTraits::kIsUnsigned, "IntTraits should report unsigned."); + + EXPECT(IntTraits::kIsIntPtr, "IntTraits should report intptr_t type."); + EXPECT(IntTraits::kIsIntPtr, "IntTraits should report intptr_t type."); + + 
INFO("Utils::iMin()/iMax()."); + EXPECT(Utils::iMin( 0, -1) == -1, "Utils::iMin should return a minimum value."); + EXPECT(Utils::iMin(-1, -2) == -2, "Utils::iMin should return a minimum value."); + EXPECT(Utils::iMin( 1, 2) == 1, "Utils::iMin should return a minimum value."); + + EXPECT(Utils::iMax( 0, -1) == 0, "Utils::iMax should return a maximum value."); + EXPECT(Utils::iMax(-1, -2) == -1, "Utils::iMax should return a maximum value."); + EXPECT(Utils::iMax( 1, 2) == 2, "Utils::iMax should return a maximum value."); + + INFO("Utils::inInterval()."); + EXPECT(Utils::inInterval(11 , 10, 20) == true , "Utils::inInterval should return true if inside."); + EXPECT(Utils::inInterval(101, 10, 20) == false, "Utils::inInterval should return false if outside."); + + INFO("Utils::isInt8()."); + EXPECT(Utils::isInt8(-128) == true , "Utils::isInt8<> should return true if inside."); + EXPECT(Utils::isInt8( 127) == true , "Utils::isInt8<> should return true if inside."); + EXPECT(Utils::isInt8(-129) == false, "Utils::isInt8<> should return false if outside."); + EXPECT(Utils::isInt8( 128) == false, "Utils::isInt8<> should return false if outside."); + + INFO("Utils::isInt16()."); + EXPECT(Utils::isInt16(-32768) == true , "Utils::isInt16<> should return true if inside."); + EXPECT(Utils::isInt16( 32767) == true , "Utils::isInt16<> should return true if inside."); + EXPECT(Utils::isInt16(-32769) == false, "Utils::isInt16<> should return false if outside."); + EXPECT(Utils::isInt16( 32768) == false, "Utils::isInt16<> should return false if outside."); + + INFO("Utils::isInt32()."); + EXPECT(Utils::isInt32( 2147483647 ) == true, "Utils::isInt32 should return true if inside."); + EXPECT(Utils::isInt32(-2147483647 - 1) == true, "Utils::isInt32 should return true if inside."); + EXPECT(Utils::isInt32(ASMJIT_UINT64_C(2147483648)) == false, "Utils::isInt32 should return false if outside."); + EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == false, "Utils::isInt32 should return 
false if outside."); + EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isInt32 should return false if outside."); + + INFO("Utils::isUInt8()."); + EXPECT(Utils::isUInt8(0) == true , "Utils::isUInt8<> should return true if inside."); + EXPECT(Utils::isUInt8(255) == true , "Utils::isUInt8<> should return true if inside."); + EXPECT(Utils::isUInt8(256) == false, "Utils::isUInt8<> should return false if outside."); + EXPECT(Utils::isUInt8(-1) == false, "Utils::isUInt8<> should return false if negative."); + + INFO("Utils::isUInt12()."); + EXPECT(Utils::isUInt12(0) == true , "Utils::isUInt12<> should return true if inside."); + EXPECT(Utils::isUInt12(4095) == true , "Utils::isUInt12<> should return true if inside."); + EXPECT(Utils::isUInt12(4096) == false, "Utils::isUInt12<> should return false if outside."); + EXPECT(Utils::isUInt12(-1) == false, "Utils::isUInt12<> should return false if negative."); + + INFO("Utils::isUInt16()."); + EXPECT(Utils::isUInt16(0) == true , "Utils::isUInt16<> should return true if inside."); + EXPECT(Utils::isUInt16(65535) == true , "Utils::isUInt16<> should return true if inside."); + EXPECT(Utils::isUInt16(65536) == false, "Utils::isUInt16<> should return false if outside."); + EXPECT(Utils::isUInt16(-1) == false, "Utils::isUInt16<> should return false if negative."); + + INFO("Utils::isUInt32()."); + EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == true, "Utils::isUInt32 should return true if inside."); + EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isUInt32 should return false if outside."); + EXPECT(Utils::isUInt32(-1) == false, "Utils::isUInt32 should return false if negative."); + + INFO("Utils::isPower2()."); + for (i = 0; i < 64; i++) { + EXPECT(Utils::isPowerOf2(static_cast(1) << i) == true, + "Utils::isPower2() didn't report power of 2."); + EXPECT(Utils::isPowerOf2((static_cast(1) << i) ^ 0x001101) == false, + "Utils::isPower2() didn't report not power of 2."); + } + + 
INFO("Utils::mask()."); + for (i = 0; i < 32; i++) { + EXPECT(Utils::mask(i) == (1 << i), + "Utils::mask(%u) should return %X.", i, (1 << i)); + } + + INFO("Utils::bits()."); + for (i = 0; i < 32; i++) { + uint32_t expectedBits = 0; + + for (uint32_t b = 0; b < i; b++) + expectedBits |= static_cast(1) << b; + + EXPECT(Utils::bits(i) == expectedBits, + "Utils::bits(%u) should return %X.", i, expectedBits); + } + + INFO("Utils::hasBit()."); + for (i = 0; i < 32; i++) { + EXPECT(Utils::hasBit((1 << i), i) == true, + "Utils::hasBit(%X, %u) should return true.", (1 << i), i); + } + + INFO("Utils::bitCount()."); + for (i = 0; i < 32; i++) { + EXPECT(Utils::bitCount((1 << i)) == 1, + "Utils::bitCount(%X) should return true.", (1 << i)); + } + EXPECT(Utils::bitCount(0x000000F0) == 4, ""); + EXPECT(Utils::bitCount(0x10101010) == 4, ""); + EXPECT(Utils::bitCount(0xFF000000) == 8, ""); + EXPECT(Utils::bitCount(0xFFFFFFF7) == 31, ""); + EXPECT(Utils::bitCount(0x7FFFFFFF) == 31, ""); + + INFO("Utils::findFirstBit()."); + for (i = 0; i < 32; i++) { + EXPECT(Utils::findFirstBit((1 << i)) == i, + "Utils::findFirstBit(%X) should return %u.", (1 << i), i); + } + + INFO("Utils::keepNOnesFromRight()."); + EXPECT(Utils::keepNOnesFromRight(0xF, 1) == 0x1, ""); + EXPECT(Utils::keepNOnesFromRight(0xF, 2) == 0x3, ""); + EXPECT(Utils::keepNOnesFromRight(0xF, 3) == 0x7, ""); + EXPECT(Utils::keepNOnesFromRight(0x5, 2) == 0x5, ""); + EXPECT(Utils::keepNOnesFromRight(0xD, 2) == 0x5, ""); + + INFO("Utils::isAligned()."); + EXPECT(Utils::isAligned(0xFFFF, 4) == false, ""); + EXPECT(Utils::isAligned(0xFFF4, 4) == true , ""); + EXPECT(Utils::isAligned(0xFFF8, 8) == true , ""); + EXPECT(Utils::isAligned(0xFFF0, 16) == true , ""); + + INFO("Utils::alignTo()."); + EXPECT(Utils::alignTo(0xFFFF, 4) == 0x10000, ""); + EXPECT(Utils::alignTo(0xFFF4, 4) == 0x0FFF4, ""); + EXPECT(Utils::alignTo(0xFFF8, 8) == 0x0FFF8, ""); + EXPECT(Utils::alignTo(0xFFF0, 16) == 0x0FFF0, ""); + EXPECT(Utils::alignTo(0xFFF0, 
32) == 0x10000, ""); + + INFO("Utils::alignToPowerOf2()."); + EXPECT(Utils::alignToPowerOf2(0xFFFF) == 0x10000, ""); + EXPECT(Utils::alignToPowerOf2(0xF123) == 0x10000, ""); + EXPECT(Utils::alignToPowerOf2(0x0F00) == 0x01000, ""); + EXPECT(Utils::alignToPowerOf2(0x0100) == 0x00100, ""); + EXPECT(Utils::alignToPowerOf2(0x1001) == 0x02000, ""); + + INFO("Utils::alignDiff()."); + EXPECT(Utils::alignDiff(0xFFFF, 4) == 1, ""); + EXPECT(Utils::alignDiff(0xFFF4, 4) == 0, ""); + EXPECT(Utils::alignDiff(0xFFF8, 8) == 0, ""); + EXPECT(Utils::alignDiff(0xFFF0, 16) == 0, ""); + EXPECT(Utils::alignDiff(0xFFF0, 32) == 16, ""); +} +#endif // ASMJIT_TEST + +} // asmjit namespace + +// [Api-End] +#include "../apiend.h" diff --git a/src/asmjit/base/intutil.h b/src/asmjit/base/utils.h similarity index 64% rename from src/asmjit/base/intutil.h rename to src/asmjit/base/utils.h index 32b47ce..f9d26a1 100644 --- a/src/asmjit/base/intutil.h +++ b/src/asmjit/base/utils.h @@ -5,8 +5,8 @@ // Zlib - See LICENSE.md file in the package. // [Guard] -#ifndef _ASMJIT_BASE_INTUTIL_H -#define _ASMJIT_BASE_INTUTIL_H +#ifndef _ASMJIT_BASE_UTILS_H +#define _ASMJIT_BASE_UTILS_H // [Dependencies - AsmJit] #include "../base/globals.h" @@ -21,13 +21,32 @@ namespace asmjit { -//! \addtogroup asmjit_base_util +//! \addtogroup asmjit_base //! \{ // ============================================================================ // [asmjit::IntTraits] // ============================================================================ +//! \internal +//! \{ +template +struct IntTraitsPrivate { + // Let it fail if not specialized! 
+}; + +template<> struct IntTraitsPrivate<1, 0> { typedef int IntType; typedef int8_t SignedType; typedef uint8_t UnsignedType; }; +template<> struct IntTraitsPrivate<1, 1> { typedef int IntType; typedef int8_t SignedType; typedef uint8_t UnsignedType; }; + +template<> struct IntTraitsPrivate<2, 0> { typedef int IntType; typedef int16_t SignedType; typedef uint16_t UnsignedType; }; +template<> struct IntTraitsPrivate<2, 1> { typedef int IntType; typedef int16_t SignedType; typedef uint16_t UnsignedType; }; + +template<> struct IntTraitsPrivate<4, 0> { typedef int64_t IntType; typedef int32_t SignedType; typedef uint32_t UnsignedType; }; +template<> struct IntTraitsPrivate<4, 1> { typedef int IntType; typedef int32_t SignedType; typedef uint32_t UnsignedType; }; + +template<> struct IntTraitsPrivate<8, 0> { typedef int64_t IntType; typedef int64_t SignedType; typedef uint64_t UnsignedType; }; +template<> struct IntTraitsPrivate<8, 1> { typedef int64_t IntType; typedef int64_t SignedType; typedef uint64_t UnsignedType; }; + //! \internal template struct IntTraits { @@ -42,99 +61,87 @@ struct IntTraits { kIsIntPtr = sizeof(T) == sizeof(intptr_t) }; + + typedef typename IntTraitsPrivate::IntType IntType; + typedef typename IntTraitsPrivate::SignedType SignedType; + typedef typename IntTraitsPrivate::UnsignedType UnsignedType; + + //! Get a minimum value of `T`. + static ASMJIT_INLINE T minValue() { + if (kIsSigned) + return static_cast((~static_cast(0) >> 1) + static_cast(1)); + else + return static_cast(0); + } + + //! Get a maximum value of `T`. 
+ static ASMJIT_INLINE T maxValue() { + if (kIsSigned) + return static_cast(~static_cast(0) >> 1); + else + return ~static_cast(0); + } }; -// \internal -template -struct AsInt_ { typedef int64_t Int; }; - -template<> struct AsInt_<1, 0> { typedef int Int; }; -template<> struct AsInt_<1, 1> { typedef int Int; }; -template<> struct AsInt_<2, 0> { typedef int Int; }; -template<> struct AsInt_<2, 1> { typedef int Int; }; -template<> struct AsInt_<4, 1> { typedef int Int; }; - -// \internal -// -// Map an integer `T` to an `int` or `int64_t`, depending on the type. Used -// internally by AsmJit to dispatch an argument of arbitrary integer type into -// a function that accepts either `int` or `int64_t`. -template -struct AsInt { - typedef typename AsInt_::kIsSigned>::Int Int; -}; - -template -ASMJIT_INLINE typename AsInt::Int asInt(T value) { - return static_cast::Int>(value); -} +//! \} // ============================================================================ -// [asmjit::IntUtil] +// [asmjit::Utils] // ============================================================================ -//! Integer utilities. -struct IntUtil { +//! AsmJit utilities - integer, string, etc... +struct Utils { // -------------------------------------------------------------------------- // [Float <-> Int] // -------------------------------------------------------------------------- //! \internal - union Float { + union FloatBits { int32_t i; float f; }; //! \internal - union Double { + union DoubleBits { int64_t i; double d; }; //! Bit-cast `float` to 32-bit integer. - static ASMJIT_INLINE int32_t floatAsInt(float f) { Float m; m.f = f; return m.i; } + static ASMJIT_INLINE int32_t floatAsInt(float f) { FloatBits m; m.f = f; return m.i; } //! Bit-cast 32-bit integer to `float`. - static ASMJIT_INLINE float intAsFloat(int32_t i) { Float m; m.i = i; return m.f; } + static ASMJIT_INLINE float intAsFloat(int32_t i) { FloatBits m; m.i = i; return m.f; } //! Bit-cast `double` to 64-bit integer. 
- static ASMJIT_INLINE int64_t doubleAsInt(double d) { Double m; m.d = d; return m.i; } + static ASMJIT_INLINE int64_t doubleAsInt(double d) { DoubleBits m; m.d = d; return m.i; } //! Bit-cast 64-bit integer to `double`. - static ASMJIT_INLINE double intAsDouble(int64_t i) { Double m; m.i = i; return m.d; } + static ASMJIT_INLINE double intAsDouble(int64_t i) { DoubleBits m; m.i = i; return m.d; } // -------------------------------------------------------------------------- - // [AsmJit - Pack / Unpack] + // [Pack / Unpack] // -------------------------------------------------------------------------- //! Pack two 8-bit integer and one 16-bit integer into a 32-bit integer as it //! is an array of `{u0,u1,w2}`. static ASMJIT_INLINE uint32_t pack32_2x8_1x16(uint32_t u0, uint32_t u1, uint32_t w2) { -#if defined(ASMJIT_ARCH_LE) - return u0 + (u1 << 8) + (w2 << 16); -#else - return (u0 << 24) + (u1 << 16) + (w2); -#endif + return ASMJIT_ARCH_LE ? u0 + (u1 << 8) + (w2 << 16) + : (u0 << 24) + (u1 << 16) + w2; } //! Pack four 8-bit integer into a 32-bit integer as it is an array of `{u0,u1,u2,u3}`. static ASMJIT_INLINE uint32_t pack32_4x8(uint32_t u0, uint32_t u1, uint32_t u2, uint32_t u3) { -#if defined(ASMJIT_ARCH_LE) - return u0 + (u1 << 8) + (u2 << 16) + (u3 << 24); -#else - return (u0 << 24) + (u1 << 16) + (u2 << 8) + u3; -#endif + return ASMJIT_ARCH_LE ? u0 + (u1 << 8) + (u2 << 16) + (u3 << 24) + : (u0 << 24) + (u1 << 16) + (u2 << 8) + u3; } //! Pack two 32-bit integer into a 64-bit integer as it is an array of `{u0,u1}`. static ASMJIT_INLINE uint64_t pack64_2x32(uint32_t u0, uint32_t u1) { -#if defined(ASMJIT_ARCH_LE) - return (static_cast(u1) << 32) + u0; -#else - return (static_cast(u0) << 32) + u1; -#endif + return ASMJIT_ARCH_LE ? 
(static_cast(u1) << 32) + u0 + : (static_cast(u0) << 32) + u1; } // -------------------------------------------------------------------------- - // [AsmJit - Min/Max] + // [Min/Max] // -------------------------------------------------------------------------- // NOTE: Because some environments declare min() and max() as macros, it has @@ -149,83 +156,118 @@ struct IntUtil { static ASMJIT_INLINE T iMax(const T& a, const T& b) { return a > b ? a : b; } // -------------------------------------------------------------------------- - // [AsmJit - MaxUInt] + // [InInterval] // -------------------------------------------------------------------------- - //! Get maximum unsigned value of `T`. + //! Get whether `x` is greater than or equal to `a` and lesses than or equal to `b`. template - static ASMJIT_INLINE T maxUInt() { return ~T(0); } - - // -------------------------------------------------------------------------- - // [AsmJit - InInterval] - // -------------------------------------------------------------------------- - - //! Get whether `x` is greater or equal than `start` and less or equal than `end`. - template - static ASMJIT_INLINE bool inInterval(const T& x, const T& start, const T& end) { - return x >= start && x <= end; + static ASMJIT_INLINE bool inInterval(T x, T a, T b) { + return x >= a && x <= b; } // -------------------------------------------------------------------------- - // [AsmJit - IsInt/IsUInt] + // [AsInt] // -------------------------------------------------------------------------- - //! Get whether the given integer `x` can be casted to 8-bit signed integer. + //! Map an integer `x` of type `T` to an `int` or `int64_t`, depending on the + //! type. Used internally by AsmJit to dispatch an argument that can be an + //! arbitrary integer type into a function that accepts either `int` or + //! `int64_t`. 
+ template + static ASMJIT_INLINE typename IntTraits::IntType asInt(T x) { + return static_cast::IntType>(x); + } + + // -------------------------------------------------------------------------- + // [IsInt / IsUInt] + // -------------------------------------------------------------------------- + + //! Get whether the given integer `x` can be casted to an 8-bit signed integer. template static ASMJIT_INLINE bool isInt8(T x) { + typedef typename IntTraits::SignedType SignedType; + typedef typename IntTraits::UnsignedType UnsignedType; + if (IntTraits::kIsSigned) - return sizeof(T) <= sizeof(int8_t) ? true : x >= T(-128) && x <= T(127); + return sizeof(T) <= 1 || inInterval(SignedType(x), -128, 127); else - return x <= T(127); + return UnsignedType(x) <= UnsignedType(127U); } - //! Get whether the given integer `x` can be casted to 8-bit unsigned integer. - template - static ASMJIT_INLINE bool isUInt8(T x) { - if (IntTraits::kIsSigned) - return x >= T(0) && (sizeof(T) <= sizeof(uint8_t) ? true : x <= T(255)); - else - return sizeof(T) <= sizeof(uint8_t) ? true : x <= T(255); - } - - //! Get whether the given integer `x` can be casted to 16-bit signed integer. + //! Get whether the given integer `x` can be casted to a 16-bit signed integer. template static ASMJIT_INLINE bool isInt16(T x) { + typedef typename IntTraits::SignedType SignedType; + typedef typename IntTraits::UnsignedType UnsignedType; + if (IntTraits::kIsSigned) - return sizeof(T) <= sizeof(int16_t) ? true : x >= T(-32768) && x <= T(32767); + return sizeof(T) <= 2 || inInterval(SignedType(x), -32768, 32767); else - return x >= T(0) && (sizeof(T) <= sizeof(int16_t) ? true : x <= T(32767)); + return sizeof(T) <= 1 || UnsignedType(x) <= UnsignedType(32767U); } - //! Get whether the given integer `x` can be casted to 16-bit unsigned integer. - template - static ASMJIT_INLINE bool isUInt16(T x) { - if (IntTraits::kIsSigned) - return x >= T(0) && (sizeof(T) <= sizeof(uint16_t) ? 
true : x <= T(65535)); - else - return sizeof(T) <= sizeof(uint16_t) ? true : x <= T(65535); - } - - //! Get whether the given integer `x` can be casted to 32-bit signed integer. + //! Get whether the given integer `x` can be casted to a 32-bit signed integer. template static ASMJIT_INLINE bool isInt32(T x) { + typedef typename IntTraits::SignedType SignedType; + typedef typename IntTraits::UnsignedType UnsignedType; + if (IntTraits::kIsSigned) - return sizeof(T) <= sizeof(int32_t) ? true : x >= T(-2147483647) - 1 && x <= T(2147483647); + return sizeof(T) <= 4 || inInterval(SignedType(x), -2147483647 - 1, 2147483647); else - return x >= T(0) && (sizeof(T) <= sizeof(int32_t) ? true : x <= T(2147483647)); + return sizeof(T) <= 2 || UnsignedType(x) <= UnsignedType(2147483647U); } - //! Get whether the given integer `x` can be casted to 32-bit unsigned integer. + //! Get whether the given integer `x` can be casted to an 8-bit unsigned integer. + template + static ASMJIT_INLINE bool isUInt8(T x) { + typedef typename IntTraits::SignedType SignedType; + typedef typename IntTraits::UnsignedType UnsignedType; + + if (IntTraits::kIsSigned) + return x >= T(0) && (sizeof(T) <= 1 ? true : x <= T(255)); + else + return sizeof(T) <= 1 || UnsignedType(x) <= UnsignedType(255U); + } + + //! Get whether the given integer `x` can be casted to a 12-bit unsigned integer (ARM specific). + template + static ASMJIT_INLINE bool isUInt12(T x) { + typedef typename IntTraits::SignedType SignedType; + typedef typename IntTraits::UnsignedType UnsignedType; + + if (IntTraits::kIsSigned) + return x >= T(0) && (sizeof(T) <= 1 ? true : x <= T(4095)); + else + return sizeof(T) <= 1 || UnsignedType(x) <= UnsignedType(4095U); + } + + //! Get whether the given integer `x` can be casted to a 16-bit unsigned integer. 
+ template + static ASMJIT_INLINE bool isUInt16(T x) { + typedef typename IntTraits::SignedType SignedType; + typedef typename IntTraits::UnsignedType UnsignedType; + + if (IntTraits::kIsSigned) + return x >= T(0) && (sizeof(T) <= 2 ? true : x <= T(65535)); + else + return sizeof(T) <= 2 || UnsignedType(x) <= UnsignedType(65535U); + } + + //! Get whether the given integer `x` can be casted to a 32-bit unsigned integer. template static ASMJIT_INLINE bool isUInt32(T x) { + typedef typename IntTraits::SignedType SignedType; + typedef typename IntTraits::UnsignedType UnsignedType; + if (IntTraits::kIsSigned) - return x >= T(0) && (sizeof(T) <= sizeof(uint32_t) ? true : x <= T(4294967295U)); + return x >= T(0) && (sizeof(T) <= 4 ? true : x <= T(4294967295U)); else - return sizeof(T) <= sizeof(uint32_t) ? true : x <= T(4294967295U); + return sizeof(T) <= 4 || UnsignedType(x) <= UnsignedType(4294967295U); } // -------------------------------------------------------------------------- - // [AsmJit - IsPowerOf2] + // [IsPowerOf2] // -------------------------------------------------------------------------- //! Get whether the `n` value is a power of two (only one bit is set). @@ -235,13 +277,13 @@ struct IntUtil { } // -------------------------------------------------------------------------- - // [AsmJit - Mask] + // [Mask] // -------------------------------------------------------------------------- //! Generate a bit-mask that has `x` bit set. static ASMJIT_INLINE uint32_t mask(uint32_t x) { ASMJIT_ASSERT(x < 32); - return (1U << x); + return static_cast(1) << x; } //! Generate a bit-mask that has `x0` and `x1` bits set. @@ -261,71 +303,63 @@ struct IntUtil { //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3` and `x4` bits set. static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | - mask(x4) ; + return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) ; } //! 
Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4` and `x5` bits set. static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | - mask(x4) | mask(x5) ; + return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) | mask(x5) ; } //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5` and `x6` bits set. static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | - mask(x4) | mask(x5) | mask(x6) ; + return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) | mask(x5) | mask(x6) ; } //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5`, `x6` and `x7` bits set. static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | - mask(x4) | mask(x5) | mask(x6) | mask(x7) ; + return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) | mask(x5) | mask(x6) | mask(x7) ; } //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5`, `x6`, `x7` and `x8` bits set. static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | - mask(x4) | mask(x5) | mask(x6) | mask(x7) | - mask(x8) ; + return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) | mask(x5) | mask(x6) | mask(x7) | mask(x8) ; } //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5`, `x6`, `x7`, `x8` and `x9` bits set. 
static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8, uint32_t x9) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | - mask(x4) | mask(x5) | mask(x6) | mask(x7) | - mask(x8) | mask(x9) ; + return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) | mask(x5) | mask(x6) | mask(x7) | mask(x8) | mask(x9) ; } // -------------------------------------------------------------------------- - // [AsmJit - Bits] + // [Bits] // -------------------------------------------------------------------------- //! Generate a bit-mask that has `x` most significant bits set. static ASMJIT_INLINE uint32_t bits(uint32_t x) { - // Shifting more bits that the type has has undefined behavior. Everything - // we need is that application shouldn't crash because of that, but the - // content of register after shift is not defined. So in case that the - // requested shift is too large for the type we correct this undefined - // behavior by setting all bits to ones (this is why we generate an overflow - // mask). - uint32_t overflow = static_cast( - -static_cast(x >= sizeof(uint32_t) * 8)); - + // Shifting more bits that the type has has undefined behavior. Everything + // we need is that application shouldn't crash because of that, but the + // content of register after shift is not defined. So in case that the + // requested shift is too large for the type we correct this undefined + // behavior by setting all bits to ones (this is why we generate an overflow + // mask). + uint32_t overflow = static_cast( + -static_cast(x >= sizeof(uint32_t) * 8)); + return ((static_cast(1) << x) - 1U) | overflow; } // -------------------------------------------------------------------------- - // [AsmJit - HasBit] + // [HasBit] // -------------------------------------------------------------------------- //! Get whether `x` has bit `n` set. 
static ASMJIT_INLINE bool hasBit(uint32_t x, uint32_t n) { - return static_cast((x >> n) & 0x1); + return (x & (static_cast(1) << n)) != 0; } // -------------------------------------------------------------------------- - // [AsmJit - BitCount] + // [BitCount] // -------------------------------------------------------------------------- //! Get count of bits in `x`. @@ -338,14 +372,14 @@ struct IntUtil { } // -------------------------------------------------------------------------- - // [AsmJit - FindFirstBit] + // [FindFirstBit] // -------------------------------------------------------------------------- //! \internal static ASMJIT_INLINE uint32_t findFirstBitSlow(uint32_t mask) { - // This is a reference (slow) implementation of findFirstBit(), used when - // we don't have compiler support for this task. The implementation speed - // has been improved to check for 2 bits per iteration. + // This is a reference (slow) implementation of `findFirstBit()`, used when + // we don't have a C++ compiler support. The implementation speed has been + // improved to check for 2 bits per iteration. uint32_t i = 1; while (mask != 0) { @@ -375,7 +409,7 @@ struct IntUtil { } // -------------------------------------------------------------------------- - // [AsmJit - Misc] + // [Misc] // -------------------------------------------------------------------------- static ASMJIT_INLINE uint32_t keepNOnesFromRight(uint32_t mask, uint32_t nBits) { @@ -414,7 +448,7 @@ struct IntUtil { } // -------------------------------------------------------------------------- - // [AsmJit - Alignment] + // [Alignment] // -------------------------------------------------------------------------- template @@ -457,9 +491,28 @@ struct IntUtil { //! Get delta required to align `base` to `alignment`. 
template - static ASMJIT_INLINE T deltaTo(T base, T alignment) { + static ASMJIT_INLINE T alignDiff(T base, T alignment) { return alignTo(base, alignment) - base; } + + // -------------------------------------------------------------------------- + // [String] + // -------------------------------------------------------------------------- + + static ASMJIT_INLINE size_t strLen(const char* s, size_t maxlen) { + size_t i; + for (i = 0; i < maxlen; i++) + if (!s[i]) + break; + return i; + } + + // -------------------------------------------------------------------------- + // [CpuTicks] + // -------------------------------------------------------------------------- + + //! Get the current CPU tick count, used for benchmarking (1ms resolution). + static ASMJIT_API uint32_t getTickCount(); }; // ============================================================================ @@ -523,7 +576,7 @@ union UInt64 { ASMJIT_INLINE UInt64& setPacked_2x32(uint32_t u0, uint32_t u1) { if (kArchHost64Bit) { - u64 = IntUtil::pack64_2x32(u0, u1); + u64 = Utils::pack64_2x32(u0, u1); } else { u32[0] = u0; @@ -713,6 +766,7 @@ union UInt64 { // [Members] // -------------------------------------------------------------------------- + //! 64-bit unsigned value. uint64_t u64; uint32_t u32[2]; @@ -720,7 +774,7 @@ union UInt64 { uint8_t u8[8]; struct { -#if defined(ASMJIT_ARCH_LE) +#if ASMJIT_ARCH_LE uint32_t lo, hi; #else uint32_t hi, lo; @@ -728,6 +782,87 @@ union UInt64 { }; }; +// ============================================================================ +// [asmjit::Lock] +// ============================================================================ + +//! \internal +//! +//! Lock. +struct Lock { + ASMJIT_NO_COPY(Lock) + + // -------------------------------------------------------------------------- + // [Windows] + // -------------------------------------------------------------------------- + +#if ASMJIT_OS_WINDOWS + typedef CRITICAL_SECTION Handle; + + //! Create a new `Lock` instance. 
+ ASMJIT_INLINE Lock() { InitializeCriticalSection(&_handle); } + //! Destroy the `Lock` instance. + ASMJIT_INLINE ~Lock() { DeleteCriticalSection(&_handle); } + + //! Lock. + ASMJIT_INLINE void lock() { EnterCriticalSection(&_handle); } + //! Unlock. + ASMJIT_INLINE void unlock() { LeaveCriticalSection(&_handle); } +#endif // ASMJIT_OS_WINDOWS + + // -------------------------------------------------------------------------- + // [Posix] + // -------------------------------------------------------------------------- + +#if ASMJIT_OS_POSIX + typedef pthread_mutex_t Handle; + + //! Create a new `Lock` instance. + ASMJIT_INLINE Lock() { pthread_mutex_init(&_handle, NULL); } + //! Destroy the `Lock` instance. + ASMJIT_INLINE ~Lock() { pthread_mutex_destroy(&_handle); } + + //! Lock. + ASMJIT_INLINE void lock() { pthread_mutex_lock(&_handle); } + //! Unlock. + ASMJIT_INLINE void unlock() { pthread_mutex_unlock(&_handle); } +#endif // ASMJIT_OS_POSIX + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Native handle. + Handle _handle; +}; + +// ============================================================================ +// [asmjit::AutoLock] +// ============================================================================ + +//! \internal +//! +//! Scoped lock. +struct AutoLock { + ASMJIT_NO_COPY(AutoLock) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Lock `target`, scoped. + ASMJIT_INLINE AutoLock(Lock& target) : _target(target) { _target.lock(); } + //! Unlock `target`. 
+ ASMJIT_INLINE ~AutoLock() { _target.unlock(); } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Reference to the `Lock`. + Lock& _target; +}; + //! \} } // asmjit namespace @@ -736,4 +871,4 @@ union UInt64 { #include "../apiend.h" // [Guard] -#endif // _ASMJIT_BASE_INTUTIL_H +#endif // _ASMJIT_BASE_UTILS_H diff --git a/src/asmjit/base/vectypes.h b/src/asmjit/base/vectypes.h index d675ec2..0e160aa 100644 --- a/src/asmjit/base/vectypes.h +++ b/src/asmjit/base/vectypes.h @@ -16,7 +16,7 @@ namespace asmjit { -//! \addtogroup asmjit_base_util +//! \addtogroup asmjit_base //! \{ // ============================================================================ diff --git a/src/asmjit/base/vmem.cpp b/src/asmjit/base/vmem.cpp index 26f31e5..5eb5408 100644 --- a/src/asmjit/base/vmem.cpp +++ b/src/asmjit/base/vmem.cpp @@ -8,14 +8,11 @@ #define ASMJIT_EXPORTS // [Dependencies - AsmJit] -#include "../base/error.h" #include "../base/globals.h" -#include "../base/intutil.h" -#include "../base/lock.h" #include "../base/vmem.h" // [Dependencies - Posix] -#if defined(ASMJIT_OS_POSIX) +#if ASMJIT_OS_POSIX # include # include # include @@ -70,7 +67,7 @@ namespace asmjit { // ============================================================================ // Windows specific implementation using `VirtualAllocEx` and `VirtualFree`. -#if defined(ASMJIT_OS_WINDOWS) +#if ASMJIT_OS_WINDOWS struct VMemLocal { // AsmJit allows to pass a `NULL` handle to `VMemUtil`. This function is just // a convenient way to convert such handle to the current process one. 
@@ -91,7 +88,7 @@ static const VMemLocal& vMemGet() { SYSTEM_INFO info; ::GetSystemInfo(&info); - vMem.pageSize = IntUtil::alignToPowerOf2(info.dwPageSize); + vMem.pageSize = Utils::alignToPowerOf2(info.dwPageSize); vMem.pageGranularity = info.dwAllocationGranularity; vMem.hProcess = ::GetCurrentProcess(); @@ -122,7 +119,7 @@ void* VMemUtil::allocProcessMemory(HANDLE hProcess, size_t length, size_t* alloc hProcess = vMem.getSafeProcessHandle(hProcess); // VirtualAlloc rounds allocated size to a page size automatically. - size_t mSize = IntUtil::alignTo(length, vMem.pageSize); + size_t mSize = Utils::alignTo(length, vMem.pageSize); // Windows XP SP2 / Vista allow Data Excution Prevention (DEP). DWORD protectFlags = 0; @@ -136,7 +133,7 @@ void* VMemUtil::allocProcessMemory(HANDLE hProcess, size_t length, size_t* alloc if (mBase == NULL) return NULL; - ASMJIT_ASSERT(IntUtil::isAligned( + ASMJIT_ASSERT(Utils::isAligned( reinterpret_cast(mBase), vMem.pageSize)); if (allocated != NULL) @@ -161,7 +158,7 @@ Error VMemUtil::releaseProcessMemory(HANDLE hProcess, void* addr, size_t /* leng // ============================================================================ // Posix specific implementation using `mmap` and `munmap`. -#if defined(ASMJIT_OS_POSIX) +#if ASMJIT_OS_POSIX // MacOS uses MAP_ANON instead of MAP_ANONYMOUS. 
#if !defined(MAP_ANONYMOUS) @@ -180,7 +177,7 @@ static const VMemLocal& vMemGet() { if (!vMem.pageSize) { size_t pageSize = ::getpagesize(); vMem.pageSize = pageSize; - vMem.pageGranularity = IntUtil::iMax(pageSize, 65536); + vMem.pageGranularity = Utils::iMax(pageSize, 65536); } return vMem; @@ -198,7 +195,7 @@ size_t VMemUtil::getPageGranularity() { void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) { const VMemLocal& vMem = vMemGet(); - size_t msize = IntUtil::alignTo(length, vMem.pageSize); + size_t msize = Utils::alignTo(length, vMem.pageSize); int protection = PROT_READ; if (flags & kVMemFlagWritable ) protection |= PROT_WRITE; @@ -430,7 +427,7 @@ struct VMemMgr::PermanentNode { //! Helper to avoid `#ifdef`s in the code. ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSize) { uint32_t flags = kVMemFlagWritable | kVMemFlagExecutable; -#if !defined(ASMJIT_OS_WINDOWS) +#if !ASMJIT_OS_WINDOWS return static_cast(VMemUtil::alloc(size, vSize, flags)); #else return static_cast(VMemUtil::allocProcessMemory(self->_hProcess, size, vSize, flags)); @@ -441,7 +438,7 @@ ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSiz //! //! Helper to avoid `#ifdef`s in the code. ASMJIT_INLINE Error vMemMgrReleaseVMem(VMemMgr* self, void* p, size_t vSize) { -#if !defined(ASMJIT_OS_WINDOWS) +#if !ASMJIT_OS_WINDOWS return VMemUtil::release(p, vSize); #else return VMemUtil::releaseProcessMemory(self->_hProcess, p, vSize); @@ -717,7 +714,7 @@ static void* vMemMgrAllocPermanent(VMemMgr* self, size_t vSize) { static const size_t permanentAlignment = 32; static const size_t permanentNodeSize = 32768; - vSize = IntUtil::alignTo(vSize, permanentAlignment); + vSize = Utils::alignTo(vSize, permanentAlignment); AutoLock locked(self->_lock); PermanentNode* node = self->_permanent; @@ -772,7 +769,7 @@ static void* vMemMgrAllocFreeable(VMemMgr* self, size_t vSize) { size_t minVSize; // Align to 32 bytes by default. 
- vSize = IntUtil::alignTo(vSize, 32); + vSize = Utils::alignTo(vSize, 32); if (vSize == 0) return NULL; @@ -923,7 +920,7 @@ static void vMemMgrReset(VMemMgr* self, bool keepVirtualMemory) { // [asmjit::VMemMgr - Construction / Destruction] // ============================================================================ -#if !defined(ASMJIT_OS_WINDOWS) +#if !ASMJIT_OS_WINDOWS VMemMgr::VMemMgr() #else VMemMgr::VMemMgr(HANDLE hProcess) : diff --git a/src/asmjit/base/vmem.h b/src/asmjit/base/vmem.h index e8019da..dc7f200 100644 --- a/src/asmjit/base/vmem.h +++ b/src/asmjit/base/vmem.h @@ -9,15 +9,14 @@ #define _ASMJIT_BASE_VMEM_H // [Dependencies - AsmJit] -#include "../base/error.h" -#include "../base/lock.h" +#include "../base/utils.h" // [Api-Begin] #include "../apibegin.h" namespace asmjit { -//! \addtogroup asmjit_base_util +//! \addtogroup asmjit_base //! \{ // ============================================================================ @@ -72,21 +71,14 @@ struct VMemUtil { //! executable unless 'canExecute' is true. Returns the address of //! allocated memory, or NULL on failure. static ASMJIT_API void* alloc(size_t length, size_t* allocated, uint32_t flags); - -#if defined(ASMJIT_OS_WINDOWS) - //! Allocate virtual memory of `hProcess`. - //! - //! \note This function is Windows specific. - static ASMJIT_API void* allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags); -#endif // ASMJIT_OS_WINDOWS - //! Free memory allocated by `alloc()`. static ASMJIT_API Error release(void* addr, size_t length); -#if defined(ASMJIT_OS_WINDOWS) - //! Release virtual memory of `hProcess`. - //! - //! \note This function is Windows specific. +#if ASMJIT_OS_WINDOWS + //! Allocate virtual memory of `hProcess` (Windows only). + static ASMJIT_API void* allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags); + + //! Release virtual memory of `hProcess` (Windows only). 
static ASMJIT_API Error releaseProcessMemory(HANDLE hProcess, void* addr, size_t length); #endif // ASMJIT_OS_WINDOWS }; @@ -102,7 +94,7 @@ struct VMemMgr { // [Construction / Destruction] // -------------------------------------------------------------------------- -#if !defined(ASMJIT_OS_WINDOWS) +#if !ASMJIT_OS_WINDOWS //! Create a `VMemMgr` instance. ASMJIT_API VMemMgr(); #else @@ -128,7 +120,7 @@ struct VMemMgr { // [Accessors] // -------------------------------------------------------------------------- -#if defined(ASMJIT_OS_WINDOWS) +#if ASMJIT_OS_WINDOWS //! Get the handle of the process memory manager is bound to. ASMJIT_INLINE HANDLE getProcessHandle() const { return _hProcess; @@ -189,7 +181,7 @@ struct VMemMgr { // [Members] // -------------------------------------------------------------------------- -#if defined(ASMJIT_OS_WINDOWS) +#if ASMJIT_OS_WINDOWS //! Process passed to `VirtualAllocEx` and `VirtualFree`. HANDLE _hProcess; #endif // ASMJIT_OS_WINDOWS diff --git a/src/asmjit/base/zone.cpp b/src/asmjit/base/zone.cpp index f182392..5c8b08e 100644 --- a/src/asmjit/base/zone.cpp +++ b/src/asmjit/base/zone.cpp @@ -8,7 +8,7 @@ #define ASMJIT_EXPORTS // [Dependencies - AsmJit] -#include "../base/intutil.h" +#include "../base/utils.h" #include "../base/zone.h" // [Dependencies - C] @@ -82,7 +82,7 @@ void Zone::reset(bool releaseMemory) { void* Zone::_alloc(size_t size) { Block* curBlock = _block; - size_t blockSize = IntUtil::iMax(_blockSize, size); + size_t blockSize = Utils::iMax(_blockSize, size); // The `_alloc()` method can only be called if there is not enough space // in the current block, see `alloc()` implementation for more details. diff --git a/src/asmjit/base/zone.h b/src/asmjit/base/zone.h index 39f6253..9c6c627 100644 --- a/src/asmjit/base/zone.h +++ b/src/asmjit/base/zone.h @@ -16,7 +16,7 @@ namespace asmjit { -//! \addtogroup asmjit_base_util +//! \addtogroup asmjit_base //! 
\{ // ============================================================================ @@ -75,6 +75,15 @@ struct Zone { uint8_t data[sizeof(void*)]; }; + // -------------------------------------------------------------------------- + // [Enums] + // -------------------------------------------------------------------------- + + enum { + //! Zone allocator overhead. + kZoneOverhead = static_cast(sizeof(Block) - sizeof(void*)) + kMemAllocOverhead + }; + // -------------------------------------------------------------------------- // [Construction / Destruction] // -------------------------------------------------------------------------- @@ -205,11 +214,6 @@ struct Zone { size_t _blockSize; }; -enum { - //! Zone allocator overhead. - kZoneOverhead = static_cast(sizeof(Zone::Block) - sizeof(void*)) + kMemAllocOverhead -}; - //! \} } // asmjit namespace diff --git a/src/asmjit/build.h b/src/asmjit/build.h index e6c4f81..9495001 100644 --- a/src/asmjit/build.h +++ b/src/asmjit/build.h @@ -8,114 +8,770 @@ #ifndef _ASMJIT_BUILD_H #define _ASMJIT_BUILD_H -// [Config] +// ============================================================================ +// [asmjit::Build - Configuration] +// ============================================================================ + +// AsmJit is by default compiled only for a host processor for the purpose of +// JIT code generation. Both Assembler and Compiler code generators are compiled +// by default. Preprocessor macros can be used to change the default behavior. + +// External Config File +// -------------------- +// +// Define in case your configuration is generated in an external file to be +// included. 
+ #if defined(ASMJIT_CONFIG_FILE) # include ASMJIT_CONFIG_FILE -#else -# include "./config.h" #endif // ASMJIT_CONFIG_FILE -// [MSC - Turn off deprecation warnings when compiling AsmJit] -#if defined(ASMJIT_EXPORTS) && defined(_MSC_VER) +// AsmJit Static Builds and Embedding +// ---------------------------------- +// +// These definitions can be used to enable static library build. Embed is used +// when AsmJit's source code is embedded directly in another project, implies +// static build as well. +// +// #define ASMJIT_EMBED // Asmjit is embedded (implies ASMJIT_STATIC). +// #define ASMJIT_STATIC // Define to enable static-library build. + +// AsmJit Build Modes +// ------------------ +// +// These definitions control the build mode and tracing support. The build mode +// should be auto-detected at compile time, but it's possible to override it in +// case that the auto-detection fails. +// +// Tracing is a feature that is never compiled by default and it's only used to +// debug AsmJit itself. +// +// #define ASMJIT_DEBUG // Define to enable debug-mode. +// #define ASMJIT_RELEASE // Define to enable release-mode. +// #define ASMJIT_TRACE // Define to enable tracing. + +// AsmJit Build Backends +// --------------------- +// +// These definitions control which backends to compile. If none of these is +// defined AsmJit will use host architecture by default (for JIT code generation). +// +// #define ASMJIT_BUILD_X86 // Define to enable x86 instruction set (32-bit). +// #define ASMJIT_BUILD_X64 // Define to enable x64 instruction set (64-bit). +// #define ASMJIT_BUILD_HOST // Define to enable host instruction set. + +// AsmJit Build Features +// --------------------- +// +// Flags can be defined to disable standard features. These are handy especially +// when building asmjit statically and some features are not needed or unwanted +// (like Compiler). +// +// AsmJit features are enabled by default. 
+// #define ASMJIT_DISABLE_COMPILER // Disable Compiler (completely). +// #define ASMJIT_DISABLE_LOGGER // Disable Logger (completely). +// #define ASMJIT_DISABLE_NAMES // Disable everything that uses strings +// // (instruction names, error names, ...). + +// Prevent compile-time errors caused by misconfiguration. +#if defined(ASMJIT_DISABLE_NAMES) && !defined(ASMJIT_DISABLE_LOGGER) +# error "[asmjit] ASMJIT_DISABLE_NAMES requires ASMJIT_DISABLE_LOGGER to be defined." +#endif // ASMJIT_DISABLE_NAMES && !ASMJIT_DISABLE_LOGGER + +// Detect ASMJIT_DEBUG and ASMJIT_RELEASE if not forced from outside. +#if !defined(ASMJIT_DEBUG) && !defined(ASMJIT_RELEASE) && !defined(NDEBUG) +# define ASMJIT_DEBUG +#else +# define ASMJIT_RELEASE +#endif + +// ASMJIT_EMBED implies ASMJIT_STATIC. +#if defined(ASMJIT_EMBED) && !defined(ASMJIT_STATIC) +# define ASMJIT_STATIC +#endif + +// ============================================================================ +// [asmjit::Build - Version] +// ============================================================================ + +// [@VERSION{@] +#define ASMJIT_VERSION_MAJOR 1 +#define ASMJIT_VERSION_MINOR 0 +#define ASMJIT_VERSION_PATCH 0 +#define ASMJIT_VERSION_STRING "1.0.0" +// [@VERSION}@] + +// ============================================================================ +// [asmjit::Build - CxxTool] +// ============================================================================ + +// [@WIN32_CRT_NO_DEPRECATE{@] +#if defined(_MSC_VER) && defined(ASMJIT_EXPORTS) # if !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE -# endif // !_CRT_SECURE_NO_DEPRECATE +# endif # if !defined(_CRT_SECURE_NO_WARNINGS) # define _CRT_SECURE_NO_WARNINGS -# endif // !_CRT_SECURE_NO_WARNINGS -#endif // ASMJIT_EXPORTS +# endif +#endif +// [@WIN32_CRT_NO_DEPRECATE}@] -// [Dependencies - C] +#include #include #include #include #include -// [Dependencies - C++] -#include +// [@WIN32_LEAN_AND_MEAN{@] +#if (defined(_WIN32) || defined(_WINDOWS)) && 
!defined(_WINDOWS_) +# if !defined(WIN32_LEAN_AND_MEAN) +# define WIN32_LEAN_AND_MEAN +# define ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN +# endif +# if !defined(NOMINMAX) +# define NOMINMAX +# define ASMJIT_UNDEF_NOMINMAX +# endif +# include +# if defined(ASMJIT_UNDEF_NOMINMAX) +# undef NOMINMAX +# undef ASMJIT_UNDEF_NOMINMAX +# endif +# if defined(ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN) +# undef WIN32_LEAN_AND_MEAN +# undef ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN +# endif +#endif +// [@WIN32_LEAN_AND_MEAN}@] -// ============================================================================ -// [asmjit::Build - Sanity] -// ============================================================================ - -#if defined(ASMJIT_DISABLE_NAMES) && !defined(ASMJIT_DISABLE_LOGGER) -# error "ASMJIT_DISABLE_NAMES requires ASMJIT_DISABLE_LOGGER to be defined." -#endif // ASMJIT_DISABLE_NAMES && !ASMJIT_DISABLE_LOGGER - -// ============================================================================ -// [asmjit::Build - OS] -// ============================================================================ - -#if defined(_WINDOWS) || defined(__WINDOWS__) || defined(_WIN32) || defined(_WIN64) -# define ASMJIT_OS_WINDOWS -#elif defined(__linux) || defined(__linux__) -# define ASMJIT_OS_POSIX -# define ASMJIT_OS_LINUX -#elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) -# define ASMJIT_OS_POSIX -# define ASMJIT_OS_BSD -#elif defined(__APPLE__) -# define ASMJIT_OS_POSIX -# define ASMJIT_OS_MAC -#elif defined(__CYGWIN__) -# define ASMJIT_OS_POSIX -# define ASMJIT_OS_CYGWIN +// [@OS{@] +#if defined(_WIN32) || defined(_WINDOWS) +#define ASMJIT_OS_WINDOWS (1) #else -# pragma message("AsmJit - Unable to detect host operating system, using ASMJIT_OS_POSIX.") -# define ASMJIT_OS_POSIX +#define ASMJIT_OS_WINDOWS (0) #endif -// ============================================================================ -// [asmjit::Build - Arch] -// 
============================================================================ - -// [X64] -#if defined(__x86_64__) || \ - defined(__amd64__ ) || \ - defined(__LP64 ) || \ - defined(_M_X64 ) || \ - defined(_M_AMD64 ) || \ - defined(_WIN64 ) - -# define ASMJIT_ARCH_X64 -# define ASMJIT_ARCH_LE -# define ASMJIT_ARCH_UNALIGNED_16 -# define ASMJIT_ARCH_UNALIGNED_32 -# define ASMJIT_ARCH_UNALIGNED_64 - -// [X86] -#elif \ - defined(_M_IX86 ) || \ - defined(__INTEL__) || \ - defined(__i386__ ) - -# define ASMJIT_ARCH_X86 -# define ASMJIT_ARCH_LE -# define ASMJIT_ARCH_UNALIGNED_16 -# define ASMJIT_ARCH_UNALIGNED_32 -# define ASMJIT_ARCH_UNALIGNED_64 - -// [Arm] -#elif \ - defined(_ARM ) || \ - defined(_M_ARM_FP ) || \ - defined(__ARM_NEON__ ) || \ - defined(__arm ) || \ - defined(__arm__ ) || \ - defined(__TARGET_ARCH_ARM ) || \ - defined(__TARGET_ARCH_THUMB) || \ - defined(__thumb__ ) - -# define ASMJIT_ARCH_ARM -# define ASMJIT_ARCH_LE - -// [Unknown] +#if defined(__APPLE__) +# include +# define ASMJIT_OS_MAC (TARGET_OS_MAC) +# define ASMJIT_OS_IOS (TARGET_OS_IPHONE) #else -# error "AsmJit - Unable to detect host architecture." 
+# define ASMJIT_OS_MAC (0) +# define ASMJIT_OS_IOS (0) #endif +#if defined(__ANDROID__) +# define ASMJIT_OS_ANDROID (1) +#else +# define ASMJIT_OS_ANDROID (0) +#endif + +#if defined(__linux__) || defined(__ANDROID__) +# define ASMJIT_OS_LINUX (1) +#else +# define ASMJIT_OS_LINUX (0) +#endif + +#if defined(__DragonFly__) +# define ASMJIT_OS_DRAGONFLYBSD (1) +#else +# define ASMJIT_OS_DRAGONFLYBSD (0) +#endif + +#if defined(__FreeBSD__) +# define ASMJIT_OS_FREEBSD (1) +#else +# define ASMJIT_OS_FREEBSD (0) +#endif + +#if defined(__NetBSD__) +# define ASMJIT_OS_NETBSD (1) +#else +# define ASMJIT_OS_NETBSD (0) +#endif + +#if defined(__OpenBSD__) +# define ASMJIT_OS_OPENBSD (1) +#else +# define ASMJIT_OS_OPENBSD (0) +#endif + +#if defined(__QNXNTO__) +# define ASMJIT_OS_QNX (1) +#else +# define ASMJIT_OS_QNX (0) +#endif + +#if defined(__sun) +# define ASMJIT_OS_SOLARIS (1) +#else +# define ASMJIT_OS_SOLARIS (0) +#endif + +#if defined(__CYGWIN__) +# define ASMJIT_OS_CYGWIN (1) +#else +# define ASMJIT_OS_CYGWIN (0) +#endif + +#define ASMJIT_OS_BSD ( \ + ASMJIT_OS_FREEBSD || \ + ASMJIT_OS_DRAGONFLYBSD || \ + ASMJIT_OS_NETBSD || \ + ASMJIT_OS_OPENBSD || \ + ASMJIT_OS_MAC) +#define ASMJIT_OS_POSIX (!ASMJIT_OS_WINDOWS) +// [@OS}@] + +#if ASMJIT_OS_POSIX +# include +#endif // ASMJIT_OS_POSIX + +// [@ARCH{@] +// \def ASMJIT_ARCH_ARM +// Defined if the target architecture is a 32-bit ARM. +// +// \def ASMJIT_ARCH_ARM64 +// Defined if the target architecture is a 64-bit ARM. 
+// +// \def ASMJIT_ARCH_X64 +// Defined if the target architecture is a 64-bit X64/AMD64 +// +// \def ASMJIT_ARCH_X86 +// Defined if the target architecture is a 32-bit X86/IA32 +#if (defined(_M_X64 ) || defined(__x86_64) || defined(__x86_64__) || \ + defined(_M_AMD64) || defined(__amd64 ) || defined(__amd64__ )) +# define ASMJIT_ARCH_X64 (1) +#else +# define ASMJIT_ARCH_X64 (0) +#endif +#if (defined(_M_IX86 ) || defined(__X86__ ) || defined(__i386 ) || \ + defined(__IA32__) || defined(__I86__ ) || defined(__i386__) || \ + defined(__i486__) || defined(__i586__) || defined(__i686__)) +# define ASMJIT_ARCH_X86 (!ASMJIT_ARCH_X64) +#else +# define ASMJIT_ARCH_X86 (0) +#endif +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 +# define ASMJIT_ARCH_64BIT (ASMJIT_ARCH_X64) +# define ASMJIT_ARCH_BE (0) +# define ASMJIT_ARCH_LE (1) +# define ASMJIT_ARCH_UNALIGNED_16 (1) +# define ASMJIT_ARCH_UNALIGNED_32 (1) +# define ASMJIT_ARCH_UNALIGNED_64 (1) +# if !defined(ASMJIT_ARCH_MMX) && (!ASMJIT_ARCH_X64 && (defined(__MMX__) || defined(__i686__))) +# define ASMJIT_ARCH_MMX (1) +# endif +# if !defined(ASMJIT_ARCH_SSE) && (ASMJIT_ARCH_X64 || (defined(_M_IX86_FP) && _M_IX86_FP >= 1) || defined(__SSE__)) +# define ASMJIT_ARCH_SSE (1) +# endif +# if !defined(ASMJIT_ARCH_SSE2) && (ASMJIT_ARCH_X64 || (defined(_M_IX86_FP) && _M_IX86_FP >= 2) || defined(__SSE2__)) +# define ASMJIT_ARCH_SSE2 (1) +# endif +# if !defined(ASMJIT_ARCH_SSE3) && (defined(__SSE3__)) +# define ASMJIT_ARCH_SSE3 (1) +# endif +# if !defined(ASMJIT_ARCH_SSSE3) && (defined(__SSSE3__)) +# define ASMJIT_ARCH_SSSE3 (1) +# endif +# if !defined(ASMJIT_ARCH_SSE4_1) && (defined(__SSE4_1__)) +# define ASMJIT_ARCH_SSE4_1 (1) +# endif +# if !defined(ASMJIT_ARCH_SSE4_2) && (defined(__SSE4_2__)) +# define ASMJIT_ARCH_SSE4_2 (1) +# endif +# if !defined(ASMJIT_ARCH_AVX) && (defined(__AVX__)) +# define ASMJIT_ARCH_AVX (1) +# endif +# if !defined(ASMJIT_ARCH_AVX2) && (defined(__AVX2__)) +# define ASMJIT_ARCH_AVX2 (1) +# endif +#endif +#if 
!defined(ASMJIT_ARCH_AVX2) +# define ASMJIT_ARCH_AVX2 (0) +#endif +#if !defined(ASMJIT_ARCH_AVX) +# define ASMJIT_ARCH_AVX (ASMJIT_ARCH_AVX2) +#endif +#if !defined(ASMJIT_ARCH_SSE4_2) +# define ASMJIT_ARCH_SSE4_2 (ASMJIT_ARCH_AVX) +#endif +#if !defined(ASMJIT_ARCH_SSE4_1) +# define ASMJIT_ARCH_SSE4_1 (ASMJIT_ARCH_SSE4_2) +#endif +#if !defined(ASMJIT_ARCH_SSSE3) +# define ASMJIT_ARCH_SSSE3 (ASMJIT_ARCH_SSE4_1) +#endif +#if !defined(ASMJIT_ARCH_SSE3) +# define ASMJIT_ARCH_SSE3 (ASMJIT_ARCH_SSSE3) +#endif +#if !defined(ASMJIT_ARCH_SSE2) +# define ASMJIT_ARCH_SSE2 (ASMJIT_ARCH_SSE3) +#endif +#if !defined(ASMJIT_ARCH_SSE) +# define ASMJIT_ARCH_SSE (ASMJIT_ARCH_SSE2) +#endif +#if !defined(ASMJIT_ARCH_MMX) +# define ASMJIT_ARCH_MMX (0) +#endif + +#if (defined(_M_ARM ) || defined(__arm__ ) || defined(__arm) || \ + defined(_M_ARMT ) || defined(__thumb__)) +# define ASMJIT_ARCH_ARM (1) +# define ASMJIT_ARCH_ARM64 (0) +#else +# define ASMJIT_ARCH_ARM (0) +# define ASMJIT_ARCH_ARM64 (0) +#endif +#if ASMJIT_ARCH_ARM || ASMJIT_ARCH_ARM64 +# define ASMJIT_ARCH_64BIT (ASMJIT_ARCH_ARM64) +# define ASMJIT_ARCH_BE (0) +# define ASMJIT_ARCH_LE (1) +# define ASMJIT_ARCH_UNALIGNED_16 (0) +# define ASMJIT_ARCH_UNALIGNED_32 (0) +# define ASMJIT_ARCH_UNALIGNED_64 (0) +# if !defined(ASMJIT_ARCH_NEON) && defined(__ARM_NEON__) +# define ASMJIT_ARCH_NEON (1) +# endif +#endif +#if !defined(ASMJIT_ARCH_NEON) +# define ASMJIT_ARCH_NEON (0) +#endif +// [@ARCH}@] + +// [@ARCH_INCLUDE{@] +#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__) +# include +#endif + +#if ASMJIT_ARCH_SSE +# include +#endif +#if ASMJIT_ARCH_SSE2 +# include +#endif +#if ASMJIT_ARCH_SSE3 && !defined(_MSC_VER) +# include +#endif +#if ASMJIT_ARCH_SSSE3 +# include +#endif +#if ASMJIT_ARCH_SSE4_1 +# include +#endif +#if ASMJIT_ARCH_SSE4_2 +# include +#endif +#if ASMJIT_ARCH_AVX || ASMJIT_ARCH_AVX2 +# include +#endif + +#if ASMJIT_ARCH_NEON +# include +#endif +// [@ARCH_INCLUDE}@] + +// [@CC{@] +// \def 
ASMJIT_CC_CODEGEAR +// Defined if the detected C++ compiler is CodeGear or Borland (defined to __CODEGEARC__ or __BORLANDC__ version). +// +// \def ASMJIT_CC_CLANG +// Defined if the detected C++ compiler is CLANG (defined to __clang__ version). +// +// \def ASMJIT_CC_GCC +// Defined if the detected C++ compiler is GCC (defined to __GNUC__ value). +// +// \def ASMJIT_CC_MSC +// Defined if the detected C++ compiler is MSC (defined to _MSC_VER version). +// +// \def ASMJIT_CC_HAS_NATIVE_CHAR +// Defined if the C++ compiler treats char as a native type. +// +// \def ASMJIT_CC_HAS_NATIVE_WCHAR_T +// Defined if the C++ compiler treats wchar_t as a native type. +// +// \def ASMJIT_CC_HAS_NATIVE_CHAR16_T +// Defined if the C++ compiler treats char16_t as a native type. +// +// \def ASMJIT_CC_HAS_NATIVE_CHAR32_T +// Defined if the C++ compiler treats char32_t as a native type. +// +// \def ASMJIT_CC_HAS_OVERRIDE +// Defined if the C++ compiler supports override keyword. +// +// \def ASMJIT_CC_HAS_NOEXCEPT +// Defined if the C++ compiler supports noexcept keyword. 
+#define ASMJIT_CC_CLANG (0) +#define ASMJIT_CC_CODEGEAR (0) +#define ASMJIT_CC_GCC (0) +#define ASMJIT_CC_MSC (0) + +#if defined(__BORLANDC__) || defined(__CODEGEARC__) +# undef ASMJIT_CC_CODEGEAR +# if defined(__CODEGEARC__) +# define ASMJIT_CC_CODEGEAR (__CODEGEARC__) +# else +# define ASMJIT_CC_CODEGEAR (__BORLANDC__) +# endif +# define ASMJIT_CC_HAS_ASSUME (0) +# define ASMJIT_CC_HAS_ATTRIBUTE (0) +# define ASMJIT_CC_HAS_BUILTIN (0) +# define ASMJIT_CC_HAS_DECLSPEC (1) +# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (ASMJIT_CC_CODEGEAR >= 0x0610) +# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0) +# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0) +# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (ASMJIT_CC_CODEGEAR >= 0x0610) +# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_CODEGEAR >= 0x0610) +# define ASMJIT_CC_HAS_NATIVE_CHAR (1) +# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1) +# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_CODEGEAR >= 0x0610) +# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_CODEGEAR >= 0x0610) +#elif defined(__clang__) && defined(__clang_minor__) +# undef ASMJIT_CC_CLANG +# define ASMJIT_CC_CLANG (__clang__) +# define ASMJIT_CC_CLANG_VERSION_EQ(x, y, z) (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__ >= x * 10000 + y * 100 + z) +# define ASMJIT_CC_CLANG_VERSION_GE(x, y, z) (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__ >= x * 10000 + y * 100 + z) +# define ASMJIT_CC_HAS_ASSUME (0) +# define ASMJIT_CC_HAS_ATTRIBUTE (1) +# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (__has_attribute(__aligned__)) +# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(__always_inline__)) +# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (__has_attribute(__noinline__)) +# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (__has_attribute(__noreturn__)) +# define ASMJIT_CC_HAS_BUILTIN (1) +# define ASMJIT_CC_HAS_BUILTIN_ASSUME (__has_builtin(__builtin_assume)) +# define ASMJIT_CC_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect)) +# define 
ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (__has_builtin(__builtin_unreachable)) +# define ASMJIT_CC_HAS_CONSTEXPR (__has_extension(__cxx_constexpr__)) +# define ASMJIT_CC_HAS_DECLSPEC (0) +# define ASMJIT_CC_HAS_DECLTYPE (__has_extension(__cxx_decltype__)) +# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (__has_extension(__cxx_defaulted_functions__)) +# define ASMJIT_CC_HAS_DELETE_FUNCTION (__has_extension(__cxx_deleted_functions__)) +# define ASMJIT_CC_HAS_FINAL (__has_extension(__cxx_override_control__)) +# define ASMJIT_CC_HAS_INITIALIZER_LIST (__has_extension(__cxx_generalized_initializers__)) +# define ASMJIT_CC_HAS_LAMBDA (__has_extension(__cxx_lambdas__)) +# define ASMJIT_CC_HAS_NATIVE_CHAR (1) +# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (__has_extension(__cxx_unicode_literals__)) +# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (__has_extension(__cxx_unicode_literals__)) +# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1) +# define ASMJIT_CC_HAS_NOEXCEPT (__has_extension(__cxx_noexcept__)) +# define ASMJIT_CC_HAS_NULLPTR (__has_extension(__cxx_nullptr__)) +# define ASMJIT_CC_HAS_OVERRIDE (__has_extension(__cxx_override_control__)) +# define ASMJIT_CC_HAS_RVALUE (__has_extension(__cxx_rvalue_references__)) +# define ASMJIT_CC_HAS_STATIC_ASSERT (__has_extension(__cxx_static_assert__)) +#elif defined(__GNUC__) && defined(__GNUC_MINOR__) +# undef ASMJIT_CC_GCC +# define ASMJIT_CC_GCC (__GNUC__) +# define ASMJIT_CC_GCC_VERSION_EQ(x, y, z) (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ >= x * 10000 + y * 100 + z) +# define ASMJIT_CC_GCC_VERSION_GE(x, y, z) (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ >= x * 10000 + y * 100 + z) +# if defined(__GXX_EXPERIMENTAL_CXX0X__) +# define ASMJIT_CC_GCC_CXX0X (1) +# else +# define ASMJIT_CC_GCC_CXX0X (0) +# endif +# define ASMJIT_CC_HAS_ASSUME (0) +# define ASMJIT_CC_HAS_ATTRIBUTE (1) +# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (ASMJIT_CC_GCC_VERSION_GE(2, 7, 0)) +# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE 
(ASMJIT_CC_GCC_VERSION_GE(4, 4, 0) && !defined(__MINGW32__)) +# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (ASMJIT_CC_GCC_VERSION_GE(3, 4, 0) && !defined(__MINGW32__)) +# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (ASMJIT_CC_GCC_VERSION_GE(2, 5, 0)) +# define ASMJIT_CC_HAS_BUILTIN (1) +# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0) +# define ASMJIT_CC_HAS_BUILTIN_EXPECT (1) +# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (ASMJIT_CC_GCC_VERSION_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_GCC_VERSION_GE(4, 6, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_DECLSPEC (0) +# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_GCC_VERSION_GE(4, 3, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_GCC_VERSION_GE(4, 4, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_GCC_VERSION_GE(4, 4, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_GCC_VERSION_GE(4, 7, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_GCC_VERSION_GE(4, 4, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_GCC_VERSION_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_NATIVE_CHAR (1) +# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_GCC_VERSION_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_GCC_VERSION_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1) +# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_GCC_VERSION_GE(4, 6, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_GCC_VERSION_GE(4, 6, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_GCC_VERSION_GE(4, 7, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_GCC_VERSION_GE(4, 3, 0) && ASMJIT_CC_GCC_CXX0X) +# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_GCC_VERSION_GE(4, 3, 0) && ASMJIT_CC_GCC_CXX0X) +#elif defined(_MSC_VER) +# undef ASMJIT_CC_MSC +# define ASMJIT_CC_MSC (_MSC_VER) +# 
define ASMJIT_CC_HAS_ASSUME (1) +# define ASMJIT_CC_HAS_ATTRIBUTE (0) +# define ASMJIT_CC_HAS_BUILTIN (0) +# define ASMJIT_CC_HAS_CONSTEXPR (0) +# define ASMJIT_CC_HAS_DECLSPEC (1) +# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (1) +# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (1) +# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (1) +# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (1) +# define ASMJIT_CC_HAS_DECLTYPE (_MSC_VER >= 1600) +# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (_MSC_VER >= 1800) +# define ASMJIT_CC_HAS_DELETE_FUNCTION (_MSC_VER >= 1800) +# define ASMJIT_CC_HAS_FINAL (_MSC_VER >= 1400) +# define ASMJIT_CC_HAS_INITIALIZER_LIST (_MSC_VER >= 1800) +# define ASMJIT_CC_HAS_LAMBDA (_MSC_VER >= 1600) +# define ASMJIT_CC_HAS_NATIVE_CHAR (1) +# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (0) +# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (0) +# if defined(_NATIVE_WCHAR_T_DEFINED) +# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1) +# else +# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (0) +# endif +# define ASMJIT_CC_HAS_NOEXCEPT (_MSC_FULL_VER >= 180021114) +# define ASMJIT_CC_HAS_NULLPTR (_MSC_VER >= 1600) +# define ASMJIT_CC_HAS_OVERRIDE (_MSC_VER >= 1400) +# define ASMJIT_CC_HAS_RVALUE (_MSC_VER >= 1600) +# define ASMJIT_CC_HAS_STATIC_ASSERT (_MSC_VER >= 1600) +#else +# error "[asmjit] Unable to detect the C/C++ compiler." 
+#endif + +#if !defined(ASMJIT_CC_GCC_VERSION_EQ) +# define ASMJIT_CC_GCC_VERSION_EQ(x, y, z) (0) +# define ASMJIT_CC_GCC_VERSION_GE(x, y, z) (0) +#endif + +#if !defined(ASMJIT_CC_CLANG_VERSION_EQ) +# define ASMJIT_CC_CLANG_VERSION_EQ(x, y, z) (0) +# define ASMJIT_CC_CLANG_VERSION_GE(x, y, z) (0) +#endif + +#if !ASMJIT_CC_HAS_ATTRIBUTE +# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (0) +# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (0) +# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (0) +# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (0) +#endif + +#if !ASMJIT_CC_HAS_BUILTIN +# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0) +# define ASMJIT_CC_HAS_BUILTIN_EXPECT (0) +# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (0) +#endif + +#if !ASMJIT_CC_HAS_DECLSPEC +# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (0) +# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0) +# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0) +# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (0) +#endif +// [@CC}@] + +// [@CC_API{@] +// \def ASMJIT_API +// The decorated function is asmjit API and should be exported. +#if !defined(ASMJIT_API) +# if defined(ASMJIT_STATIC) +# define ASMJIT_API +# elif ASMJIT_OS_WINDOWS +# if (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && !defined(__MINGW32__) +# if defined(ASMJIT_EXPORTS) +# define ASMJIT_API __attribute__((__dllexport__)) +# else +# define ASMJIT_API __attribute__((__dllimport__)) +# endif +# else +# if defined(ASMJIT_EXPORTS) +# define ASMJIT_API __declspec(dllexport) +# else +# define ASMJIT_API __declspec(dllimport) +# endif +# endif +# else +# if ASMJIT_CC_CLANG || ASMJIT_CC_GCC_VERSION_GE(4, 0, 0) +# define ASMJIT_API __attribute__((__visibility__("default"))) +# endif +# endif +#endif +// [@CC_API}@] + +// [@CC_VARAPI{@] +// \def ASMJIT_VARAPI +// The decorated variable is part of asmjit API and is exported. 
+#if !defined(ASMJIT_VARAPI) +# define ASMJIT_VARAPI extern ASMJIT_API +#endif +// [@CC_VARAPI}@] + +// [@CC_VIRTAPI{@] +// \def ASMJIT_VIRTAPI +// The decorated class has a virtual table and is part of asmjit API. +// +// This is basically a workaround. When using MSVC and marking class as DLL +// export everything gets exported, which is unwanted in most projects. MSVC +// automatically exports typeinfo and vtable if at least one symbol of the +// class is exported. However, GCC has some strange behavior that even if +// one or more symbol is exported it doesn't export typeinfo unless the +// class itself is decorated with "visibility(default)" (i.e. asmjit_API). +#if (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && !ASMJIT_OS_WINDOWS +# define ASMJIT_VIRTAPI ASMJIT_API +#else +# define ASMJIT_VIRTAPI +#endif +// [@CC_VIRTAPI}@] + +// [@CC_INLINE{@] +// \def ASMJIT_INLINE +// Always inline the decorated function. +#if ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE && ASMJIT_CC_CLANG +# define ASMJIT_INLINE inline __attribute__((__always_inline__, __visibility__("hidden"))) +#elif ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE +# define ASMJIT_INLINE inline __attribute__((__always_inline__)) +#elif ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE +# define ASMJIT_INLINE __forceinline +#else +# define ASMJIT_INLINE inline +#endif +// [@CC_INLINE}@] + +// [@CC_NOINLINE{@] +// \def ASMJIT_NOINLINE +// Never inline the decorated function. +#if ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE +# define ASMJIT_NOINLINE __attribute__((__noinline__)) +#elif ASMJIT_CC_HAS_DECLSPEC_NOINLINE +# define ASMJIT_NOINLINE __declspec(noinline) +#else +# define ASMJIT_NOINLINE +#endif +// [@CC_NOINLINE}@] + +// [@CC_CDECL{@] +// \def ASMJIT_CDECL +// Standard C function calling convention decorator (__cdecl). 
+#if ASMJIT_ARCH_X86 +# if ASMJIT_CC_HAS_ATTRIBUTE +# define ASMJIT_CDECL __attribute__((__cdecl__)) +# else +# define ASMJIT_CDECL __cdecl +# endif +#else +# define ASMJIT_CDECL +#endif +// [@CC_CDECL}@] + +// [@CC_STDCALL{@] +// \def ASMJIT_STDCALL +// StdCall function calling convention decorator (__stdcall). +#if ASMJIT_ARCH_X86 +# if ASMJIT_CC_HAS_ATTRIBUTE +# define ASMJIT_STDCALL __attribute__((__stdcall__)) +# else +# define ASMJIT_STDCALL __stdcall +# endif +#else +# define ASMJIT_STDCALL +#endif +// [@CC_STDCALL}@] + +// [@CC_FASTCALL{@] +// \def ASMJIT_FASTCALL +// FastCall function calling convention decorator (__fastcall). +#if ASMJIT_ARCH_X86 +# if ASMJIT_CC_HAS_ATTRIBUTE +# define ASMJIT_FASTCALL __attribute__((__fastcall__)) +# else +# define ASMJIT_FASTCALL __fastcall +# endif +#else +# define ASMJIT_FASTCALL +#endif +// [@CC_FASTCALL}@] + +// [@CC_REGPARM{@] +// \def ASMJIT_REGPARM(n) +// A custom calling convention which passes n arguments in registers. +#if ASMJIT_ARCH_X86 && (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) +# define ASMJIT_REGPARM(n) __attribute__((__regparm__(n))) +#else +# define ASMJIT_REGPARM(n) +#endif +// [@CC_REGPARM}@] + +// [@CC_NOP{@] +// \def ASMJIT_NOP +// No operation. +#if !defined(ASMJIT_NOP) +# define ASMJIT_NOP ((void)0) +#endif +// [@CC_NOP}@] + +// [@CC_EXPECT{@] +// \def ASMJIT_LIKELY(exp) +// Expression exp is likely to be true. +// +// \def ASMJIT_UNLIKELY(exp) +// Expression exp is likely to be false. +#if ASMJIT_HAS_BUILTIN_EXPECT +# define ASMJIT_LIKELY(exp) __builtin_expect(!!(exp), 1) +# define ASMJIT_UNLIKELY(exp) __builtin_expect(!!(exp), 0) +#else +# define ASMJIT_LIKELY(exp) exp +# define ASMJIT_UNLIKELY(exp) exp +#endif +// [@CC_EXPECT}@] + +// [@CC_UNUSED{@] +// \def ASMJIT_UNUSED(x) +// Mark a variable x as unused. +#define ASMJIT_UNUSED(x) (void)(x) +// [@CC_UNUSED}@] + +// [@CC_OFFSET_OF{@] +// \def ASMJIT_OFFSET_OF(x, y). +// Get the offset of a member y of a struct x at compile-time. 
+#define ASMJIT_OFFSET_OF(x, y) ((int)(intptr_t)((const char*)&((const x*)0x1)->y) - 1) +// [@CC_OFFSET_OF}@] + +// [@CC_ARRAY_SIZE{@] +// \def ASMJIT_ARRAY_SIZE(x) +// Get the array size of x at compile-time. +#define ASMJIT_ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) +// [@CC_ARRAY_SIZE}@] + +// [@STDTYPES{@] +#if defined(__MINGW32__) || defined(__MINGW64__) +# include +#endif +#if defined(_MSC_VER) && (_MSC_VER < 1600) +# include +# if !defined(ASMJIT_SUPPRESS_STD_TYPES) +# if (_MSC_VER < 1300) +typedef signed char int8_t; +typedef signed short int16_t; +typedef signed int int32_t; +typedef signed __int64 int64_t; +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef unsigned __int64 uint64_t; +# else +typedef __int8 int8_t; +typedef __int16 int16_t; +typedef __int32 int32_t; +typedef __int64 int64_t; +typedef unsigned __int8 uint8_t; +typedef unsigned __int16 uint16_t; +typedef unsigned __int32 uint32_t; +typedef unsigned __int64 uint64_t; +# endif +# endif +# define ASMJIT_INT64_C(x) (x##i64) +# define ASMJIT_UINT64_C(x) (x##ui64) +#else +# include +# include +# define ASMJIT_INT64_C(x) (x##ll) +# define ASMJIT_UINT64_C(x) (x##ull) +#endif +// [@STDTYPES}@] + // ============================================================================ -// [asmjit::Build - Build] +// [asmjit::Build - Additional] // ============================================================================ // Build host architecture if no architecture is selected. @@ -127,104 +783,25 @@ // Autodetect host architecture if enabled. 
#if defined(ASMJIT_BUILD_HOST) -# if defined(ASMJIT_ARCH_X86) && !defined(ASMJIT_BUILD_X86) +# if ASMJIT_ARCH_X86 && !defined(ASMJIT_BUILD_X86) # define ASMJIT_BUILD_X86 # endif // ASMJIT_ARCH_X86 && !ASMJIT_BUILD_X86 -# if defined(ASMJIT_ARCH_X64) && !defined(ASMJIT_BUILD_X64) +# if ASMJIT_ARCH_X64 && !defined(ASMJIT_BUILD_X64) # define ASMJIT_BUILD_X64 # endif // ASMJIT_ARCH_X64 && !ASMJIT_BUILD_X64 #endif // ASMJIT_BUILD_HOST -// ============================================================================ -// [asmjit::Build - Decorators] -// ============================================================================ - -#if defined(ASMJIT_EMBED) && !defined(ASMJIT_STATIC) -# define ASMJIT_STATIC -#endif // ASMJIT_EMBED && !ASMJIT_STATIC - -#if defined(ASMJIT_STATIC) -# define ASMJIT_API -#elif defined(ASMJIT_OS_WINDOWS) -# if (defined(__GNUC__) || defined(__clang__)) && !defined(__MINGW32__) -# if defined(ASMJIT_EXPORTS) -# define ASMJIT_API __attribute__((dllexport)) -# else -# define ASMJIT_API __attribute__((dllimport)) -# endif // ASMJIT_EXPORTS -# else -# if defined(ASMJIT_EXPORTS) -# define ASMJIT_API __declspec(dllexport) -# else -# define ASMJIT_API __declspec(dllimport) -# endif -# endif -#elif defined(__GNUC__) && (__GNUC__ >= 4) -# define ASMJIT_API __attribute__((visibility("default"))) -#endif - -#if !defined(ASMJIT_API) -# define ASMJIT_API -#endif // ASMJIT_API - -// This is basically a workaround. When using MSVC and marking class as DLL -// export everything is exported, which is unwanted since there are many -// inlines which mimic instructions. MSVC automatically exports typeinfo and -// vtable if at least one symbol of that class is exported. However, GCC has -// some strange behavior that even if one or more symbol is exported it doesn't -// export `typeinfo` unless the class itself is marked as "visibility(default)". 
-#if !defined(ASMJIT_OS_WINDOWS) && (defined(__GNUC__) || defined (__clang__)) -# define ASMJIT_VCLASS ASMJIT_API -#else -# define ASMJIT_VCLASS -#endif - -#if !defined(ASMJIT_VAR) -# define ASMJIT_VAR extern ASMJIT_API -#endif // !ASMJIT_VAR - -#if defined(_MSC_VER) -# define ASMJIT_INLINE __forceinline -#elif defined(__clang__) -# define ASMJIT_INLINE inline __attribute__((always_inline)) __attribute__((visibility("hidden"))) -#elif defined(__GNUC__) -# define ASMJIT_INLINE inline __attribute__((always_inline)) -#else -# define ASMJIT_INLINE inline -#endif - -#if defined(ASMJIT_ARCH_X86) -# if defined(__GNUC__) || defined(__clang__) -# define ASMJIT_REGPARM_1 __attribute__((regparm(1))) -# define ASMJIT_REGPARM_2 __attribute__((regparm(2))) -# define ASMJIT_REGPARM_3 __attribute__((regparm(3))) -# define ASMJIT_FASTCALL __attribute__((fastcall)) -# define ASMJIT_STDCALL __attribute__((stdcall)) -# define ASMJIT_CDECL __attribute__((cdecl)) -# else -# define ASMJIT_FASTCALL __fastcall -# define ASMJIT_STDCALL __stdcall -# define ASMJIT_CDECL __cdecl -# endif -#else -# define ASMJIT_FASTCALL -# define ASMJIT_STDCALL -# define ASMJIT_CDECL -#endif // ASMJIT_ARCH_X86 - -// ============================================================================ -// [asmjit::Build - Enum] -// ============================================================================ - #if defined(_MSC_VER) && _MSC_VER >= 1400 # define ASMJIT_ENUM(_Name_) enum _Name_ : uint32_t #else # define ASMJIT_ENUM(_Name_) enum _Name_ #endif -// ============================================================================ -// [asmjit::Build - Memory Management] -// ============================================================================ +#if ASMJIT_ARCH_LE +# define _ASMJIT_ARCH_INDEX(_Total_, _Index_) (_Index_) +#else +# define _ASMJIT_ARCH_INDEX(_Total_, _Index_) ((_Total_) - 1 - (_Index_)) +#endif #if !defined(ASMJIT_ALLOC) && !defined(ASMJIT_REALLOC) && !defined(ASMJIT_FREE) # define 
ASMJIT_ALLOC(_Size_) ::malloc(_Size_) @@ -232,81 +809,10 @@ # define ASMJIT_FREE(_Ptr_) ::free(_Ptr_) #else # if !defined(ASMJIT_ALLOC) || !defined(ASMJIT_REALLOC) || !defined(ASMJIT_FREE) -# error "AsmJit - You must redefine ASMJIT_ALLOC, ASMJIT_REALLOC and ASMJIT_FREE." +# error "[asmjit] You must redefine ASMJIT_ALLOC, ASMJIT_REALLOC and ASMJIT_FREE." # endif #endif // !ASMJIT_ALLOC && !ASMJIT_REALLOC && !ASMJIT_FREE -// ============================================================================ -// [asmjit::Build - _ASMJIT_ARCH_INDEX] -// ============================================================================ - -#if defined(ASMJIT_ARCH_LE) -# define _ASMJIT_ARCH_INDEX(_Total_, _Index_) (_Index_) -#else -# define _ASMJIT_ARCH_INDEX(_Total_, _Index_) ((_Total_) - 1 - (_Index_)) -#endif - -// ============================================================================ -// [asmjit::Build - ASMJIT_OFFSET_OF] -// ============================================================================ - -//! Cross-platform solution to get offset of `_Field_` in `_Struct_`. -#define ASMJIT_OFFSET_OF(_Struct_, _Field_) \ - static_cast((intptr_t) ((const uint8_t*) &((const _Struct_*)0x1)->_Field_) - 1) - -// ============================================================================ -// [asmjit::Build - ASMJIT_ARRAY_SIZE] -// ============================================================================ - -#define ASMJIT_ARRAY_SIZE(_Array_) \ - (sizeof(_Array_) / sizeof(*_Array_)) - -// ============================================================================ -// [asmjit::Build - ASMJIT_DEBUG / ASMJIT_TRACE] -// ============================================================================ - -// If ASMJIT_DEBUG and ASMJIT_RELEASE is not defined ASMJIT_DEBUG will be -// detected using the compiler specific macros. This enables to set the build -// type using IDE. 
-#if !defined(ASMJIT_DEBUG) && !defined(ASMJIT_RELEASE) -# if defined(_DEBUG) -# define ASMJIT_DEBUG -# endif // _DEBUG -#endif // !ASMJIT_DEBUG && !ASMJIT_RELEASE - -// ASMJIT_TRACE is only used by sources and private headers. It's safe to make -// it unavailable outside of AsmJit. -#if defined(ASMJIT_EXPORTS) -namespace asmjit { static inline int disabledTrace(...) { return 0; } } -# if defined(ASMJIT_TRACE) -# define ASMJIT_TSEC(_Section_) _Section_ -# define ASMJIT_TLOG ::printf -# else -# define ASMJIT_TSEC(_Section_) do {} while(0) -# define ASMJIT_TLOG 0 && ::asmjit::disabledTrace -# endif // ASMJIT_TRACE -#endif // ASMJIT_EXPORTS - -// ============================================================================ -// [asmjit::Build - ASMJIT_UNUSED] -// ============================================================================ - -#if !defined(ASMJIT_UNUSED) -# define ASMJIT_UNUSED(_Var_) ((void)_Var_) -#endif // ASMJIT_UNUSED - -// ============================================================================ -// [asmjit::Build - ASMJIT_NOP] -// ============================================================================ - -#if !defined(ASMJIT_NOP) -# define ASMJIT_NOP() ((void)0) -#endif // ASMJIT_NOP - -// ============================================================================ -// [asmjit::Build - ASMJIT_NO_COPY] -// ============================================================================ - #define ASMJIT_NO_COPY(_Type_) \ private: \ ASMJIT_INLINE _Type_(const _Type_& other); \ @@ -314,77 +820,36 @@ private: \ public: // ============================================================================ -// [asmjit::Build - StdInt] +// [asmjit::Build - Relative Path] // ============================================================================ -#if defined(__MINGW32__) -# include -#endif // __MINGW32__ +namespace asmjit { +namespace DebugUtils { -#if defined(_MSC_VER) && (_MSC_VER < 1600) -# if !defined(ASMJIT_SUPPRESS_STD_TYPES) -# if (_MSC_VER < 1300) 
-typedef signed char int8_t; -typedef signed short int16_t; -typedef signed int int32_t; -typedef signed __int64 int64_t; -typedef unsigned char uint8_t; -typedef unsigned short uint16_t; -typedef unsigned int uint32_t; -typedef unsigned __int64 uint64_t; -# else -typedef signed __int8 int8_t; -typedef signed __int16 int16_t; -typedef signed __int32 int32_t; -typedef signed __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -# endif // _MSC_VER -# endif // ASMJIT_SUPPRESS_STD_TYPES -#else -# include -# include -#endif +// Workaround that is used to convert an absolute path to a relative one at +// a C macro level, used by asserts and tracing. This workaround is needed +// as some build systems always convert the source code files to use absolute +// paths. Please note that if absolute paths are used this doesn't remove them +// from the compiled binary and can be still considered a security risk. +enum { + kSourceRelativePathOffset = int(sizeof(__FILE__) - sizeof("asmjit/build.h")) +}; -#if defined(_MSC_VER) -# define ASMJIT_INT64_C(_Num_) _Num_##i64 -# define ASMJIT_UINT64_C(_Num_) _Num_##ui64 -#else -# define ASMJIT_INT64_C(_Num_) _Num_##LL -# define ASMJIT_UINT64_C(_Num_) _Num_##ULL -#endif +// ASMJIT_TRACE is only used by sources and private headers. It's safe to make +// it unavailable outside of AsmJit. +#if defined(ASMJIT_EXPORTS) +static inline int disabledTrace(...) 
{ return 0; } +# if defined(ASMJIT_TRACE) +# define ASMJIT_TSEC(section) section +# define ASMJIT_TLOG ::printf +# else +# define ASMJIT_TSEC(section) ASMJIT_NOP +# define ASMJIT_TLOG 0 && ::asmjit::DebugUtils::disabledTrace +# endif // ASMJIT_TRACE +#endif // ASMJIT_EXPORTS -// ============================================================================ -// [asmjit::Build - Windows] -// ============================================================================ - -#if defined(ASMJIT_OS_WINDOWS) && !defined(ASMJIT_SUPPRESS_WINDOWS_H) - -# if !defined(WIN32_LEAN_AND_MEAN) -# define WIN32_LEAN_AND_MEAN -# define ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN -# endif // !WIN32_LEAN_AND_MEAN - -# if !defined(NOMINMAX) -# define NOMINMAX -# define ASMJIT_UNDEF_NOMINMAX -# endif // !NOMINMAX - -# include - -# if defined(ASMJIT_UNDEF_NOMINMAX) -# undef NOMINMAX -# undef ASMJIT_UNDEF_NOMINMAX -# endif - -# if defined(ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN) -# undef WIN32_LEAN_AND_MEAN -# undef ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN -# endif - -#endif // ASMJIT_OS_WINDOWS && !ASMJIT_SUPPRESS_WINDOWS_H +} // DebugUtils namespace +} // asmjit namespace // ============================================================================ // [asmjit::Build - Test] @@ -392,7 +857,7 @@ typedef unsigned __int64 uint64_t; // Include a unit testing package if this is a `asmjit_test` build. #if defined(ASMJIT_TEST) -#include "../test/broken.h" +# include "../test/broken.h" #endif // ASMJIT_TEST // [Guard] diff --git a/src/asmjit/config.h b/src/asmjit/config.h deleted file mode 100644 index 4fd4d2a..0000000 --- a/src/asmjit/config.h +++ /dev/null @@ -1,53 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Guard] -#ifndef _ASMJIT_CONFIG_H -#define _ASMJIT_CONFIG_H - -// This file can be used to modify built-in features of AsmJit. AsmJit is by -// default compiled only for host processor to enable JIT compilation. 
Both -// Assembler and Compiler code generators are compiled by default. -// -// ASMJIT_BUILD_... flags can be defined to build additional backends that can -// be used for remote code generation. -// -// ASMJIT_DISABLE_... flags can be defined to disable standard features. These -// are handy especially when building asmjit statically and some features are -// not needed or unwanted (like Compiler). - -// ============================================================================ -// [AsmJit - Build-Type] -// ============================================================================ - -// #define ASMJIT_EMBED // Asmjit is embedded (implies ASMJIT_STATIC). -// #define ASMJIT_STATIC // Define to enable static-library build. - -// ============================================================================ -// [AsmJit - Build-Mode] -// ============================================================================ - -// #define ASMJIT_DEBUG // Define to enable debug-mode. -// #define ASMJIT_RELEASE // Define to enable release-mode. -// #define ASMJIT_TRACE // Define to enable tracing. - -// ============================================================================ -// [AsmJit - Features] -// ============================================================================ - -// If none of these is defined AsmJit will select host architecture by default. -// #define ASMJIT_BUILD_X86 // Define to enable x86 instruction set (32-bit). -// #define ASMJIT_BUILD_X64 // Define to enable x64 instruction set (64-bit). -// #define ASMJIT_BUILD_HOST // Define to enable host instruction set. - -// AsmJit features are enabled by default. -// #define ASMJIT_DISABLE_COMPILER // Disable Compiler (completely). -// #define ASMJIT_DISABLE_LOGGER // Disable Logger (completely). -// #define ASMJIT_DISABLE_NAMES // Disable everything that uses strings - // (instruction names, error names, ...). 
- -// [Guard] -#endif // _ASMJIT_CONFIG_H diff --git a/src/asmjit/host.h b/src/asmjit/host.h index 9c0e5eb..9d051d2 100644 --- a/src/asmjit/host.h +++ b/src/asmjit/host.h @@ -15,7 +15,7 @@ // [asmjit::host - X86 / X64] // ============================================================================ -#if defined(ASMJIT_ARCH_X86) || defined(ASMJIT_ARCH_X64) +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 #include "./x86.h" namespace asmjit { diff --git a/src/asmjit/x86.h b/src/asmjit/x86.h index a1be281..b73d428 100644 --- a/src/asmjit/x86.h +++ b/src/asmjit/x86.h @@ -13,6 +13,7 @@ #include "./x86/x86assembler.h" #include "./x86/x86compiler.h" +#include "./x86/x86compilerfunc.h" #include "./x86/x86cpuinfo.h" #include "./x86/x86inst.h" #include "./x86/x86operand.h" diff --git a/src/asmjit/x86/x86assembler.cpp b/src/asmjit/x86/x86assembler.cpp index 286fc58..d459001 100644 --- a/src/asmjit/x86/x86assembler.cpp +++ b/src/asmjit/x86/x86assembler.cpp @@ -12,10 +12,10 @@ #if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64) // [Dependencies - AsmJit] -#include "../base/intutil.h" +#include "../base/containers.h" #include "../base/logger.h" #include "../base/runtime.h" -#include "../base/string.h" +#include "../base/utils.h" #include "../base/vmem.h" #include "../x86/x86assembler.h" #include "../x86/x86cpuinfo.h" @@ -155,7 +155,7 @@ static ASMJIT_INLINE bool x86RexIsInvalid(uint32_t rex) { //! Encode ModR/M. static ASMJIT_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) { - ASMJIT_ASSERT(m <= 7); + ASMJIT_ASSERT(m <= 3); ASMJIT_ASSERT(o <= 7); ASMJIT_ASSERT(rm <= 7); return (m << 6) + (o << 3) + rm; @@ -163,7 +163,7 @@ static ASMJIT_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) //! Encode SIB. 
static ASMJIT_INLINE uint32_t x86EncodeSib(uint32_t s, uint32_t i, uint32_t b) { - ASMJIT_ASSERT(s <= 7); + ASMJIT_ASSERT(s <= 3); ASMJIT_ASSERT(i <= 7); ASMJIT_ASSERT(b <= 7); return (s << 6) + (i << 3) + b; @@ -173,7 +173,7 @@ static ASMJIT_INLINE uint32_t x86EncodeSib(uint32_t s, uint32_t i, uint32_t b) { //! displacement, which fits into a signed 32-bit integer. static ASMJIT_INLINE bool x64IsRelative(Ptr a, Ptr b) { SignedPtr diff = static_cast(a) - static_cast(b); - return IntUtil::isInt32(diff); + return Utils::isInt32(diff); } //! Cast `reg` to `X86Reg` and get the register index. @@ -299,18 +299,18 @@ static ASMJIT_INLINE bool x86IsYmm(const X86Reg* reg) { return reg->isYmm(); } // [asmjit::X86Assembler - Construction / Destruction] // ============================================================================ -X86Assembler::X86Assembler(Runtime* runtime, uint32_t arch) : - Assembler(runtime), - zax(NoInit), - zcx(NoInit), - zdx(NoInit), - zbx(NoInit), - zsp(NoInit), - zbp(NoInit), - zsi(NoInit), - zdi(NoInit) { - - setArch(arch); +X86Assembler::X86Assembler(Runtime* runtime, uint32_t arch) + : Assembler(runtime), + zax(NoInit), + zcx(NoInit), + zdx(NoInit), + zbx(NoInit), + zsp(NoInit), + zbp(NoInit), + zsi(NoInit), + zdi(NoInit) { + ASMJIT_ASSERT(arch == kArchX86 || arch == kArchX64); + _setArch(arch); } X86Assembler::~X86Assembler() {} @@ -319,10 +319,10 @@ X86Assembler::~X86Assembler() {} // [asmjit::X86Assembler - Arch] // ============================================================================ -Error X86Assembler::setArch(uint32_t arch) { +Error X86Assembler::_setArch(uint32_t arch) { #if defined(ASMJIT_BUILD_X86) if (arch == kArchX86) { - _arch = kArchX86; + _arch = arch; _regSize = 4; _regCount.reset(); @@ -338,7 +338,7 @@ Error X86Assembler::setArch(uint32_t arch) { #if defined(ASMJIT_BUILD_X64) if (arch == kArchX64) { - _arch = kArchX64; + _arch = arch; _regSize = 8; _regCount.reset(); @@ -399,7 +399,7 @@ Error X86Assembler::embedLabel(const 
Label& op) { } if (_relocList.append(rd) != kErrorOk) - return setError(kErrorNoHeapMemory); + return setLastError(kErrorNoHeapMemory); // Emit dummy intptr_t (4 or 8 bytes; depends on the address size). if (regSize == 4) @@ -422,10 +422,10 @@ Error X86Assembler::align(uint32_t alignMode, uint32_t offset) { "%s.align %u\n", _logger->getIndentation(), static_cast(offset)); #endif // !ASMJIT_DISABLE_LOGGER - if (offset <= 1 || !IntUtil::isPowerOf2(offset) || offset > 64) - return setError(kErrorInvalidArgument); + if (alignMode > kAlignZero || offset <= 1 || !Utils::isPowerOf2(offset) || offset > 64) + return setLastError(kErrorInvalidArgument); - uint32_t i = static_cast(IntUtil::deltaTo(getOffset(), offset)); + uint32_t i = static_cast(Utils::alignDiff(getOffset(), offset)); if (i == 0) return kErrorOk; @@ -433,57 +433,29 @@ Error X86Assembler::align(uint32_t alignMode, uint32_t offset) { ASMJIT_PROPAGATE_ERROR(_grow(i)); uint8_t* cursor = getCursor(); - uint8_t alignPattern = 0xCC; + uint8_t pattern = 0x00; - if (alignMode == kAlignCode) { - alignPattern = 0x90; + switch (alignMode) { + case kAlignCode: { + if (hasFeature(kAssemblerFeatureOptimizedAlign)) { + // Intel 64 and IA-32 Architectures Software Developer's Manual - Volume 2B (NOP). 
+ enum { kMaxNopSize = 9 }; - if (hasFeature(kCodeGenOptimizedAlign)) { - const X86CpuInfo* cpuInfo = static_cast(getRuntime()->getCpuInfo()); + static const uint8_t nopData[kMaxNopSize][kMaxNopSize] = { + { 0x90 }, + { 0x66, 0x90 }, + { 0x0F, 0x1F, 0x00 }, + { 0x0F, 0x1F, 0x40, 0x00 }, + { 0x0F, 0x1F, 0x44, 0x00, 0x00 }, + { 0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00 }, + { 0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00 }, + { 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 } + }; - // NOPs optimized for Intel: - // Intel 64 and IA-32 Architectures Software Developer's Manual - // - Volume 2B - // - Instruction Set Reference N-Z - // - NOP - - // NOPs optimized for AMD: - // Software Optimization Guide for AMD Family 10h Processors (Quad-Core) - // - 4.13 - Code Padding with Operand-Size Override and Multibyte NOP - - // Intel and AMD. - static const uint8_t nop1[] = { 0x90 }; - static const uint8_t nop2[] = { 0x66, 0x90 }; - static const uint8_t nop3[] = { 0x0F, 0x1F, 0x00 }; - static const uint8_t nop4[] = { 0x0F, 0x1F, 0x40, 0x00 }; - static const uint8_t nop5[] = { 0x0F, 0x1F, 0x44, 0x00, 0x00 }; - static const uint8_t nop6[] = { 0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00 }; - static const uint8_t nop7[] = { 0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00 }; - static const uint8_t nop8[] = { 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 }; - static const uint8_t nop9[] = { 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 }; - - // AMD. 
- static const uint8_t nop10[] = { 0x66, 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 }; - static const uint8_t nop11[] = { 0x66, 0x66, 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 }; - - const uint8_t* p; - uint32_t n; - - if (cpuInfo->getVendorId() == kCpuVendorIntel && ( - (cpuInfo->getFamily() & 0x0F) == 0x06 || - (cpuInfo->getFamily() & 0x0F) == 0x0F)) { do { - switch (i) { - case 1: p = nop1; n = 1; break; - case 2: p = nop2; n = 2; break; - case 3: p = nop3; n = 3; break; - case 4: p = nop4; n = 4; break; - case 5: p = nop5; n = 5; break; - case 6: p = nop6; n = 6; break; - case 7: p = nop7; n = 7; break; - case 8: p = nop8; n = 8; break; - default: p = nop9; n = 9; break; - } + uint32_t n = Utils::iMin(i, kMaxNopSize); + const uint8_t* p = nopData[(n - 1) * kMaxNopSize]; i -= n; do { @@ -491,33 +463,24 @@ Error X86Assembler::align(uint32_t alignMode, uint32_t offset) { } while (--n); } while (i); } - else if (cpuInfo->getVendorId() == kCpuVendorAmd && cpuInfo->getFamily() >= 0x0F) { - do { - switch (i) { - case 1: p = nop1 ; n = 1; break; - case 2: p = nop2 ; n = 2; break; - case 3: p = nop3 ; n = 3; break; - case 4: p = nop4 ; n = 4; break; - case 5: p = nop5 ; n = 5; break; - case 6: p = nop6 ; n = 6; break; - case 7: p = nop7 ; n = 7; break; - case 8: p = nop8 ; n = 8; break; - case 9: p = nop9 ; n = 9; break; - case 10: p = nop10; n = 10; break; - default: p = nop11; n = 11; break; - } - i -= n; - do { - EMIT_BYTE(*p++); - } while (--n); - } while (i); - } + pattern = 0x90; + break; + } + + case kAlignData: { + pattern = 0xCC; + break; + } + + case kAlignZero: { + // Already set to zero. 
+ break; } } while (i) { - EMIT_BYTE(alignPattern); + EMIT_BYTE(pattern); i--; } @@ -578,7 +541,7 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const { case kRelocTrampoline: ptr -= baseAddress + rd.from + 4; - if (!IntUtil::isInt32(static_cast(ptr))) { + if (!Utils::isInt32(static_cast(ptr))) { ptr = (Ptr)tramp - (baseAddress + rd.from + 4); useTrampoline = true; } @@ -740,6 +703,10 @@ _EmitNE: sb._appendString(®16[index * 4]); return; + case kX86RegTypeK: + sb._appendString("k", 1); + goto _EmitID; + case kX86RegTypeFp: sb._appendString("fp", 2); goto _EmitID; @@ -756,6 +723,10 @@ _EmitNE: sb._appendString("ymm", 3); goto _EmitID; + case kX86RegTypeZmm: + sb._appendString("zmm", 3); + goto _EmitID; + case kX86RegTypeSeg: if (index >= kX86SegCount) goto _EmitNE; @@ -952,19 +923,33 @@ static const Operand::VRegOp x86PatchedHiRegs[4] = { #undef HI_REG template -static Error ASMJIT_CDECL X86Assembler_emit(Assembler* self_, uint32_t code, const Operand* o0, const Operand* o1, const Operand* o2, const Operand* o3) { +static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, const Operand* o0, const Operand* o1, const Operand* o2, const Operand* o3) { X86Assembler* self = static_cast(self_); - - uint8_t* cursor = self->getCursor(); - uint32_t encoded = o0->getOp() + (o1->getOp() << 3) + (o2->getOp() << 6); uint32_t options = self->getInstOptionsAndReset(); // Invalid instruction. if (code >= _kX86InstIdCount) { self->_comment = NULL; - return self->setError(kErrorUnknownInst); + return self->setLastError(kErrorUnknownInst); } + // -------------------------------------------------------------------------- + // [Grow] + // -------------------------------------------------------------------------- + + // Grow request happens rarely. 
+ uint8_t* cursor = self->getCursor(); + if (ASMJIT_UNLIKELY((size_t)(self->_end - cursor) < 16)) { + ASMJIT_PROPAGATE_ERROR(self->_grow(16)); + cursor = self->getCursor(); + } + + // -------------------------------------------------------------------------- + // [Prepare] + // -------------------------------------------------------------------------- + + uint32_t encoded = o0->getOp() + (o1->getOp() << 3) + (o2->getOp() << 6); + // Instruction opcode. uint32_t opCode; // ModR/M opcode or register code. @@ -1005,16 +990,6 @@ static Error ASMJIT_CDECL X86Assembler_emit(Assembler* self_, uint32_t code, con const X86InstInfo& info = _x86InstInfo[code]; const X86InstExtendedInfo& extendedInfo = info.getExtendedInfo(); - // Grow request happens rarely. C++ compiler generates better code if it is - // handled at the end of the function. - if ((size_t)(self->_end - cursor) < 16) - goto _GrowBuffer; - - // -------------------------------------------------------------------------- - // [Prepare] - // -------------------------------------------------------------------------- - -_Prepare: opCode = info.getPrimaryOpCode(); opReg = x86ExtractO(opCode); @@ -1196,7 +1171,7 @@ _Prepare: if (encoded == ENC_OPS(Reg, Imm, None)) { imVal = static_cast(o1)->getInt64(); - imLen = IntUtil::isInt8(imVal) ? static_cast(1) : IntUtil::iMin(o0->getSize(), 4); + imLen = Utils::isInt8(imVal) ? static_cast(1) : Utils::iMin(o0->getSize(), 4); rmReg = x86OpReg(o0); ADD_66H_P_BY_SIZE(o0->getSize()); @@ -1206,7 +1181,7 @@ _Prepare: if (rmReg == 0 && (o0->getSize() == 1 || imLen != 1)) { opCode &= kX86InstOpCode_PP_66 | kX86InstOpCode_W; opCode |= ((opReg << 3) | (0x04 + (o0->getSize() != 1))); - imLen = IntUtil::iMin(o0->getSize(), 4); + imLen = Utils::iMin(o0->getSize(), 4); goto _EmitX86Op; } @@ -1221,7 +1196,7 @@ _Prepare: goto _IllegalInst; imVal = static_cast(o1)->getInt64(); - imLen = IntUtil::isInt8(imVal) ? static_cast(1) : IntUtil::iMin(memSize, 4); + imLen = Utils::isInt8(imVal) ? 
static_cast(1) : Utils::iMin(memSize, 4); opCode += memSize != 1 ? (imLen != 1 ? 1 : 3) : 0; ADD_66H_P_BY_SIZE(memSize); @@ -1388,7 +1363,7 @@ _Prepare: imVal = static_cast(o1)->getInt64(); imLen = 1; - if (!IntUtil::isInt8(imVal)) { + if (!Utils::isInt8(imVal)) { opCode -= 2; imLen = o0->getSize() == 2 ? 2 : 4; } @@ -1404,7 +1379,7 @@ _Prepare: imVal = static_cast(o2)->getInt64(); imLen = 1; - if (!IntUtil::isInt8(imVal)) { + if (!Utils::isInt8(imVal)) { opCode -= 2; imLen = o0->getSize() == 2 ? 2 : 4; } @@ -1420,7 +1395,7 @@ _Prepare: imVal = static_cast(o2)->getInt64(); imLen = 1; - if (!IntUtil::isInt8(imVal)) { + if (!Utils::isInt8(imVal)) { opCode -= 2; imLen = o0->getSize() == 2 ? 2 : 4; } @@ -1477,7 +1452,7 @@ _Prepare: if (encoded == ENC_OPS(Label, None, None)) { label = self->getLabelData(static_cast(o0)->getId()); - if (self->hasFeature(kCodeGenPredictedJumps)) { + if (self->hasFeature(kAssemblerFeaturePredictedJumps)) { if (options & kInstOptionTaken) EMIT_BYTE(0x3E); if (options & kInstOptionNotTaken) @@ -1492,7 +1467,7 @@ _Prepare: intptr_t offs = label->offset - (intptr_t)(cursor - self->_buffer); ASMJIT_ASSERT(offs <= 0); - if ((options & kInstOptionLongForm) == 0 && IntUtil::isInt8(offs - kRel8Size)) { + if ((options & kInstOptionLongForm) == 0 && Utils::isInt8(offs - kRel8Size)) { EMIT_OP(opCode); EMIT_BYTE(offs - kRel8Size); @@ -1544,7 +1519,7 @@ _Prepare: if (label->offset != -1) { // Bound label. 
intptr_t offs = label->offset - (intptr_t)(cursor - self->_buffer) - 1; - if (!IntUtil::isInt8(offs)) + if (!Utils::isInt8(offs)) goto _IllegalInst; EMIT_BYTE(offs); @@ -1588,7 +1563,7 @@ _Prepare: intptr_t offs = label->offset - (intptr_t)(cursor - self->_buffer); - if ((options & kInstOptionLongForm) == 0 && IntUtil::isInt8(offs - kRel8Size)) { + if ((options & kInstOptionLongForm) == 0 && Utils::isInt8(offs - kRel8Size)) { options |= kInstOptionShortForm; EMIT_BYTE(0xEB); @@ -1746,7 +1721,7 @@ _Prepare: rmReg = x86OpReg(o0); // Optimize instruction size by using 32-bit immediate if possible. - if (Arch == kArchX64 && imLen == 8 && IntUtil::isInt32(imVal)) { + if (Arch == kArchX64 && imLen == 8 && Utils::isInt32(imVal)) { opCode = 0xC7; ADD_REX_W(1); imLen = 4; @@ -1769,7 +1744,7 @@ _Prepare: goto _IllegalInst; imVal = static_cast(o1)->getInt64(); - imLen = IntUtil::iMin(memSize, 4); + imLen = Utils::iMin(memSize, 4); opCode = 0xC6 + (memSize != 1); opReg = 0; @@ -1869,7 +1844,7 @@ _Prepare: if (encoded == ENC_OPS(Imm, None, None)) { imVal = static_cast(o0)->getInt64(); - imLen = IntUtil::isInt8(imVal) ? 1 : 4; + imLen = Utils::isInt8(imVal) ? 1 : 4; EMIT_BYTE(imLen == 1 ? 0x6A : 0x68); goto _EmitImm; @@ -2074,7 +2049,7 @@ _GroupPop_Gp: if (encoded == ENC_OPS(Reg, Imm, None)) { imVal = static_cast(o1)->getInt64(); - imLen = IntUtil::iMin(o0->getSize(), 4); + imLen = Utils::iMin(o0->getSize(), 4); ADD_66H_P_BY_SIZE(o0->getSize()); ADD_REX_W_BY_SIZE(o0->getSize()); @@ -2095,7 +2070,7 @@ _GroupPop_Gp: goto _IllegalInst; imVal = static_cast(o1)->getInt64(); - imLen = IntUtil::iMin(o0->getSize(), 4); + imLen = Utils::iMin(o0->getSize(), 4); ADD_66H_P_BY_SIZE(o0->getSize()); ADD_REX_W_BY_SIZE(o0->getSize()); @@ -2115,7 +2090,7 @@ _GroupPop_Gp: rmMem = x86OpMem(o1); goto _EmitX86M; } - // ... fall through ... + // ... Fall through ... 
case kX86InstEncodingIdX86Xadd: if (encoded == ENC_OPS(Reg, Reg, None)) { @@ -3478,21 +3453,21 @@ _AvxRmMr_AfterRegRegCheck: // -------------------------------------------------------------------------- _IllegalInst: - self->setError(kErrorIllegalInst); + self->setLastError(kErrorIllegalInst); #if defined(ASMJIT_DEBUG) assertIllegal = true; #endif // ASMJIT_DEBUG goto _EmitDone; _IllegalAddr: - self->setError(kErrorIllegalAddresing); + self->setLastError(kErrorIllegalAddresing); #if defined(ASMJIT_DEBUG) assertIllegal = true; #endif // ASMJIT_DEBUG goto _EmitDone; _IllegalDisp: - self->setError(kErrorIllegalDisplacement); + self->setLastError(kErrorIllegalDisplacement); #if defined(ASMJIT_DEBUG) assertIllegal = true; #endif // ASMJIT_DEBUG @@ -3658,7 +3633,7 @@ _EmitSib: EMIT_BYTE(x86EncodeMod(0, opReg, 4)); EMIT_BYTE(x86EncodeSib(0, 4, 4)); } - else if (IntUtil::isInt8(dispOffset)) { + else if (Utils::isInt8(dispOffset)) { // [Esp/Rsp/R12 + Disp8]. EMIT_BYTE(x86EncodeMod(1, opReg, 4)); EMIT_BYTE(x86EncodeSib(0, 4, 4)); @@ -3675,7 +3650,7 @@ _EmitSib: // [Base]. EMIT_BYTE(x86EncodeMod(0, opReg, mBase)); } - else if (IntUtil::isInt8(dispOffset)) { + else if (Utils::isInt8(dispOffset)) { // [Base + Disp8]. EMIT_BYTE(x86EncodeMod(1, opReg, mBase)); EMIT_BYTE(static_cast(dispOffset)); @@ -3698,7 +3673,7 @@ _EmitSib: EMIT_BYTE(x86EncodeMod(0, opReg, 4)); EMIT_BYTE(x86EncodeSib(shift, mIndex, mBase)); } - else if (IntUtil::isInt8(dispOffset)) { + else if (Utils::isInt8(dispOffset)) { // [Base + Index * Scale + Disp8]. EMIT_BYTE(x86EncodeMod(1, opReg, 4)); EMIT_BYTE(x86EncodeSib(shift, mIndex, mBase)); @@ -3742,7 +3717,7 @@ _EmitSib: rd.data = static_cast(dispOffset); if (self->_relocList.append(rd) != kErrorOk) - return self->setError(kErrorNoHeapMemory); + return self->setLastError(kErrorNoHeapMemory); if (label->offset != -1) { // Bound label. 
@@ -3767,7 +3742,7 @@ _EmitSib: rd.data = rd.from + static_cast(dispOffset); if (self->_relocList.append(rd) != kErrorOk) - return self->setError(kErrorNoHeapMemory); + return self->setLastError(kErrorNoHeapMemory); EMIT_DWORD(0); } @@ -3997,7 +3972,7 @@ _EmitAvxV: EMIT_BYTE(x86EncodeMod(0, opReg, 4)); EMIT_BYTE(x86EncodeSib(shift, mIndex, mBase)); } - else if (IntUtil::isInt8(dispOffset)) { + else if (Utils::isInt8(dispOffset)) { // [Base + Index * Scale + Disp8]. EMIT_BYTE(x86EncodeMod(1, opReg, 4)); EMIT_BYTE(x86EncodeSib(shift, mIndex, mBase)); @@ -4033,7 +4008,7 @@ _EmitAvxV: rd.data = static_cast(dispOffset); if (self->_relocList.append(rd) != kErrorOk) - return self->setError(kErrorNoHeapMemory); + return self->setLastError(kErrorNoHeapMemory); } if (label->offset != -1) { @@ -4147,7 +4122,6 @@ _EmitXopM: // trampoline, it's better to use 6-byte `jmp/call` (prefixing it with REX // prefix) and to patch the `jmp/call` instruction to read the address from // a memory in case the trampoline is needed. - // _EmitJmpOrCallAbs: { RelocData rd; @@ -4159,7 +4133,7 @@ _EmitJmpOrCallAbs: uint32_t trampolineSize = 0; if (Arch == kArchX64) { - Ptr baseAddress = self->getBaseAddress(); + Ptr baseAddress = self->getRuntime()->getBaseAddress(); // If the base address of the output is known, it's possible to determine // the need for a trampoline here. This saves possible REX prefix in @@ -4182,10 +4156,10 @@ _EmitJmpOrCallAbs: EMIT_DWORD(0); if (self->_relocList.append(rd) != kErrorOk) - return self->setError(kErrorNoHeapMemory); + return self->setLastError(kErrorNoHeapMemory); // Reserve space for a possible trampoline. 
- self->_trampolineSize += trampolineSize; + self->_trampolinesSize += trampolineSize; } goto _EmitDone; @@ -4227,7 +4201,7 @@ _EmitDone: # else if (self->_logger) { # endif // ASMJIT_DEBUG - StringBuilderT<512> sb; + StringBuilderTmp<512> sb; uint32_t loggerOptions = 0; if (self->_logger) { @@ -4248,9 +4222,9 @@ _EmitDone: self->_logger->logString(kLoggerStyleDefault, sb.getData(), sb.getLength()); # if defined(ASMJIT_DEBUG) - // Raise an assertion failure, because this situation shouldn't happen. + // This shouldn't happen. if (assertIllegal) - assertionFailed(sb.getData(), __FILE__, __LINE__); + DebugUtils::assertionFailed(__FILE__, __LINE__, sb.getData()); # endif // ASMJIT_DEBUG } #else @@ -4263,12 +4237,6 @@ _EmitDone: self->setCursor(cursor); return kErrorOk; - -_GrowBuffer: - ASMJIT_PROPAGATE_ERROR(self->_grow(16)); - - cursor = self->getCursor(); - goto _Prepare; } Error X86Assembler::_emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) { diff --git a/src/asmjit/x86/x86assembler.h b/src/asmjit/x86/x86assembler.h index 709bc3a..db0a8c3 100644 --- a/src/asmjit/x86/x86assembler.h +++ b/src/asmjit/x86/x86assembler.h @@ -379,13 +379,13 @@ namespace asmjit { //! functions available that return a new register operand. //! //! \sa X86Compiler. -struct ASMJIT_VCLASS X86Assembler : public Assembler { +struct ASMJIT_VIRTAPI X86Assembler : public Assembler { // -------------------------------------------------------------------------- // [Construction / Destruction] // -------------------------------------------------------------------------- ASMJIT_API X86Assembler(Runtime* runtime, uint32_t arch -#if defined(ASMJIT_ARCH_X86) || defined(ASMJIT_ARCH_X64) +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 = kArchHost #endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 ); @@ -395,17 +395,18 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler { // [Arch] // -------------------------------------------------------------------------- + //! 
\internal + //! + //! Set the assembler architecture to `kArchX86` or `kArchX64`. + ASMJIT_API Error _setArch(uint32_t arch); + //! Get count of registers of the current architecture and mode. - ASMJIT_INLINE const X86RegCount& getRegCount() const { - return _regCount; - } + ASMJIT_INLINE const X86RegCount& getRegCount() const { return _regCount; } - //! Get Gpd or Gpq register depending on the current architecture. - ASMJIT_INLINE X86GpReg gpz(uint32_t index) const { - return X86GpReg(zax, index); - } + //! Get DWORD or QWORD register depending on the current architecture. + ASMJIT_INLINE X86GpReg gpz(uint32_t index) const { return X86GpReg(zax, index); } - //! Create an architecture dependent intptr_t memory operand. + //! Create an `intptr_t` memory operand depending on the current architecture. ASMJIT_INLINE X86Mem intptr_ptr(const X86GpReg& base, int32_t disp = 0) const { return x86::ptr(base, disp, _regSize); } @@ -434,8 +435,6 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler { return x86::ptr_abs(pAbs, index, shift, disp, _regSize); } - ASMJIT_API Error setArch(uint32_t arch); - // -------------------------------------------------------------------------- // [Embed] // -------------------------------------------------------------------------- @@ -492,7 +491,7 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler { // [Align] // -------------------------------------------------------------------------- - ASMJIT_API virtual Error align(uint32_t mode, uint32_t offset); + ASMJIT_API virtual Error align(uint32_t alignMode, uint32_t offset); // -------------------------------------------------------------------------- // [Reloc] @@ -559,13 +558,13 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler { #define INST_1i(_Inst_, _Code_, _Op0_) \ ASMJIT_INLINE Error _Inst_(const _Op0_& o0) { return emit(_Code_, o0); } \ /*! 
\overload */ \ - ASMJIT_INLINE Error _Inst_(int o0) { return emit(_Code_, asInt(o0)); } \ + ASMJIT_INLINE Error _Inst_(int o0) { return emit(_Code_, Utils::asInt(o0)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(unsigned int o0) { return emit(_Code_, asInt(o0)); } \ + ASMJIT_INLINE Error _Inst_(unsigned int o0) { return emit(_Code_, Utils::asInt(o0)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(int64_t o0) { return emit(_Code_, asInt(o0)); } \ + ASMJIT_INLINE Error _Inst_(int64_t o0) { return emit(_Code_, Utils::asInt(o0)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(uint64_t o0) { return emit(_Code_, asInt(o0)); } + ASMJIT_INLINE Error _Inst_(uint64_t o0) { return emit(_Code_, Utils::asInt(o0)); } #define INST_1cc(_Inst_, _Code_, _Translate_, _Op0_) \ ASMJIT_INLINE Error _Inst_(uint32_t cc, const _Op0_& o0) { \ @@ -617,13 +616,13 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler { #define INST_2i(_Inst_, _Code_, _Op0_, _Op1_) \ ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_, o0, o1); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int o1) { return emit(_Code_, o0, asInt(o1)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int o1) { return emit(_Code_, o0, Utils::asInt(o1)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, unsigned int o1) { return emit(_Code_, o0, asInt(o1)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, unsigned int o1) { return emit(_Code_, o0, Utils::asInt(o1)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int64_t o1) { return emit(_Code_, o0, asInt(o1)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int64_t o1) { return emit(_Code_, o0, Utils::asInt(o1)); } \ /*! 
\overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, uint64_t o1) { return emit(_Code_, o0, asInt(o1)); } + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, uint64_t o1) { return emit(_Code_, o0, Utils::asInt(o1)); } #define INST_2cc(_Inst_, _Code_, _Translate_, _Op0_, _Op1_) \ ASMJIT_INLINE Error _Inst_(uint32_t cc, const _Op0_& o0, const _Op1_& o1) { \ @@ -667,24 +666,24 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler { #define INST_3i(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { return emit(_Code_, o0, o1, o2); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int o2) { return emit(_Code_, o0, o1, asInt(o2)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2) { return emit(_Code_, o0, o1, asInt(o2)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2) { return emit(_Code_, o0, o1, asInt(o2)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2) { return emit(_Code_, o0, o1, asInt(o2)); } + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } #define INST_3ii(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { return emit(_Code_, o0, o1, o2); } \ /*! 
\overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int o1, int o2) { return emit(_Code_, o0, Imm(o1), asInt(o2)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int o1, int o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, unsigned int o1, unsigned int o2) { return emit(_Code_, o0, Imm(o1), asInt(o2)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, unsigned int o1, unsigned int o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int64_t o1, int64_t o2) { return emit(_Code_, o0, Imm(o1), asInt(o2)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int64_t o1, int64_t o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, uint64_t o1, uint64_t o2) { return emit(_Code_, o0, Imm(o1), asInt(o2)); } + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, uint64_t o1, uint64_t o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } #define INST_4x(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \ ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { return emit(_Code_, o0, o1, o2, o3); } @@ -692,24 +691,24 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler { #define INST_4i(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \ ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { return emit(_Code_, o0, o1, o2, o3); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int o3) { return emit(_Code_, o0, o1, o2, asInt(o3)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } \ /*! 
\overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, unsigned int o3) { return emit(_Code_, o0, o1, o2, asInt(o3)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, unsigned int o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int64_t o3) { return emit(_Code_, o0, o1, o2, asInt(o3)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int64_t o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, uint64_t o3) { return emit(_Code_, o0, o1, o2, asInt(o3)); } + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, uint64_t o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } #define INST_4ii(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \ ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { return emit(_Code_, o0, o1, o2, o3); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int o2, int o3) { return emit(_Code_, o0, o1, Imm(o2), asInt(o3)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int o2, int o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2, unsigned int o3) { return emit(_Code_, o0, o1, Imm(o2), asInt(o3)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2, unsigned int o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } \ /*! 
\overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2, int64_t o3) { return emit(_Code_, o0, o1, Imm(o2), asInt(o3)); } \ + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2, int64_t o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2, uint64_t o3) { return emit(_Code_, o0, o1, Imm(o2), asInt(o3)); } + ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2, uint64_t o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } // -------------------------------------------------------------------------- // [X86/X64] diff --git a/src/asmjit/x86/x86compiler.cpp b/src/asmjit/x86/x86compiler.cpp index 8fdc7ca..435977c 100644 --- a/src/asmjit/x86/x86compiler.cpp +++ b/src/asmjit/x86/x86compiler.cpp @@ -12,11 +12,11 @@ #if !defined(ASMJIT_DISABLE_COMPILER) && (defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)) // [Dependencies - AsmJit] -#include "../base/intutil.h" -#include "../base/string.h" +#include "../base/containers.h" +#include "../base/utils.h" #include "../x86/x86assembler.h" #include "../x86/x86compiler.h" -#include "../x86/x86context_p.h" +#include "../x86/x86compilercontext_p.h" // [Api-Begin] #include "../apibegin.h" @@ -28,10 +28,10 @@ namespace asmjit { // ============================================================================ #if !defined(ASMJIT_DEBUG) -#define ASMJIT_ASSERT_UNINITIALIZED(op) \ +#define ASMJIT_ASSERT_OPERAND(op) \ do {} while(0) #else -#define ASMJIT_ASSERT_UNINITIALIZED(op) \ +#define ASMJIT_ASSERT_OPERAND(op) \ do { \ if (op.isVar() || op.isLabel()) { \ ASMJIT_ASSERT(op.getId() != kInvalidValue); \ @@ -137,533 +137,12 @@ const uint8_t _x64VarMapping[kX86VarTypeCount] = { }; #endif // ASMJIT_BUILD_X64 -// ============================================================================ -// [asmjit::X86FuncDecl - Helpers] -// 
============================================================================ - -static ASMJIT_INLINE bool x86ArgIsInt(uint32_t aType) { - ASMJIT_ASSERT(aType < kX86VarTypeCount); - return IntUtil::inInterval(aType, _kVarTypeIntStart, _kVarTypeIntEnd); -} - -static ASMJIT_INLINE bool x86ArgIsFp(uint32_t aType) { - ASMJIT_ASSERT(aType < kX86VarTypeCount); - return IntUtil::inInterval(aType, _kVarTypeFpStart, _kVarTypeFpEnd); -} - -static ASMJIT_INLINE uint32_t x86ArgTypeToXmmType(uint32_t aType) { - if (aType == kVarTypeFp32) - return kX86VarTypeXmmSs; - if (aType == kVarTypeFp64) - return kX86VarTypeXmmSd; - return aType; -} - -//! Get an architecture from calling convention. -//! -//! Returns `kArchX86` or `kArchX64` depending on `conv`. -static ASMJIT_INLINE uint32_t x86GetArchFromCConv(uint32_t conv) { - return IntUtil::inInterval(conv, kX86FuncConvW64, kX86FuncConvU64) ? kArchX64 : kArchX86; -} - -// ============================================================================ -// [asmjit::X86FuncDecl - SetPrototype] -// ============================================================================ - -#define R(_Index_) kX86RegIndex##_Index_ -static uint32_t X86FuncDecl_initConv(X86FuncDecl* self, uint32_t arch, uint32_t conv) { - // Setup defaults. 
- self->_argStackSize = 0; - self->_redZoneSize = 0; - self->_spillZoneSize = 0; - - self->_convention = static_cast(conv); - self->_calleePopsStack = false; - self->_direction = kFuncDirRtl; - - self->_passed.reset(); - self->_preserved.reset(); - - ::memset(self->_passedOrderGp, kInvalidReg, ASMJIT_ARRAY_SIZE(self->_passedOrderGp)); - ::memset(self->_passedOrderXmm, kInvalidReg, ASMJIT_ARRAY_SIZE(self->_passedOrderXmm)); - - // -------------------------------------------------------------------------- - // [X86 Support] - // -------------------------------------------------------------------------- - -#if defined(ASMJIT_BUILD_X86) - if (arch == kArchX86) { - self->_preserved.set(kX86RegClassGp, IntUtil::mask(R(Bx), R(Sp), R(Bp), R(Si), R(Di))); - - switch (conv) { - case kX86FuncConvCDecl: - break; - - case kX86FuncConvStdCall: - self->_calleePopsStack = true; - break; - - case kX86FuncConvMsThisCall: - self->_calleePopsStack = true; - self->_passed.set(kX86RegClassGp, IntUtil::mask(R(Cx))); - self->_passedOrderGp[0] = R(Cx); - break; - - case kX86FuncConvMsFastCall: - self->_calleePopsStack = true; - self->_passed.set(kX86RegClassGp, IntUtil::mask(R(Cx), R(Cx))); - self->_passedOrderGp[0] = R(Cx); - self->_passedOrderGp[1] = R(Dx); - break; - - case kX86FuncConvBorlandFastCall: - self->_calleePopsStack = true; - self->_direction = kFuncDirLtr; - self->_passed.set(kX86RegClassGp, IntUtil::mask(R(Ax), R(Dx), R(Cx))); - self->_passedOrderGp[0] = R(Ax); - self->_passedOrderGp[1] = R(Dx); - self->_passedOrderGp[2] = R(Cx); - break; - - case kX86FuncConvGccFastCall: - self->_calleePopsStack = true; - self->_passed.set(kX86RegClassGp, IntUtil::mask(R(Cx), R(Dx))); - self->_passedOrderGp[0] = R(Cx); - self->_passedOrderGp[1] = R(Dx); - break; - - case kX86FuncConvGccRegParm1: - self->_passed.set(kX86RegClassGp, IntUtil::mask(R(Ax))); - self->_passedOrderGp[0] = R(Ax); - break; - - case kX86FuncConvGccRegParm2: - self->_passed.set(kX86RegClassGp, IntUtil::mask(R(Ax), 
R(Dx))); - self->_passedOrderGp[0] = R(Ax); - self->_passedOrderGp[1] = R(Dx); - break; - - case kX86FuncConvGccRegParm3: - self->_passed.set(kX86RegClassGp, IntUtil::mask(R(Ax), R(Dx), R(Cx))); - self->_passedOrderGp[0] = R(Ax); - self->_passedOrderGp[1] = R(Dx); - self->_passedOrderGp[2] = R(Cx); - break; - - default: - ASMJIT_ASSERT(!"Reached"); - } - - return kErrorOk; - } -#endif // ASMJIT_BUILD_X86 - - // -------------------------------------------------------------------------- - // [X64 Support] - // -------------------------------------------------------------------------- - -#if defined(ASMJIT_BUILD_X64) - switch (conv) { - case kX86FuncConvW64: - self->_spillZoneSize = 32; - - self->_passed.set(kX86RegClassGp, IntUtil::mask(R(Cx), R(Dx), 8, 9)); - self->_passedOrderGp[0] = R(Cx); - self->_passedOrderGp[1] = R(Dx); - self->_passedOrderGp[2] = 8; - self->_passedOrderGp[3] = 9; - - self->_passed.set(kX86RegClassXyz, IntUtil::mask(0, 1, 2, 3)); - self->_passedOrderXmm[0] = 0; - self->_passedOrderXmm[1] = 1; - self->_passedOrderXmm[2] = 2; - self->_passedOrderXmm[3] = 3; - - self->_preserved.set(kX86RegClassGp , IntUtil::mask(R(Bx), R(Sp), R(Bp), R(Si), R(Di), 12, 13, 14, 15)); - self->_preserved.set(kX86RegClassXyz, IntUtil::mask(6, 7, 8, 9, 10, 11, 12, 13, 14, 15)); - break; - - case kX86FuncConvU64: - self->_redZoneSize = 128; - - self->_passed.set(kX86RegClassGp, IntUtil::mask(R(Di), R(Si), R(Dx), R(Cx), 8, 9)); - self->_passedOrderGp[0] = R(Di); - self->_passedOrderGp[1] = R(Si); - self->_passedOrderGp[2] = R(Dx); - self->_passedOrderGp[3] = R(Cx); - self->_passedOrderGp[4] = 8; - self->_passedOrderGp[5] = 9; - - self->_passed.set(kX86RegClassXyz, IntUtil::mask(0, 1, 2, 3, 4, 5, 6, 7)); - self->_passedOrderXmm[0] = 0; - self->_passedOrderXmm[1] = 1; - self->_passedOrderXmm[2] = 2; - self->_passedOrderXmm[3] = 3; - self->_passedOrderXmm[4] = 4; - self->_passedOrderXmm[5] = 5; - self->_passedOrderXmm[6] = 6; - self->_passedOrderXmm[7] = 7; - - 
self->_preserved.set(kX86RegClassGp, IntUtil::mask(R(Bx), R(Sp), R(Bp), 12, 13, 14, 15)); - break; - - default: - ASMJIT_ASSERT(!"Reached"); - } -#endif // ASMJIT_BUILD_X64 - - return kErrorOk; -} -#undef R - -static Error X86FuncDecl_initFunc(X86FuncDecl* self, uint32_t arch, - uint32_t ret, const uint32_t* argList, uint32_t argCount) { - - ASMJIT_ASSERT(argCount <= kFuncArgCount); - - uint32_t conv = self->_convention; - uint32_t regSize = (arch == kArchX86) ? 4 : 8; - - int32_t i = 0; - int32_t gpPos = 0; - int32_t xmmPos = 0; - int32_t stackOffset = 0; - const uint8_t* varMapping = NULL; - -#if defined(ASMJIT_BUILD_X86) - if (arch == kArchX86) - varMapping = _x86VarMapping; -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_X64) - if (arch == kArchX64) - varMapping = _x64VarMapping; -#endif // ASMJIT_BUILD_X64 - - ASMJIT_ASSERT(varMapping != NULL); - self->_argCount = static_cast(argCount); - self->_retCount = 0; - - for (i = 0; i < static_cast(argCount); i++) { - FuncInOut& arg = self->getArg(i); - arg._varType = static_cast(varMapping[argList[i]]); - arg._regIndex = kInvalidReg; - arg._stackOffset = kFuncStackInvalid; - } - - for (; i < kFuncArgCount; i++) { - self->_argList[i].reset(); - } - - self->_retList[0].reset(); - self->_retList[1].reset(); - self->_argStackSize = 0; - self->_used.reset(); - - if (ret != kInvalidVar) { - ret = varMapping[ret]; - switch (ret) { - case kVarTypeInt64: - case kVarTypeUInt64: - // 64-bit value is returned in EDX:EAX on x86. -#if defined(ASMJIT_BUILD_X86) - if (arch == kArchX86) { - self->_retCount = 2; - self->_retList[0]._varType = kVarTypeUInt32; - self->_retList[0]._regIndex = kX86RegIndexAx; - self->_retList[1]._varType = static_cast(ret - 2); - self->_retList[1]._regIndex = kX86RegIndexDx; - } -#endif // ASMJIT_BUILD_X86 - // ... Fall through ... 
- - case kVarTypeInt8: - case kVarTypeUInt8: - case kVarTypeInt16: - case kVarTypeUInt16: - case kVarTypeInt32: - case kVarTypeUInt32: - self->_retCount = 1; - self->_retList[0]._varType = static_cast(ret); - self->_retList[0]._regIndex = kX86RegIndexAx; - break; - - case kX86VarTypeMm: - self->_retCount = 1; - self->_retList[0]._varType = static_cast(ret); - self->_retList[0]._regIndex = 0; - break; - - case kVarTypeFp32: - self->_retCount = 1; - if (arch == kArchX86) { - self->_retList[0]._varType = kVarTypeFp32; - self->_retList[0]._regIndex = 0; - } - else { - self->_retList[0]._varType = kX86VarTypeXmmSs; - self->_retList[0]._regIndex = 0; - } - break; - - case kVarTypeFp64: - self->_retCount = 1; - if (arch == kArchX86) { - self->_retList[0]._varType = kVarTypeFp64; - self->_retList[0]._regIndex = 0; - } - else { - self->_retList[0]._varType = kX86VarTypeXmmSd; - self->_retList[0]._regIndex = 0; - break; - } - break; - - case kX86VarTypeXmm: - case kX86VarTypeXmmSs: - case kX86VarTypeXmmSd: - case kX86VarTypeXmmPs: - case kX86VarTypeXmmPd: - self->_retCount = 1; - self->_retList[0]._varType = static_cast(ret); - self->_retList[0]._regIndex = 0; - break; - } - } - - if (self->_argCount == 0) - return kErrorOk; - -#if defined(ASMJIT_BUILD_X86) - if (arch == kArchX86) { - // Register arguments (Integer), always left-to-right. - for (i = 0; i != static_cast(argCount); i++) { - FuncInOut& arg = self->getArg(i); - uint32_t varType = varMapping[arg.getVarType()]; - - if (!x86ArgIsInt(varType) || gpPos >= ASMJIT_ARRAY_SIZE(self->_passedOrderGp)) - continue; - - if (self->_passedOrderGp[gpPos] == kInvalidReg) - continue; - - arg._regIndex = self->_passedOrderGp[gpPos++]; - self->_used.or_(kX86RegClassGp, IntUtil::mask(arg.getRegIndex())); - } - - // Stack arguments. 
- int32_t iStart = static_cast(argCount - 1); - int32_t iEnd = -1; - int32_t iStep = -1; - - if (self->_direction == kFuncDirLtr) { - iStart = 0; - iEnd = static_cast(argCount); - iStep = 1; - } - - for (i = iStart; i != iEnd; i += iStep) { - FuncInOut& arg = self->getArg(i); - uint32_t varType = varMapping[arg.getVarType()]; - - if (arg.hasRegIndex()) - continue; - - if (x86ArgIsInt(varType)) { - stackOffset -= 4; - arg._stackOffset = static_cast(stackOffset); - } - else if (x86ArgIsFp(varType)) { - int32_t size = static_cast(_x86VarInfo[varType].getSize()); - stackOffset -= size; - arg._stackOffset = static_cast(stackOffset); - } - } - } -#endif // ASMJIT_BUILD_X86 - -#if defined(ASMJIT_BUILD_X64) - if (arch == kArchX64) { - if (conv == kX86FuncConvW64) { - int32_t argMax = IntUtil::iMin(argCount, 4); - - // Register arguments (Gp/Xmm), always left-to-right. - for (i = 0; i != argMax; i++) { - FuncInOut& arg = self->getArg(i); - uint32_t varType = varMapping[arg.getVarType()]; - - if (x86ArgIsInt(varType) && i < ASMJIT_ARRAY_SIZE(self->_passedOrderGp)) { - arg._regIndex = self->_passedOrderGp[i]; - self->_used.or_(kX86RegClassGp, IntUtil::mask(arg.getRegIndex())); - continue; - } - - if (x86ArgIsFp(varType) && i < ASMJIT_ARRAY_SIZE(self->_passedOrderXmm)) { - arg._varType = static_cast(x86ArgTypeToXmmType(varType)); - arg._regIndex = self->_passedOrderXmm[i]; - self->_used.or_(kX86RegClassXyz, IntUtil::mask(arg.getRegIndex())); - } - } - - // Stack arguments (always right-to-left). - for (i = argCount - 1; i != -1; i--) { - FuncInOut& arg = self->getArg(i); - uint32_t varType = varMapping[arg.getVarType()]; - - if (arg.hasRegIndex()) - continue; - - if (x86ArgIsInt(varType)) { - stackOffset -= 8; // Always 8 bytes. - arg._stackOffset = stackOffset; - } - else if (x86ArgIsFp(varType)) { - stackOffset -= 8; // Always 8 bytes (float/double). - arg._stackOffset = stackOffset; - } - } - - // 32 bytes shadow space (X64W calling convention specific). 
- stackOffset -= 4 * 8; - } - else { - // Register arguments (Gp), always left-to-right. - for (i = 0; i != static_cast(argCount); i++) { - FuncInOut& arg = self->getArg(i); - uint32_t varType = varMapping[arg.getVarType()]; - - if (!x86ArgIsInt(varType) || gpPos >= ASMJIT_ARRAY_SIZE(self->_passedOrderGp)) - continue; - - if (self->_passedOrderGp[gpPos] == kInvalidReg) - continue; - - arg._regIndex = self->_passedOrderGp[gpPos++]; - self->_used.or_(kX86RegClassGp, IntUtil::mask(arg.getRegIndex())); - } - - // Register arguments (Xmm), always left-to-right. - for (i = 0; i != static_cast(argCount); i++) { - FuncInOut& arg = self->getArg(i); - uint32_t varType = varMapping[arg.getVarType()]; - - if (x86ArgIsFp(varType)) { - arg._varType = static_cast(x86ArgTypeToXmmType(varType)); - arg._regIndex = self->_passedOrderXmm[xmmPos++]; - self->_used.or_(kX86RegClassXyz, IntUtil::mask(arg.getRegIndex())); - } - } - - // Stack arguments. - for (i = argCount - 1; i != -1; i--) { - FuncInOut& arg = self->getArg(i); - uint32_t varType = varMapping[arg.getVarType()]; - - if (arg.hasRegIndex()) - continue; - - if (x86ArgIsInt(varType)) { - stackOffset -= 8; - arg._stackOffset = static_cast(stackOffset); - } - else if (x86ArgIsFp(varType)) { - int32_t size = static_cast(_x86VarInfo[varType].getSize()); - - stackOffset -= size; - arg._stackOffset = static_cast(stackOffset); - } - } - } - } -#endif // ASMJIT_BUILD_X64 - - // Modify the stack offset, thus in result all parameters would have positive - // non-zero stack offset. 
- for (i = 0; i < static_cast(argCount); i++) { - FuncInOut& arg = self->getArg(i); - if (!arg.hasRegIndex()) { - arg._stackOffset += static_cast(static_cast(regSize) - stackOffset); - } - } - - self->_argStackSize = static_cast(-stackOffset); - return kErrorOk; -} - -Error X86FuncDecl::setPrototype(uint32_t conv, const FuncPrototype& p) { - if (conv == kFuncConvNone || conv >= _kX86FuncConvCount) - return kErrorInvalidArgument; - - if (p.getArgCount() > kFuncArgCount) - return kErrorInvalidArgument; - - // Validate that the required convention is supported by the current asmjit - // configuration, if only one target is compiled. - uint32_t arch = x86GetArchFromCConv(conv); -#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_BUILD_X64) - if (arch == kArchX64) - return kErrorInvalidState; -#endif // ASMJIT_BUILD_X86 && !ASMJIT_BUILD_X64 - -#if !defined(ASMJIT_BUILD_X86) && defined(ASMJIT_BUILD_X64) - if (arch == kArchX86) - return kErrorInvalidState; -#endif // !ASMJIT_BUILD_X86 && ASMJIT_BUILD_X64 - - ASMJIT_PROPAGATE_ERROR(X86FuncDecl_initConv(this, arch, conv)); - ASMJIT_PROPAGATE_ERROR(X86FuncDecl_initFunc(this, arch, p.getRet(), p.getArgList(), p.getArgCount())); - - return kErrorOk; -} - -// ============================================================================ -// [asmjit::X86FuncDecl - Reset] -// ============================================================================ - -void X86FuncDecl::reset() { - uint32_t i; - - _convention = kFuncConvNone; - _calleePopsStack = false; - _direction = kFuncDirRtl; - _reserved0 = 0; - - _argCount = 0; - _retCount = 0; - - _argStackSize = 0; - _redZoneSize = 0; - _spillZoneSize = 0; - - for (i = 0; i < ASMJIT_ARRAY_SIZE(_argList); i++) { - _argList[i].reset(); - } - - _retList[0].reset(); - _retList[1].reset(); - - _used.reset(); - _passed.reset(); - _preserved.reset(); - - ::memset(_passedOrderGp, kInvalidReg, ASMJIT_ARRAY_SIZE(_passedOrderGp)); - ::memset(_passedOrderXmm, kInvalidReg, 
ASMJIT_ARRAY_SIZE(_passedOrderXmm)); -} - -// ============================================================================ -// [asmjit::X86CallNode - Prototype] -// ============================================================================ - -Error X86CallNode::setPrototype(uint32_t conv, const FuncPrototype& p) { - return _x86Decl.setPrototype(conv, p); -} - // ============================================================================ // [asmjit::X86CallNode - Arg / Ret] // ============================================================================ bool X86CallNode::_setArg(uint32_t i, const Operand& op) { - if ((i & ~kFuncArgHi) >= _x86Decl.getArgCount()) + if ((i & ~kFuncArgHi) >= _x86Decl.getNumArgs()) return false; _args[i] = op; @@ -679,106 +158,151 @@ bool X86CallNode::_setRet(uint32_t i, const Operand& op) { } // ============================================================================ -// [asmjit::X86Compiler - Helpers (Private)] +// [asmjit::X86Compiler - Construction / Destruction] // ============================================================================ -static Error X86Compiler_emitConstPool(X86Compiler* self, - Label& label, ConstPool& pool) { +X86Compiler::X86Compiler(X86Assembler* assembler) + : Compiler(), + zax(NoInit), + zcx(NoInit), + zdx(NoInit), + zbx(NoInit), + zsp(NoInit), + zbp(NoInit), + zsi(NoInit), + zdi(NoInit) { - if (label.getId() == kInvalidValue) - return kErrorOk; + _regCount.reset(); + zax = x86::noGpReg; + zcx = x86::noGpReg; + zdx = x86::noGpReg; + zbx = x86::noGpReg; + zsp = x86::noGpReg; + zbp = x86::noGpReg; + zsi = x86::noGpReg; + zdi = x86::noGpReg; - self->align(kAlignData, static_cast(pool.getAlignment())); - self->bind(label); + if (assembler != NULL) + attach(assembler); +} - EmbedNode* embedNode = self->embed(NULL, static_cast(pool.getSize())); - if (embedNode == NULL) - return kErrorNoHeapMemory; +X86Compiler::~X86Compiler() { + reset(true); +} - pool.fill(embedNode->getData()); - pool.reset(); - 
label.reset(); +// ============================================================================ +// [asmjit::X86Compiler - Attach / Reset] +// ============================================================================ + +Error X86Compiler::attach(Assembler* assembler) { + ASMJIT_ASSERT(assembler != NULL); + + if (_assembler != NULL) + return kErrorInvalidState; + + uint32_t arch = assembler->getArch(); + switch (arch) { +#if defined(ASMJIT_BUILD_X86) + case kArchX86: + _targetVarMapping = _x86VarMapping; + break; +#endif // ASMJIT_BUILD_X86 + +#if defined(ASMJIT_BUILD_X64) + case kArchX64: + _targetVarMapping = _x64VarMapping; + break; +#endif // ASMJIT_BUILD_X64 + + default: + return kErrorInvalidArch; + } + + assembler->_attached(this); + + _arch = static_cast(arch); + _regSize = static_cast(assembler->getRegSize()); + _regCount = static_cast(assembler)->getRegCount(); + _finalized = false; + + zax = static_cast(assembler)->zax; + zcx = static_cast(assembler)->zcx; + zdx = static_cast(assembler)->zdx; + zbx = static_cast(assembler)->zbx; + zsp = static_cast(assembler)->zsp; + zbp = static_cast(assembler)->zbp; + zsi = static_cast(assembler)->zsi; + zdi = static_cast(assembler)->zdi; return kErrorOk; } -// ============================================================================ -// [asmjit::X86Compiler - Construction / Destruction] -// ============================================================================ +void X86Compiler::reset(bool releaseMemory) { + Compiler::reset(releaseMemory); -X86Compiler::X86Compiler(Runtime* runtime, uint32_t arch) : - Compiler(runtime), - zax(NoInit), - zcx(NoInit), - zdx(NoInit), - zbx(NoInit), - zsp(NoInit), - zbp(NoInit), - zsi(NoInit), - zdi(NoInit) { - - setArch(arch); + _regCount.reset(); + zax = x86::noGpReg; + zcx = x86::noGpReg; + zdx = x86::noGpReg; + zbx = x86::noGpReg; + zsp = x86::noGpReg; + zbp = x86::noGpReg; + zsi = x86::noGpReg; + zdi = x86::noGpReg; } -X86Compiler::~X86Compiler() {} - // 
============================================================================ -// [asmjit::X86Compiler - Arch] +// [asmjit::X86Compiler - Finalize] // ============================================================================ -Error X86Compiler::setArch(uint32_t arch) { -#if defined(ASMJIT_BUILD_X86) - if (arch == kArchX86) { - _arch = kArchX86; - _regSize = 4; - - _regCount.reset(); - _regCount._gp = 8; - _regCount._mm = 8; - _regCount._k = 8; - _regCount._xyz = 8; - - zax = x86::eax; - zcx = x86::ecx; - zdx = x86::edx; - zbx = x86::ebx; - zsp = x86::esp; - zbp = x86::ebp; - zsi = x86::esi; - zdi = x86::edi; - - _targetVarMapping = _x86VarMapping; +Error X86Compiler::finalize() { + X86Assembler* assembler = getAssembler(); + if (assembler == NULL) return kErrorOk; + + // Flush the global constant pool. + if (_globalConstPoolLabel.isInitialized()) { + embedConstPool(_globalConstPoolLabel, _globalConstPool); + + _globalConstPoolLabel.reset(); + _globalConstPool.reset(); } -#endif // ASMJIT_BUILD_X86 -#if defined(ASMJIT_BUILD_X64) - if (arch == kArchX64) { - _arch = kArchX64; - _regSize = 8; - - _regCount.reset(); - _regCount._gp = 16; - _regCount._mm = 8; - _regCount._k = 8; - _regCount._xyz = 16; - - zax = x86::rax; - zcx = x86::rcx; - zdx = x86::rdx; - zbx = x86::rbx; - zsp = x86::rsp; - zbp = x86::rbp; - zsi = x86::rsi; - zdi = x86::rdi; - - _targetVarMapping = _x64VarMapping; + if (_firstNode == NULL) return kErrorOk; - } -#endif // ASMJIT_BUILD_X64 - ASMJIT_ASSERT(!"Reached"); - return kErrorInvalidArgument; + X86Context context(this); + Error error = kErrorOk; + + HLNode* node = _firstNode; + HLNode* start; + + // Find all functions and use the `X86Context` to translate/emit them. 
+ do { + start = node; + _resetTokenGenerator(); + + if (node->getType() == kHLNodeTypeFunc) { + node = static_cast(start)->getEnd(); + error = context.compile(static_cast(start)); + + if (error != kErrorOk) + break; + } + + do { + node = node->getNext(); + } while (node != NULL && node->getType() != kHLNodeTypeFunc); + + error = context.serialize(assembler, start, node); + context.cleanup(); + + if (error != kErrorOk) + break; + } while (node != NULL); + + reset(false); + return error; } // ============================================================================ @@ -787,50 +311,50 @@ Error X86Compiler::setArch(uint32_t arch) { //! Get compiler instruction item size without operands assigned. static ASMJIT_INLINE size_t X86Compiler_getInstSize(uint32_t code) { - return IntUtil::inInterval(code, _kX86InstIdJbegin, _kX86InstIdJend) ? sizeof(JumpNode) : sizeof(InstNode); + return Utils::inInterval(code, _kX86InstIdJbegin, _kX86InstIdJend) ? sizeof(HLJump) : sizeof(HLInst); } -static InstNode* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) { - if (IntUtil::inInterval(code, _kX86InstIdJbegin, _kX86InstIdJend)) { - JumpNode* node = new(p) JumpNode(self, code, options, opList, opCount); - TargetNode* jTarget = NULL; +static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) { + if (Utils::inInterval(code, _kX86InstIdJbegin, _kX86InstIdJend)) { + HLJump* node = new(p) HLJump(self, code, options, opList, opCount); + HLLabel* jTarget = NULL; if ((options & kInstOptionUnfollow) == 0) { if (opList[0].isLabel()) - jTarget = self->getTargetById(opList[0].getId()); + jTarget = self->getHLLabel(static_cast(opList[0])); else options |= kInstOptionUnfollow; } - node->orFlags(code == kX86InstIdJmp ? kNodeFlagIsJmp | kNodeFlagIsTaken : kNodeFlagIsJcc); + node->orFlags(code == kX86InstIdJmp ? 
kHLNodeFlagIsJmp | kHLNodeFlagIsTaken : kHLNodeFlagIsJcc); node->_target = jTarget; node->_jumpNext = NULL; if (jTarget) { - node->_jumpNext = static_cast(jTarget->_from); + node->_jumpNext = static_cast(jTarget->_from); jTarget->_from = node; jTarget->addNumRefs(); } // The 'jmp' is always taken, conditional jump can contain hint, we detect it. if (code == kX86InstIdJmp) - node->orFlags(kNodeFlagIsTaken); + node->orFlags(kHLNodeFlagIsTaken); else if (options & kInstOptionTaken) - node->orFlags(kNodeFlagIsTaken); + node->orFlags(kHLNodeFlagIsTaken); node->addOptions(options); return node; } else { - InstNode* node = new(p) InstNode(self, code, options, opList, opCount); + HLInst* node = new(p) HLInst(self, code, options, opList, opCount); node->addOptions(options); return node; } } -InstNode* X86Compiler::newInst(uint32_t code) { +HLInst* X86Compiler::newInst(uint32_t code) { size_t size = X86Compiler_getInstSize(code); - InstNode* inst = static_cast(_baseZone.alloc(size)); + HLInst* inst = static_cast(_zoneAllocator.alloc(size)); if (inst == NULL) goto _NoMemory; @@ -838,13 +362,13 @@ InstNode* X86Compiler::newInst(uint32_t code) { return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), NULL, 0); _NoMemory: - setError(kErrorNoHeapMemory); + setLastError(kErrorNoHeapMemory); return NULL; } -InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0) { +HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0) { size_t size = X86Compiler_getInstSize(code); - InstNode* inst = static_cast(_baseZone.alloc(size + 1 * sizeof(Operand))); + HLInst* inst = static_cast(_zoneAllocator.alloc(size + 1 * sizeof(Operand))); if (inst == NULL) goto _NoMemory; @@ -852,18 +376,18 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0) { { Operand* opList = reinterpret_cast(reinterpret_cast(inst) + size); opList[0] = o0; - ASMJIT_ASSERT_UNINITIALIZED(o0); + ASMJIT_ASSERT_OPERAND(o0); return X86Compiler_newInst(this, inst, code, 
getInstOptionsAndReset(), opList, 1); } _NoMemory: - setError(kErrorNoHeapMemory); + setLastError(kErrorNoHeapMemory); return NULL; } -InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1) { +HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1) { size_t size = X86Compiler_getInstSize(code); - InstNode* inst = static_cast(_baseZone.alloc(size + 2 * sizeof(Operand))); + HLInst* inst = static_cast(_zoneAllocator.alloc(size + 2 * sizeof(Operand))); if (inst == NULL) goto _NoMemory; @@ -872,19 +396,19 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& Operand* opList = reinterpret_cast(reinterpret_cast(inst) + size); opList[0] = o0; opList[1] = o1; - ASMJIT_ASSERT_UNINITIALIZED(o0); - ASMJIT_ASSERT_UNINITIALIZED(o1); + ASMJIT_ASSERT_OPERAND(o0); + ASMJIT_ASSERT_OPERAND(o1); return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 2); } _NoMemory: - setError(kErrorNoHeapMemory); + setLastError(kErrorNoHeapMemory); return NULL; } -InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) { +HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) { size_t size = X86Compiler_getInstSize(code); - InstNode* inst = static_cast(_baseZone.alloc(size + 3 * sizeof(Operand))); + HLInst* inst = static_cast(_zoneAllocator.alloc(size + 3 * sizeof(Operand))); if (inst == NULL) goto _NoMemory; @@ -894,20 +418,20 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& opList[0] = o0; opList[1] = o1; opList[2] = o2; - ASMJIT_ASSERT_UNINITIALIZED(o0); - ASMJIT_ASSERT_UNINITIALIZED(o1); - ASMJIT_ASSERT_UNINITIALIZED(o2); + ASMJIT_ASSERT_OPERAND(o0); + ASMJIT_ASSERT_OPERAND(o1); + ASMJIT_ASSERT_OPERAND(o2); return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 3); } _NoMemory: - setError(kErrorNoHeapMemory); + 
setLastError(kErrorNoHeapMemory); return NULL; } -InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) { +HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) { size_t size = X86Compiler_getInstSize(code); - InstNode* inst = static_cast(_baseZone.alloc(size + 4 * sizeof(Operand))); + HLInst* inst = static_cast(_zoneAllocator.alloc(size + 4 * sizeof(Operand))); if (inst == NULL) goto _NoMemory; @@ -918,21 +442,21 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& opList[1] = o1; opList[2] = o2; opList[3] = o3; - ASMJIT_ASSERT_UNINITIALIZED(o0); - ASMJIT_ASSERT_UNINITIALIZED(o1); - ASMJIT_ASSERT_UNINITIALIZED(o2); - ASMJIT_ASSERT_UNINITIALIZED(o3); + ASMJIT_ASSERT_OPERAND(o0); + ASMJIT_ASSERT_OPERAND(o1); + ASMJIT_ASSERT_OPERAND(o2); + ASMJIT_ASSERT_OPERAND(o3); return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 4); } _NoMemory: - setError(kErrorNoHeapMemory); + setLastError(kErrorNoHeapMemory); return NULL; } -InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) { +HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) { size_t size = X86Compiler_getInstSize(code); - InstNode* inst = static_cast(_baseZone.alloc(size + 5 * sizeof(Operand))); + HLInst* inst = static_cast(_zoneAllocator.alloc(size + 5 * sizeof(Operand))); if (inst == NULL) goto _NoMemory; @@ -944,130 +468,130 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& opList[2] = o2; opList[3] = o3; opList[4] = o4; - ASMJIT_ASSERT_UNINITIALIZED(o0); - ASMJIT_ASSERT_UNINITIALIZED(o1); - ASMJIT_ASSERT_UNINITIALIZED(o2); - ASMJIT_ASSERT_UNINITIALIZED(o3); - ASMJIT_ASSERT_UNINITIALIZED(o4); + ASMJIT_ASSERT_OPERAND(o0); + 
ASMJIT_ASSERT_OPERAND(o1); + ASMJIT_ASSERT_OPERAND(o2); + ASMJIT_ASSERT_OPERAND(o3); + ASMJIT_ASSERT_OPERAND(o4); return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 5); } _NoMemory: - setError(kErrorNoHeapMemory); + setLastError(kErrorNoHeapMemory); return NULL; } -InstNode* X86Compiler::emit(uint32_t code) { - InstNode* node = newInst(code); +HLInst* X86Compiler::emit(uint32_t code) { + HLInst* node = newInst(code); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0) { - InstNode* node = newInst(code, o0); +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0) { + HLInst* node = newInst(code, o0); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1){ - InstNode* node = newInst(code, o0, o1); +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1){ + HLInst* node = newInst(code, o0, o1); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) { - InstNode* node = newInst(code, o0, o1, o2); +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) { + HLInst* node = newInst(code, o0, o1, o2); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3){ - InstNode* node = newInst(code, o0, o1, o2, o3); +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3){ + HLInst* node = newInst(code, o0, o1, o2, o3); if (node == NULL) return NULL; - return 
static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) { - InstNode* node = newInst(code, o0, o1, o2, o3, o4); +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) { + HLInst* node = newInst(code, o0, o1, o2, o3, o4); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, int o0_) { +HLInst* X86Compiler::emit(uint32_t code, int o0_) { Imm o0(o0_); - InstNode* node = newInst(code, o0); + HLInst* node = newInst(code, o0); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, uint64_t o0_) { +HLInst* X86Compiler::emit(uint32_t code, uint64_t o0_) { Imm o0(o0_); - InstNode* node = newInst(code, o0); + HLInst* node = newInst(code, o0); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0, int o1_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, int o1_) { Imm o1(o1_); - InstNode* node = newInst(code, o0, o1); + HLInst* node = newInst(code, o0, o1); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0, uint64_t o1_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, uint64_t o1_) { Imm o1(o1_); - InstNode* node = newInst(code, o0, o1); + HLInst* node = newInst(code, o0, o1); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2_) { +HLInst* 
X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2_) { Imm o2(o2_); - InstNode* node = newInst(code, o0, o1, o2); + HLInst* node = newInst(code, o0, o1, o2); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2_) { Imm o2(o2_); - InstNode* node = newInst(code, o0, o1, o2); + HLInst* node = newInst(code, o0, o1, o2); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3_) { Imm o3(o3_); - InstNode* node = newInst(code, o0, o1, o2, o3); + HLInst* node = newInst(code, o0, o1, o2, o3); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } -InstNode* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3_) { Imm o3(o3_); - InstNode* node = newInst(code, o0, o1, o2, o3); + HLInst* node = newInst(code, o0, o1, o2, o3); if (node == NULL) return NULL; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } // ============================================================================ // [asmjit::X86Compiler - Func] // ============================================================================ -X86FuncNode* X86Compiler::newFunc(uint32_t conv, const FuncPrototype& p) { +X86FuncNode* X86Compiler::newFunc(const FuncPrototype& p) { X86FuncNode* func = newNode(); Error error; @@ -1075,19 +599,16 @@ X86FuncNode* 
X86Compiler::newFunc(uint32_t conv, const FuncPrototype& p) { goto _NoMemory; // Create helper nodes. - func->_entryNode = newTarget(); - func->_exitNode = newTarget(); - func->_end = newNode(); + func->_entryNode = newLabelNode(); + func->_exitNode = newLabelNode(); + func->_end = newNode(); if (func->_entryNode == NULL || func->_exitNode == NULL || func->_end == NULL) goto _NoMemory; - // Emit push/pop sequence by default. - func->_funcHints |= IntUtil::mask(kX86FuncHintPushPop); - // Function prototype. - if ((error = func->_x86Decl.setPrototype(conv, p)) != kErrorOk) { - setError(error); + if ((error = func->_x86Decl.setPrototype(p)) != kErrorOk) { + setLastError(error); return NULL; } @@ -1102,55 +623,61 @@ X86FuncNode* X86Compiler::newFunc(uint32_t conv, const FuncPrototype& p) { func->_requiredStackAlignment = 0; // Allocate space for function arguments. - func->_argList = NULL; - if (func->getArgCount() != 0) { - func->_argList = _baseZone.allocT(func->getArgCount() * sizeof(VarData*)); - if (func->_argList == NULL) + func->_args = NULL; + if (func->getNumArgs() != 0) { + func->_args = _zoneAllocator.allocT(func->getNumArgs() * sizeof(VarData*)); + if (func->_args == NULL) goto _NoMemory; - ::memset(func->_argList, 0, func->getArgCount() * sizeof(VarData*)); + ::memset(func->_args, 0, func->getNumArgs() * sizeof(VarData*)); } return func; _NoMemory: - setError(kErrorNoHeapMemory); + setLastError(kErrorNoHeapMemory); return NULL; } -X86FuncNode* X86Compiler::addFunc(uint32_t conv, const FuncPrototype& p) { - X86FuncNode* func = newFunc(conv, p); +X86FuncNode* X86Compiler::addFunc(const FuncPrototype& p) { + X86FuncNode* func = newFunc(p); if (func == NULL) { - setError(kErrorNoHeapMemory); + setLastError(kErrorNoHeapMemory); return NULL; } ASMJIT_ASSERT(_func == NULL); _func = func; - addNode(func); - addNode(func->getEntryNode()); + addNode(func); // Add function node. + addNode(func->getEntryNode()); // Add function entry. 
+ HLNode* cursor = getCursor(); + + addNode(func->getExitNode()); // Add function exit / epilog marker. + addNode(func->getEnd()); // Add function end. + setCursor(cursor); return func; } -EndNode* X86Compiler::endFunc() { +HLSentinel* X86Compiler::endFunc() { X86FuncNode* func = getFunc(); ASMJIT_ASSERT(func != NULL); - // App function exit / epilog marker. - addNode(func->getExitNode()); - // Add local constant pool at the end of the function (if exist). - X86Compiler_emitConstPool(this, _localConstPoolLabel, _localConstPool); + setCursor(func->getExitNode()); + + if (_localConstPoolLabel.isInitialized()) { + embedConstPool(_localConstPoolLabel, _localConstPool); + _localConstPoolLabel.reset(); + _localConstPool.reset(); + } - // Add function end marker. - addNode(func->getEnd()); - - // Finalize... + // Finalize. func->addFuncFlags(kFuncFlagIsFinished); _func = NULL; + setCursor(func->getEnd()); return func->getEnd(); } @@ -1158,29 +685,29 @@ EndNode* X86Compiler::endFunc() { // [asmjit::X86Compiler - Ret] // ============================================================================ -RetNode* X86Compiler::newRet(const Operand& o0, const Operand& o1) { - RetNode* node = newNode(o0, o1); +HLRet* X86Compiler::newRet(const Operand& o0, const Operand& o1) { + HLRet* node = newNode(o0, o1); if (node == NULL) goto _NoMemory; return node; _NoMemory: - setError(kErrorNoHeapMemory); + setLastError(kErrorNoHeapMemory); return NULL; } -RetNode* X86Compiler::addRet(const Operand& o0, const Operand& o1) { - RetNode* node = newRet(o0, o1); +HLRet* X86Compiler::addRet(const Operand& o0, const Operand& o1) { + HLRet* node = newRet(o0, o1); if (node == NULL) return node; - return static_cast(addNode(node)); + return static_cast(addNode(node)); } // ============================================================================ // [asmjit::X86Compiler - Call] // ============================================================================ -X86CallNode* X86Compiler::newCall(const 
Operand& o0, uint32_t conv, const FuncPrototype& p) { +X86CallNode* X86Compiler::newCall(const Operand& o0, const FuncPrototype& p) { X86CallNode* node = newNode(o0); Error error; uint32_t nArgs; @@ -1188,16 +715,16 @@ X86CallNode* X86Compiler::newCall(const Operand& o0, uint32_t conv, const FuncPr if (node == NULL) goto _NoMemory; - if ((error = node->_x86Decl.setPrototype(conv, p)) != kErrorOk) { - setError(error); + if ((error = node->_x86Decl.setPrototype(p)) != kErrorOk) { + setLastError(error); return NULL; } // If there are no arguments skip the allocation. - if ((nArgs = p.getArgCount()) == 0) + if ((nArgs = p.getNumArgs()) == 0) return node; - node->_args = static_cast(_baseZone.alloc(nArgs * sizeof(Operand))); + node->_args = static_cast(_zoneAllocator.alloc(nArgs * sizeof(Operand))); if (node->_args == NULL) goto _NoMemory; @@ -1205,12 +732,12 @@ X86CallNode* X86Compiler::newCall(const Operand& o0, uint32_t conv, const FuncPr return node; _NoMemory: - setError(kErrorNoHeapMemory); + setLastError(kErrorNoHeapMemory); return NULL; } -X86CallNode* X86Compiler::addCall(const Operand& o0, uint32_t conv, const FuncPrototype& p) { - X86CallNode* node = newCall(o0, conv, p); +X86CallNode* X86Compiler::addCall(const Operand& o0, const FuncPrototype& p) { + X86CallNode* node = newCall(o0, p); if (node == NULL) return NULL; return static_cast(addNode(node)); @@ -1220,7 +747,7 @@ X86CallNode* X86Compiler::addCall(const Operand& o0, uint32_t conv, const FuncPr // [asmjit::X86Compiler - Vars] // ============================================================================ -Error X86Compiler::setArg(uint32_t argIndex, Var& var) { +Error X86Compiler::setArg(uint32_t argIndex, const Var& var) { X86FuncNode* func = getFunc(); if (func == NULL) @@ -1235,24 +762,32 @@ Error X86Compiler::setArg(uint32_t argIndex, Var& var) { return kErrorOk; } -Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name) { +Error X86Compiler::_newVar(Var* var, uint32_t vType, const 
char* name, va_list ap) { ASMJIT_ASSERT(vType < kX86VarTypeCount); - vType = _targetVarMapping[vType]; ASMJIT_ASSERT(vType != kInvalidVar); - // There is not ASSERT in release mode and this should be checked. + // The assertion won't be compiled in release build, however, we want to check + // this anyway. if (vType == kInvalidVar) { static_cast(var)->reset(); return kErrorInvalidArgument; } const X86VarInfo& vInfo = _x86VarInfo[vType]; - VarData* vd = _newVd(vType, vInfo.getSize(), vInfo.getClass(), name); + char buf[64]; + // Format the name if `ap` is given. + if (ap) { + vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), name, ap); + buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0'; + name = buf; + } + + VarData* vd = _newVd(vType, vInfo.getSize(), vInfo.getClass(), name); if (vd == NULL) { static_cast(var)->reset(); - return getError(); + return getLastError(); } var->_init_packed_op_sz_w0_id(kOperandTypeVar, vd->getSize(), vInfo.getReg() << 8, vd->getId()); @@ -1274,7 +809,7 @@ Error X86Compiler::_newStack(BaseMem* mem, uint32_t size, uint32_t alignment, co VarData* vd = _newVd(kInvalidVar, size, kInvalidReg, name); if (vd == NULL) { static_cast(mem)->reset(); - return getError(); + return getLastError(); } vd->_isStack = true; @@ -1313,9 +848,11 @@ Error X86Compiler::_newConst(BaseMem* mem, uint32_t scope, const void* data, siz goto _OnError; if (dstLabel->getId() == kInvalidValue) { - error = _newLabel(dstLabel); - if (error != kErrorOk) + *dstLabel = newLabel(); + if (!dstLabel->isInitialized()) { + error = kErrorNoHeapMemory; goto _OnError; + } } *static_cast(mem) = x86::ptr(*dstLabel, static_cast(offset), static_cast(size)); @@ -1325,80 +862,6 @@ _OnError: return error; } -// ============================================================================ -// [asmjit::X86Compiler - Make] -// ============================================================================ - -void* X86Compiler::make() { - Assembler* assembler = getAssembler(); - if (assembler == NULL) { - 
setError(kErrorNoHeapMemory); - return NULL; - } - - Error error = serialize(assembler); - if (error != kErrorOk) { - setError(error); - return NULL; - } - - void* result = assembler->make(); - return result; -} - -// ============================================================================ -// [asmjit::X86Compiler - Assembler] -// ============================================================================ - -Assembler* X86Compiler::_newAssembler() { - return new(std::nothrow) X86Assembler(_runtime, _arch); -} - -// ============================================================================ -// [asmjit::X86Compiler - Serialize] -// ============================================================================ - -Error X86Compiler::serialize(Assembler* assembler) { - // Flush the global constant pool. - X86Compiler_emitConstPool(this, _globalConstPoolLabel, _globalConstPool); - - if (_firstNode == NULL) - return kErrorOk; - - X86Context context(this); - Error error = kErrorOk; - - Node* node = _firstNode; - Node* start; - - // Find function and use the context to translate/emit. 
- do { - start = node; - - if (node->getType() == kNodeTypeFunc) { - node = static_cast(start)->getEnd(); - error = context.compile(static_cast(start)); - - if (error != kErrorOk) - goto _Error; - } - - do { - node = node->getNext(); - } while (node != NULL && node->getType() != kNodeTypeFunc); - - error = context.serialize(assembler, start, node); - if (error != kErrorOk) - goto _Error; - context.cleanup(); - } while (node != NULL); - return kErrorOk; - -_Error: - context.cleanup(); - return error; -} - } // asmjit namespace // [Api-End] diff --git a/src/asmjit/x86/x86compiler.h b/src/asmjit/x86/x86compiler.h index ac9daaf..1edb5fa 100644 --- a/src/asmjit/x86/x86compiler.h +++ b/src/asmjit/x86/x86compiler.h @@ -15,6 +15,7 @@ #include "../base/compiler.h" #include "../base/vectypes.h" #include "../x86/x86assembler.h" +#include "../x86/x86compilerfunc.h" // [Api-Begin] #include "../apibegin.h" @@ -27,427 +28,10 @@ namespace asmjit { struct X86CallNode; struct X86FuncNode; -struct X86VarState; //! \addtogroup asmjit_x86_compiler //! \{ -// ============================================================================ -// [asmjit::k86VarType] -// ============================================================================ - -//! X86/X64 variable type. -ASMJIT_ENUM(X86VarType) { - //! Variable is SP-FP (x87). - kX86VarTypeFp32 = kVarTypeFp32, - //! Variable is DP-FP (x87). - kX86VarTypeFp64 = kVarTypeFp64, - - //! Variable is Mm (MMX). - kX86VarTypeMm = 12, - - //! Variable is K (AVX512+) - kX86VarTypeK, - - //! Variable is Xmm (SSE+). - kX86VarTypeXmm, - //! Variable is a scalar Xmm SP-FP number. - kX86VarTypeXmmSs, - //! Variable is a packed Xmm SP-FP number (4 floats). - kX86VarTypeXmmPs, - //! Variable is a scalar Xmm DP-FP number. - kX86VarTypeXmmSd, - //! Variable is a packed Xmm DP-FP number (2 doubles). - kX86VarTypeXmmPd, - - //! Variable is Ymm (AVX+). - kX86VarTypeYmm, - //! Variable is a packed Ymm SP-FP number (8 floats). - kX86VarTypeYmmPs, - //! 
Variable is a packed Ymm DP-FP number (4 doubles). - kX86VarTypeYmmPd, - - //! Variable is Zmm (AVX512+). - kX86VarTypeZmm, - //! Variable is a packed Zmm SP-FP number (16 floats). - kX86VarTypeZmmPs, - //! Variable is a packed Zmm DP-FP number (8 doubles). - kX86VarTypeZmmPd, - - //! Count of variable types. - kX86VarTypeCount, - - //! \internal - //! \{ - _kX86VarTypeMmStart = kX86VarTypeMm, - _kX86VarTypeMmEnd = kX86VarTypeMm, - - _kX86VarTypeXmmStart = kX86VarTypeXmm, - _kX86VarTypeXmmEnd = kX86VarTypeXmmPd, - - _kX86VarTypeYmmStart = kX86VarTypeYmm, - _kX86VarTypeYmmEnd = kX86VarTypeYmmPd, - - _kX86VarTypeZmmStart = kX86VarTypeZmm, - _kX86VarTypeZmmEnd = kX86VarTypeZmmPd - //! \} -}; - -// ============================================================================ -// [asmjit::X86VarAttr] -// ============================================================================ - -//! X86/X64 VarAttr flags. -ASMJIT_ENUM(X86VarAttr) { - kX86VarAttrGpbLo = 0x10000000, - kX86VarAttrGpbHi = 0x20000000, - kX86VarAttrFld4 = 0x40000000, - kX86VarAttrFld8 = 0x80000000 -}; - -// ============================================================================ -// [asmjit::X86FuncConv] -// ============================================================================ - -//! X86 function calling conventions. -//! -//! Calling convention is scheme how function arguments are passed into -//! function and how functions returns values. In assembler programming -//! it's needed to always comply with function calling conventions, because -//! even small inconsistency can cause undefined behavior or crash. -//! -//! List of calling conventions for 32-bit x86 mode: -//! - `kX86FuncConvCDecl` - Calling convention for C runtime. -//! - `kX86FuncConvStdCall` - Calling convention for WinAPI functions. -//! - `kX86FuncConvMsThisCall` - Calling convention for C++ members under -//! Windows (produced by MSVC and all MSVC compatible compilers). -//! 
- `kX86FuncConvMsFastCall` - Fastest calling convention that can be used -//! by MSVC compiler. -//! - `kX86FuncConvBorlandFastCall` - Borland fastcall convention. -//! - `kX86FuncConvGccFastCall` - GCC fastcall convention (2 register arguments). -//! - `kX86FuncConvGccRegParm1` - GCC regparm(1) convention. -//! - `kX86FuncConvGccRegParm2` - GCC regparm(2) convention. -//! - `kX86FuncConvGccRegParm3` - GCC regparm(3) convention. -//! -//! List of calling conventions for 64-bit x86 mode (x64): -//! - `kX86FuncConvW64` - Windows 64-bit calling convention (WIN64 ABI). -//! - `kX86FuncConvU64` - Unix 64-bit calling convention (AMD64 ABI). -//! -//! There is also `kFuncConvHost` that is defined to fit the host calling -//! convention. -//! -//! These types are used together with `Compiler::addFunc()` method. -ASMJIT_ENUM(X86FuncConv) { - // -------------------------------------------------------------------------- - // [X64] - // -------------------------------------------------------------------------- - - //! X64 calling convention for Windows platform (WIN64 ABI). - //! - //! For first four arguments are used these registers: - //! - 1. 32/64-bit integer or floating point argument - rcx/xmm0 - //! - 2. 32/64-bit integer or floating point argument - rdx/xmm1 - //! - 3. 32/64-bit integer or floating point argument - r8/xmm2 - //! - 4. 32/64-bit integer or floating point argument - r9/xmm3 - //! - //! Note first four arguments here means arguments at positions from 1 to 4 - //! (included). For example if second argument is not passed in register then - //! rdx/xmm1 register is unused. - //! - //! All other arguments are pushed on the stack in right-to-left direction. - //! Stack is aligned by 16 bytes. There is 32-byte shadow space on the stack - //! that can be used to save up to four 64-bit registers (probably designed to - //! be used to save first four arguments passed in registers). - //! - //! Arguments direction: - //! 
- Right to Left (except for first 4 parameters that's in registers) - //! - //! Stack is cleaned by: - //! - Caller. - //! - //! Return value: - //! - Integer types - Rax register. - //! - Floating points - Xmm0 register. - //! - //! Stack is always aligned by 16 bytes. - //! - //! More information about this calling convention can be found on MSDN: - //! http://msdn.microsoft.com/en-us/library/9b372w95.aspx . - kX86FuncConvW64 = 1, - - //! X64 calling convention for Unix platforms (AMD64 ABI). - //! - //! First six 32 or 64-bit integer arguments are passed in rdi, rsi, rdx, - //! rcx, r8, r9 registers. First eight floating point or Xmm arguments - //! are passed in xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 registers. - //! This means that in registers can be transferred up to 14 arguments total. - //! - //! There is also RED ZONE below the stack pointer that can be used for - //! temporary storage. The red zone is the space from [rsp-128] to [rsp-8]. - //! - //! Arguments direction: - //! - Right to Left (Except for arguments passed in registers). - //! - //! Stack is cleaned by: - //! - Caller. - //! - //! Return value: - //! - Integer types - Rax register. - //! - Floating points - Xmm0 register. - //! - //! Stack is always aligned by 16 bytes. - kX86FuncConvU64 = 2, - - // -------------------------------------------------------------------------- - // [X86] - // -------------------------------------------------------------------------- - - //! Cdecl calling convention (used by C runtime). - //! - //! Compatible across MSVC and GCC. - //! - //! Arguments direction: - //! - Right to Left - //! - //! Stack is cleaned by: - //! - Caller. - kX86FuncConvCDecl = 3, - - //! Stdcall calling convention (used by WinAPI). - //! - //! Compatible across MSVC and GCC. - //! - //! Arguments direction: - //! - Right to Left - //! - //! Stack is cleaned by: - //! - Callee. - //! - //! Return value: - //! - Integer types - EAX:EDX registers. - //! 
- Floating points - fp0 register. - kX86FuncConvStdCall = 4, - - //! MSVC specific calling convention used by MSVC/Intel compilers - //! for struct/class methods. - //! - //! This is MSVC (and Intel) only calling convention used in Windows - //! world for C++ class methods. Implicit 'this' pointer is stored in - //! ECX register instead of storing it on the stack. - //! - //! Arguments direction: - //! - Right to Left (except this pointer in ECX) - //! - //! Stack is cleaned by: - //! - Callee. - //! - //! Return value: - //! - Integer types - EAX:EDX registers. - //! - Floating points - fp0 register. - //! - //! C++ class methods that have variable count of arguments uses different - //! calling convention called cdecl. - //! - //! \note This calling convention is always used by MSVC for class methods, - //! it's implicit and there is no way how to override it. - kX86FuncConvMsThisCall = 5, - - //! MSVC specific fastcall. - //! - //! Two first parameters (evaluated from left-to-right) are in ECX:EDX - //! registers, all others on the stack in right-to-left order. - //! - //! Arguments direction: - //! - Right to Left (except to first two integer arguments in ECX:EDX) - //! - //! Stack is cleaned by: - //! - Callee. - //! - //! Return value: - //! - Integer types - EAX:EDX registers. - //! - Floating points - fp0 register. - //! - //! \note This calling convention differs to GCC one in stack cleaning - //! mechanism. - kX86FuncConvMsFastCall = 6, - - //! Borland specific fastcall with 2 parameters in registers. - //! - //! Two first parameters (evaluated from left-to-right) are in ECX:EDX - //! registers, all others on the stack in left-to-right order. - //! - //! Arguments direction: - //! - Left to Right (except to first two integer arguments in ECX:EDX) - //! - //! Stack is cleaned by: - //! - Callee. - //! - //! Return value: - //! - Integer types - EAX:EDX registers. - //! - Floating points - fp0 register. - //! - //! 
\note Arguments on the stack are in left-to-right order that differs - //! to other fastcall conventions used in different compilers. - kX86FuncConvBorlandFastCall = 7, - - //! GCC specific fastcall convention. - //! - //! Two first parameters (evaluated from left-to-right) are in ECX:EDX - //! registers, all others on the stack in right-to-left order. - //! - //! Arguments direction: - //! - Right to Left (except to first two integer arguments in ECX:EDX) - //! - //! Stack is cleaned by: - //! - Callee. - //! - //! Return value: - //! - Integer types - EAX:EDX registers. - //! - Floating points - fp0 register. - //! - //! \note This calling convention should be compatible with `kX86FuncConvMsFastCall`. - kX86FuncConvGccFastCall = 8, - - //! GCC specific regparm(1) convention. - //! - //! The first parameter (evaluated from left-to-right) is in EAX register, - //! all others on the stack in right-to-left order. - //! - //! Arguments direction: - //! - Right to Left (except to first one integer argument in EAX) - //! - //! Stack is cleaned by: - //! - Caller. - //! - //! Return value: - //! - Integer types - EAX:EDX registers. - //! - Floating points - fp0 register. - kX86FuncConvGccRegParm1 = 9, - - //! GCC specific regparm(2) convention. - //! - //! Two first parameters (evaluated from left-to-right) are in EAX:EDX - //! registers, all others on the stack in right-to-left order. - //! - //! Arguments direction: - //! - Right to Left (except to first two integer arguments in EAX:EDX) - //! - //! Stack is cleaned by: - //! - Caller. - //! - //! Return value: - //! - Integer types - EAX:EDX registers. - //! - Floating points - fp0 register. - kX86FuncConvGccRegParm2 = 10, - - //! GCC specific fastcall with 3 parameters in registers. - //! - //! Three first parameters (evaluated from left-to-right) are in - //! EAX:EDX:ECX registers, all others on the stack in right-to-left order. - //! - //! Arguments direction: - //! 
- Right to Left (except to first three integer arguments in EAX:EDX:ECX) - //! - //! Stack is cleaned by: - //! - Caller. - //! - //! Return value: - //! - Integer types - EAX:EDX registers. - //! - Floating points - fp0 register. - kX86FuncConvGccRegParm3 = 11, - - //! \internal - //! - //! Count of function calling conventions. - _kX86FuncConvCount = 12 -}; - -#if !defined(ASMJIT_DOCGEN) -// X86/X64 Host Support - documented in base/compiler.h. -#if defined(ASMJIT_ARCH_X86) -enum { - // X86. - kFuncConvHost = kX86FuncConvCDecl, - kFuncConvHostCDecl = kX86FuncConvCDecl, - kFuncConvHostStdCall = kX86FuncConvStdCall, -#if defined(_MSC_VER) - kFuncConvHostFastCall = kX86FuncConvMsFastCall -#elif defined(__GNUC__) - kFuncConvHostFastCall = kX86FuncConvGccFastCall -#elif defined(__BORLANDC__) - kFuncConvHostFastCall = kX86FuncConvBorlandFastCall -#else -#error "AsmJit - kFuncConvHostFastCall not determined." -#endif -}; -#endif // ASMJIT_ARCH_X86 - -#if defined(ASMJIT_ARCH_X64) -enum { -#if defined(ASMJIT_OS_WINDOWS) || defined(ASMJIT_OS_CYGWIN) - kFuncConvHost = kX86FuncConvW64, -#else - kFuncConvHost = kX86FuncConvU64, -#endif - kFuncConvHostCDecl = kFuncConvHost, - kFuncConvHostStdCall = kFuncConvHost, - kFuncConvHostFastCall = kFuncConvHost -}; -#endif // ASMJIT_ARCH_X64 -#endif // !ASMJIT_DOCGEN - -// ============================================================================ -// [asmjit::X86FuncHint] -// ============================================================================ - -//! X86 function hints. -ASMJIT_ENUM(X86FuncHint) { - //! Use push/pop sequences instead of mov sequences in function prolog - //! and epilog. - kX86FuncHintPushPop = 16, - //! Add emms instruction to the function epilog. - kX86FuncHintEmms = 17, - //! Add sfence instruction to the function epilog. - kX86FuncHintSFence = 18, - //! Add lfence instruction to the function epilog. 
- kX86FuncHintLFence = 19 -}; - -// ============================================================================ -// [asmjit::X86FuncFlags] -// ============================================================================ - -//! X86 function flags. -ASMJIT_ENUM(X86FuncFlags) { - //! Whether to emit register load/save sequence using push/pop pairs. - kX86FuncFlagPushPop = 0x00010000, - - //! Whether to emit `enter` instead of three instructions in case - //! that the function is not naked or misaligned. - kX86FuncFlagEnter = 0x00020000, - - //! Whether to emit `leave` instead of two instructions in case - //! that the function is not naked or misaligned. - kX86FuncFlagLeave = 0x00040000, - - //! Whether it's required to move arguments to a new stack location, - //! because of manual aligning. - kX86FuncFlagMoveArgs = 0x00080000, - - //! Whether to emit `emms` instruction in epilog (auto-detected). - kX86FuncFlagEmms = 0x01000000, - - //! Whether to emit `sfence` instruction in epilog (auto-detected). - //! - //! `kX86FuncFlagSFence` with `kX86FuncFlagLFence` results in emitting `mfence`. - kX86FuncFlagSFence = 0x02000000, - - //! Whether to emit `lfence` instruction in epilog (auto-detected). - //! - //! `kX86FuncFlagSFence` with `kX86FuncFlagLFence` results in emitting `mfence`. - kX86FuncFlagLFence = 0x04000000 -}; - // ============================================================================ // [asmjit::X86VarInfo] // ============================================================================ @@ -488,7 +72,7 @@ struct X86VarInfo { }; //! \internal -ASMJIT_VAR const X86VarInfo _x86VarInfo[]; +ASMJIT_VARAPI const X86VarInfo _x86VarInfo[]; #if defined(ASMJIT_BUILD_X86) //! \internal @@ -500,7 +84,7 @@ ASMJIT_VAR const X86VarInfo _x86VarInfo[]; //! - `kVarTypeUInt64` to `kInvalidVar`. //! - `kVarTypeIntPtr` to `kVarTypeInt32`. //! - `kVarTypeUIntPtr` to `kVarTypeUInt32`. 
-ASMJIT_VAR const uint8_t _x86VarMapping[kX86VarTypeCount]; +ASMJIT_VARAPI const uint8_t _x86VarMapping[kX86VarTypeCount]; #endif // ASMJIT_BUILD_X86 #if defined(ASMJIT_BUILD_X64) @@ -511,741 +95,15 @@ ASMJIT_VAR const uint8_t _x86VarMapping[kX86VarTypeCount]; //! This mapping translates the following: //! - `kVarTypeIntPtr` to `kVarTypeInt64`. //! - `kVarTypeUIntPtr` to `kVarTypeUInt64`. -ASMJIT_VAR const uint8_t _x64VarMapping[kX86VarTypeCount]; +ASMJIT_VARAPI const uint8_t _x64VarMapping[kX86VarTypeCount]; #endif // ASMJIT_BUILD_X64 -// ============================================================================ -// [asmjit::X86Var] -// ============================================================================ - -//! Base class for all X86 variables. -struct X86Var : public Var { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE X86Var() : Var(NoInit) { - reset(); - } - - ASMJIT_INLINE X86Var(const X86Var& other) : Var(other) {} - - explicit ASMJIT_INLINE X86Var(const _NoInit&) : Var(NoInit) {} - - // -------------------------------------------------------------------------- - // [X86Var Specific] - // -------------------------------------------------------------------------- - - //! Clone X86Var operand. - ASMJIT_INLINE X86Var clone() const { - return X86Var(*this); - } - - // -------------------------------------------------------------------------- - // [Type] - // -------------------------------------------------------------------------- - - //! Get register type. - ASMJIT_INLINE uint32_t getRegType() const { return _vreg.type; } - //! Get variable type. - ASMJIT_INLINE uint32_t getVarType() const { return _vreg.vType; } - - //! Get whether the variable is Gp register. - ASMJIT_INLINE bool isGp() const { return _vreg.type <= kX86RegTypeGpq; } - //! Get whether the variable is Gpb (8-bit) register. 
- ASMJIT_INLINE bool isGpb() const { return _vreg.type <= kX86RegTypeGpbHi; } - //! Get whether the variable is Gpb-lo (8-bit) register. - ASMJIT_INLINE bool isGpbLo() const { return _vreg.type == kX86RegTypeGpbLo; } - //! Get whether the variable is Gpb-hi (8-bit) register. - ASMJIT_INLINE bool isGpbHi() const { return _vreg.type == kX86RegTypeGpbHi; } - //! Get whether the variable is Gpw (16-bit) register. - ASMJIT_INLINE bool isGpw() const { return _vreg.type == kX86RegTypeGpw; } - //! Get whether the variable is Gpd (32-bit) register. - ASMJIT_INLINE bool isGpd() const { return _vreg.type == kX86RegTypeGpd; } - //! Get whether the variable is Gpq (64-bit) register. - ASMJIT_INLINE bool isGpq() const { return _vreg.type == kX86RegTypeGpq; } - - //! Get whether the variable is Mm (64-bit) register. - ASMJIT_INLINE bool isMm() const { return _vreg.type == kX86RegTypeMm; } - //! Get whether the variable is K (64-bit) register. - ASMJIT_INLINE bool isK() const { return _vreg.type == kX86RegTypeK; } - - //! Get whether the variable is Xmm (128-bit) register. - ASMJIT_INLINE bool isXmm() const { return _vreg.type == kX86RegTypeXmm; } - //! Get whether the variable is Ymm (256-bit) register. - ASMJIT_INLINE bool isYmm() const { return _vreg.type == kX86RegTypeYmm; } - //! Get whether the variable is Zmm (512-bit) register. - ASMJIT_INLINE bool isZmm() const { return _vreg.type == kX86RegTypeZmm; } - - // -------------------------------------------------------------------------- - // [Memory Cast] - // -------------------------------------------------------------------------- - - //! Cast this variable to a memory operand. - //! - //! \note Size of operand depends on native variable type, you can use other - //! variants if you want specific one. - ASMJIT_INLINE X86Mem m(int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, disp, getSize()); - } - - //! 
\overload - ASMJIT_INLINE X86Mem m(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, getSize()); - } - - //! Cast this variable to 8-bit memory operand. - ASMJIT_INLINE X86Mem m8(int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, disp, 1); - } - - //! \overload - ASMJIT_INLINE X86Mem m8(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 1); - } - - //! Cast this variable to 16-bit memory operand. - ASMJIT_INLINE X86Mem m16(int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, disp, 2); - } - - //! \overload - ASMJIT_INLINE X86Mem m16(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 2); - } - - //! Cast this variable to 32-bit memory operand. - ASMJIT_INLINE X86Mem m32(int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, disp, 4); - } - - //! \overload - ASMJIT_INLINE X86Mem m32(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 4); - } - - //! Cast this variable to 64-bit memory operand. - ASMJIT_INLINE X86Mem m64(int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, disp, 8); - } - - //! \overload - ASMJIT_INLINE X86Mem m64(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 8); - } - - //! Cast this variable to 80-bit memory operand (long double). - ASMJIT_INLINE X86Mem m80(int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, disp, 10); - } - - //! 
\overload - ASMJIT_INLINE X86Mem m80(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 10); - } - - //! Cast this variable to 128-bit memory operand. - ASMJIT_INLINE X86Mem m128(int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, disp, 16); - } - - //! \overload - ASMJIT_INLINE X86Mem m128(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 16); - } - - //! Cast this variable to 256-bit memory operand. - ASMJIT_INLINE X86Mem m256(int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, disp, 32); - } - - //! \overload - ASMJIT_INLINE X86Mem m256(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 32); - } - - //! Cast this variable to 256-bit memory operand. - ASMJIT_INLINE X86Mem m512(int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, disp, 64); - } - - //! 
\overload - ASMJIT_INLINE X86Mem m512(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { - return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 64); - } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE X86Var& operator=(const X86Var& other) { - _copy(other); - return *this; - } - - ASMJIT_INLINE bool operator==(const X86Var& other) const { - return _packed[0] == other._packed[0]; - } - - ASMJIT_INLINE bool operator!=(const X86Var& other) const { - return _packed[0] != other._packed[0]; - } - - // -------------------------------------------------------------------------- - // [Private] - // -------------------------------------------------------------------------- - -protected: - ASMJIT_INLINE X86Var(const X86Var& other, uint32_t reg, uint32_t size) : Var(NoInit) { - _init_packed_op_sz_w0_id(kOperandTypeVar, size, (reg << 8) + other._vreg.index, other._base.id); - _vreg.vType = other._vreg.vType; - } -}; - -// ============================================================================ -// [asmjit::X86GpVar] -// ============================================================================ - -//! Gp variable. -struct X86GpVar : public X86Var { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - -protected: - ASMJIT_INLINE X86GpVar(const X86GpVar& other, uint32_t reg, uint32_t size) : X86Var(other, reg, size) {} - -public: - //! Create a new uninitialized `X86GpVar` instance. - ASMJIT_INLINE X86GpVar() : X86Var() {} - - //! Create a new initialized `X86GpVar` instance. - ASMJIT_INLINE X86GpVar(Compiler& c, uint32_t type = kVarTypeIntPtr, const char* name = NULL) : X86Var(NoInit) { - c._newVar(this, type, name); - } - - //! 
Create a clone of `other`. - ASMJIT_INLINE X86GpVar(const X86GpVar& other) : X86Var(other) {} - - //! Create a new uninitialized `X86GpVar` instance (internal). - explicit ASMJIT_INLINE X86GpVar(const _NoInit&) : X86Var(NoInit) {} - - // -------------------------------------------------------------------------- - // [X86GpVar Specific] - // -------------------------------------------------------------------------- - - //! Clone X86GpVar operand. - ASMJIT_INLINE X86GpVar clone() const { - return X86GpVar(*this); - } - - //! Reset X86GpVar operand. - ASMJIT_INLINE void reset() { - X86Var::reset(); - } - - // -------------------------------------------------------------------------- - // [X86GpVar Cast] - // -------------------------------------------------------------------------- - - //! Cast this variable to 8-bit (LO) part of variable. - ASMJIT_INLINE X86GpVar r8() const { return X86GpVar(*this, kX86RegTypeGpbLo, 1); } - //! Cast this variable to 8-bit (LO) part of variable. - ASMJIT_INLINE X86GpVar r8Lo() const { return X86GpVar(*this, kX86RegTypeGpbLo, 1); } - //! Cast this variable to 8-bit (HI) part of variable. - ASMJIT_INLINE X86GpVar r8Hi() const { return X86GpVar(*this, kX86RegTypeGpbHi, 1); } - - //! Cast this variable to 16-bit part of variable. - ASMJIT_INLINE X86GpVar r16() const { return X86GpVar(*this, kX86RegTypeGpw, 2); } - //! Cast this variable to 32-bit part of variable. - ASMJIT_INLINE X86GpVar r32() const { return X86GpVar(*this, kX86RegTypeGpd, 4); } - //! Cast this variable to 64-bit part of variable. 
- ASMJIT_INLINE X86GpVar r64() const { return X86GpVar(*this, kX86RegTypeGpq, 8); } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE X86GpVar& operator=(const X86GpVar& other) { _copy(other); return *this; } - - ASMJIT_INLINE bool operator==(const X86GpVar& other) const { return X86Var::operator==(other); } - ASMJIT_INLINE bool operator!=(const X86GpVar& other) const { return X86Var::operator!=(other); } -}; - -// ============================================================================ -// [asmjit::X86MmVar] -// ============================================================================ - -//! Mm variable. -struct X86MmVar : public X86Var { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new uninitialized `X86MmVar` instance. - ASMJIT_INLINE X86MmVar() : X86Var() {} - //! Create a new initialized `X86MmVar` instance. - ASMJIT_INLINE X86MmVar(Compiler& c, uint32_t type = kX86VarTypeMm, const char* name = NULL) : X86Var(NoInit) { - c._newVar(this, type, name); - } - - //! Create a clone of `other`. - ASMJIT_INLINE X86MmVar(const X86MmVar& other) : X86Var(other) {} - - //! Create a new uninitialized `X86MmVar` instance (internal). - explicit ASMJIT_INLINE X86MmVar(const _NoInit&) : X86Var(NoInit) {} - - // -------------------------------------------------------------------------- - // [X86MmVar Specific] - // -------------------------------------------------------------------------- - - //! Clone X86MmVar operand. - ASMJIT_INLINE X86MmVar clone() const { - return X86MmVar(*this); - } - - //! Reset X86MmVar operand. 
- ASMJIT_INLINE void reset() { - X86Var::reset(); - } - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE X86MmVar& operator=(const X86MmVar& other) { _copy(other); return *this; } - - ASMJIT_INLINE bool operator==(const X86MmVar& other) const { return X86Var::operator==(other); } - ASMJIT_INLINE bool operator!=(const X86MmVar& other) const { return X86Var::operator!=(other); } -}; - -// ============================================================================ -// [asmjit::X86XmmVar] -// ============================================================================ - -//! Xmm variable. -struct X86XmmVar : public X86Var { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new uninitialized `X86XmmVar` instance. - ASMJIT_INLINE X86XmmVar() : X86Var() {} - //! Create a new initialized `X86XmmVar` instance. - ASMJIT_INLINE X86XmmVar(Compiler& c, uint32_t type = kX86VarTypeXmm, const char* name = NULL) : X86Var(NoInit) { - c._newVar(this, type, name); - } - - //! Create a clone of `other`. - ASMJIT_INLINE X86XmmVar(const X86XmmVar& other) : X86Var(other) {} - - //! Create a new uninitialized `X86XmmVar` instance (internal). - explicit ASMJIT_INLINE X86XmmVar(const _NoInit&) : X86Var(NoInit) {} - - // -------------------------------------------------------------------------- - // [X86XmmVar Specific] - // -------------------------------------------------------------------------- - - //! Clone X86XmmVar operand. - ASMJIT_INLINE X86XmmVar clone() const { - return X86XmmVar(*this); - } - - //! Reset X86XmmVar operand. 
- ASMJIT_INLINE void reset() { - X86Var::reset(); - } - - // -------------------------------------------------------------------------- - // [X86XmmVar Cast] - // -------------------------------------------------------------------------- - - // TODO: - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE X86XmmVar& operator=(const X86XmmVar& other) { _copy(other); return *this; } - - ASMJIT_INLINE bool operator==(const X86XmmVar& other) const { return X86Var::operator==(other); } - ASMJIT_INLINE bool operator!=(const X86XmmVar& other) const { return X86Var::operator!=(other); } -}; - -// ============================================================================ -// [asmjit::X86YmmVar] -// ============================================================================ - -//! Ymm variable. -struct X86YmmVar : public X86Var { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new uninitialized `X86YmmVar` instance. - ASMJIT_INLINE X86YmmVar() : X86Var() {} - //! Create a new initialized `X86YmmVar` instance. - ASMJIT_INLINE X86YmmVar(Compiler& c, uint32_t type = kX86VarTypeYmm, const char* name = NULL) : X86Var(NoInit) { - c._newVar(this, type, name); - } - - //! Create a clone of `other`. - ASMJIT_INLINE X86YmmVar(const X86YmmVar& other) : X86Var(other) {} - - //! Create a new uninitialized `X86YmmVar` instance (internal). - explicit ASMJIT_INLINE X86YmmVar(const _NoInit&) : X86Var(NoInit) {} - - // -------------------------------------------------------------------------- - // [X86YmmVar Specific] - // -------------------------------------------------------------------------- - - //! Clone X86YmmVar operand. 
- ASMJIT_INLINE X86YmmVar clone() const { - return X86YmmVar(*this); - } - - //! Reset X86YmmVar operand. - ASMJIT_INLINE void reset() { - X86Var::reset(); - } - - // -------------------------------------------------------------------------- - // [X86YmmVar Cast] - // -------------------------------------------------------------------------- - - // TODO: - - // -------------------------------------------------------------------------- - // [Operator Overload] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE X86YmmVar& operator=(const X86YmmVar& other) { _copy(other); return *this; } - - ASMJIT_INLINE bool operator==(const X86YmmVar& other) const { return X86Var::operator==(other); } - ASMJIT_INLINE bool operator!=(const X86YmmVar& other) const { return X86Var::operator!=(other); } -}; - -// ============================================================================ -// [asmjit::X86VarMap] -// ============================================================================ - -struct X86VarMap : public VarMap { - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get variable-attributes list as VarAttr data. - ASMJIT_INLINE VarAttr* getVaList() const { - return const_cast(_list); - } - - //! Get variable-attributes list as VarAttr data (by class). - ASMJIT_INLINE VarAttr* getVaListByClass(uint32_t c) const { - return const_cast(_list) + _start.get(c); - } - - //! Get position of variables (by class). - ASMJIT_INLINE uint32_t getVaStart(uint32_t c) const { - return _start.get(c); - } - - //! Get count of variables (by class). - ASMJIT_INLINE uint32_t getVaCountByClass(uint32_t c) const { - return _count.get(c); - } - - //! Get VarAttr at `index`. - ASMJIT_INLINE VarAttr* getVa(uint32_t index) const { - ASMJIT_ASSERT(index < _vaCount); - return getVaList() + index; - } - - //! 
Get VarAttr of `c` class at `index`. - ASMJIT_INLINE VarAttr* getVaByClass(uint32_t c, uint32_t index) const { - ASMJIT_ASSERT(index < _count._regs[c]); - return getVaListByClass(c) + index; - } - - // -------------------------------------------------------------------------- - // [Utils] - // -------------------------------------------------------------------------- - - //! Find VarAttr. - ASMJIT_INLINE VarAttr* findVa(VarData* vd) const { - VarAttr* list = getVaList(); - uint32_t count = getVaCount(); - - for (uint32_t i = 0; i < count; i++) - if (list[i].getVd() == vd) - return &list[i]; - - return NULL; - } - - //! Find VarAttr (by class). - ASMJIT_INLINE VarAttr* findVaByClass(uint32_t c, VarData* vd) const { - VarAttr* list = getVaListByClass(c); - uint32_t count = getVaCountByClass(c); - - for (uint32_t i = 0; i < count; i++) - if (list[i].getVd() == vd) - return &list[i]; - - return NULL; - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Special registers on input. - //! - //! Special register(s) restricted to one or more physical register. If there - //! is more than one special register it means that we have to duplicate the - //! variable content to all of them (it means that the same varible was used - //! by two or more operands). We forget about duplicates after the register - //! allocation finishes and marks all duplicates as non-assigned. - X86RegMask _inRegs; - - //! Special registers on output. - //! - //! Special register(s) used on output. Each variable can have only one - //! special register on the output, 'X86VarMap' contains all registers from - //! all 'VarAttr's. - X86RegMask _outRegs; - - //! Clobbered registers (by a function call). - X86RegMask _clobberedRegs; - - //! Start indexes of variables per register class. - X86RegCount _start; - //! Count of variables per register class. 
- X86RegCount _count; - - //! VarAttr list. - VarAttr _list[1]; -}; - -// ============================================================================ -// [asmjit::X86StateCell] -// ============================================================================ - -//! X86/X64 state-cell. -union X86StateCell { - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE uint32_t getState() const { - return _state; - } - - ASMJIT_INLINE void setState(uint32_t state) { - _state = static_cast(state); - } - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void reset() { _packed = 0; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - uint8_t _packed; - - struct { - uint8_t _state : 2; - uint8_t _unused : 6; - }; -}; - -// ============================================================================ -// [asmjit::X86VarState] -// ============================================================================ - -//! X86/X64 state. -struct X86VarState : VarState { - enum { - //! Base index of Gp registers. - kGpIndex = 0, - //! Count of Gp registers. - kGpCount = 16, - - //! Base index of Mm registers. - kMmIndex = kGpIndex + kGpCount, - //! Count of Mm registers. - kMmCount = 8, - - //! Base index of Xmm registers. - kXmmIndex = kMmIndex + kMmCount, - //! Count of Xmm registers. - kXmmCount = 16, - - //! Count of all registers in `X86VarState`. 
- kAllCount = kXmmIndex + kXmmCount - }; - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE VarData** getList() { - return _list; - } - - ASMJIT_INLINE VarData** getListByClass(uint32_t c) { - switch (c) { - case kX86RegClassGp : return _listGp; - case kX86RegClassMm : return _listMm; - case kX86RegClassXyz: return _listXmm; - - default: - return NULL; - } - } - - // -------------------------------------------------------------------------- - // [Clear] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE void reset(size_t numCells) { - ::memset(this, 0, kAllCount * sizeof(VarData*) + - 2 * sizeof(X86RegMask) + - numCells * sizeof(X86StateCell)); - } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - union { - //! List of all allocated variables in one array. - VarData* _list[kAllCount]; - - struct { - //! Allocated Gp registers. - VarData* _listGp[kGpCount]; - //! Allocated Mm registers. - VarData* _listMm[kMmCount]; - //! Allocated Xmm registers. - VarData* _listXmm[kXmmCount]; - }; - }; - - //! Occupied registers (mask). - X86RegMask _occupied; - //! Modified registers (mask). - X86RegMask _modified; - - //! Variables data, the length is stored in `X86Context`. - X86StateCell _cells[1]; -}; - -// ============================================================================ -// [asmjit::X86FuncDecl] -// ============================================================================ - -//! X86 function, including calling convention, arguments and their -//! register indices or stack positions. 
-struct X86FuncDecl : public FuncDecl { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new `X86FuncDecl` instance. - ASMJIT_INLINE X86FuncDecl() { - reset(); - } - - // -------------------------------------------------------------------------- - // [Accessors - X86] - // -------------------------------------------------------------------------- - - //! Get used registers (mask). - //! - //! \note The result depends on the function calling convention AND the - //! function prototype. Returned mask contains only registers actually used - //! to pass function arguments. - ASMJIT_INLINE uint32_t getUsed(uint32_t c) const { - return _used.get(c); - } - - //! Get passed registers (mask). - //! - //! \note The result depends on the function calling convention used; the - //! prototype of the function doesn't affect the mask returned. - ASMJIT_INLINE uint32_t getPassed(uint32_t c) const { - return _passed.get(c); - } - - //! Get preserved registers (mask). - //! - //! \note The result depends on the function calling convention used; the - //! prototype of the function doesn't affect the mask returned. - ASMJIT_INLINE uint32_t getPreserved(uint32_t c) const { - return _preserved.get(c); - } - - //! Get ther order of passed registers (Gp). - //! - //! \note The result depends on the function calling convention used; the - //! prototype of the function doesn't affect the mask returned. - ASMJIT_INLINE const uint8_t* getPassedOrderGp() const { - return _passedOrderGp; - } - - //! Get ther order of passed registers (Xmm). - //! - //! \note The result depends on the function calling convention used; the - //! prototype of the function doesn't affect the mask returned. 
- ASMJIT_INLINE const uint8_t* getPassedOrderXmm() const { - return _passedOrderXmm; - } - - // -------------------------------------------------------------------------- - // [SetPrototype] - // -------------------------------------------------------------------------- - - //! Set function prototype. - //! - //! This will set function calling convention and setup arguments variables. - //! - //! \note This function will allocate variables, it can be called only once. - ASMJIT_API Error setPrototype(uint32_t conv, const FuncPrototype& p); - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - ASMJIT_API void reset(); - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Used registers. - X86RegMask _used; - - //! Passed registers (defined by the calling convention). - X86RegMask _passed; - //! Preserved registers (defined by the calling convention). - X86RegMask _preserved; - - //! Order of registers defined to pass function arguments (Gp). - uint8_t _passedOrderGp[8]; - //! Order of registers defined to pass function arguments (Xmm). - uint8_t _passedOrderXmm[8]; -}; - // ============================================================================ // [asmjit::X86FuncNode] // ============================================================================ //! X86/X64 function node. -struct X86FuncNode : public FuncNode { +struct X86FuncNode : public HLFunc { ASMJIT_NO_COPY(X86FuncNode) // -------------------------------------------------------------------------- @@ -1253,7 +111,7 @@ struct X86FuncNode : public FuncNode { // -------------------------------------------------------------------------- //! Create a new `X86FuncNode` instance. 
- ASMJIT_INLINE X86FuncNode(Compiler* compiler) : FuncNode(compiler) { + ASMJIT_INLINE X86FuncNode(Compiler* compiler) : HLFunc(compiler) { _decl = &_x86Decl; _saveRestoreRegs.reset(); @@ -1284,12 +142,12 @@ struct X86FuncNode : public FuncNode { //! Get argument. ASMJIT_INLINE VarData* getArg(uint32_t i) const { - ASMJIT_ASSERT(i < _x86Decl.getArgCount()); - return static_cast(_argList[i]); + ASMJIT_ASSERT(i < _x86Decl.getNumArgs()); + return static_cast(_args[i]); } //! Get registers which have to be saved in prolog/epilog. - ASMJIT_INLINE uint32_t getSaveRestoreRegs(uint32_t c) { return _saveRestoreRegs.get(c); } + ASMJIT_INLINE uint32_t getSaveRestoreRegs(uint32_t rc) { return _saveRestoreRegs.get(rc); } //! Get stack size needed to align stack back to the nature alignment. ASMJIT_INLINE uint32_t getAlignStackSize() const { return _alignStackSize; } @@ -1367,7 +225,7 @@ struct X86FuncNode : public FuncNode { // ============================================================================ //! X86/X64 function-call node. -struct X86CallNode : public CallNode { +struct X86CallNode : public HLCall { ASMJIT_NO_COPY(X86CallNode) // -------------------------------------------------------------------------- @@ -1375,7 +233,7 @@ struct X86CallNode : public CallNode { // -------------------------------------------------------------------------- //! Create a new `X86CallNode` instance. - ASMJIT_INLINE X86CallNode(Compiler* compiler, const Operand& target) : CallNode(compiler, target) { + ASMJIT_INLINE X86CallNode(Compiler* compiler, const Operand& target) : HLCall(compiler, target) { _decl = &_x86Decl; _usedArgs.reset(); } @@ -1387,7 +245,7 @@ struct X86CallNode : public CallNode { // [Accessors] // -------------------------------------------------------------------------- - //! Get function prototype. + //! Get the function prototype. 
ASMJIT_INLINE X86FuncDecl* getDecl() const { return const_cast(&_x86Decl); } @@ -1397,7 +255,9 @@ struct X86CallNode : public CallNode { // -------------------------------------------------------------------------- //! Set function prototype. - ASMJIT_API Error setPrototype(uint32_t conv, const FuncPrototype& p); + ASMJIT_INLINE Error setPrototype(const FuncPrototype& p) { + return _x86Decl.setPrototype(p); + } // -------------------------------------------------------------------------- // [Arg / Ret] @@ -1434,19 +294,6 @@ struct X86CallNode : public CallNode { X86RegMask _usedArgs; }; -// ============================================================================ -// [asmjit::X86VarId / VarMapping] -// ============================================================================ - -#if !defined(ASMJIT_DOCGEN) -ASMJIT_TYPE_ID(X86MmReg, kX86VarTypeMm); -ASMJIT_TYPE_ID(X86MmVar, kX86VarTypeMm); -ASMJIT_TYPE_ID(X86XmmReg, kX86VarTypeXmm); -ASMJIT_TYPE_ID(X86XmmVar, kX86VarTypeXmm); -ASMJIT_TYPE_ID(X86YmmReg, kX86VarTypeYmm); -ASMJIT_TYPE_ID(X86YmmVar, kX86VarTypeYmm); -#endif // !ASMJIT_DOCGEN - // ============================================================================ // [asmjit::X86Compiler] // ============================================================================ @@ -1454,370 +301,274 @@ ASMJIT_TYPE_ID(X86YmmVar, kX86VarTypeYmm); //! X86/X64 compiler. //! //! This class is used to store instruction stream and allows to modify it on -//! the fly. It uses different concept than `Assembler` class and in fact +//! the fly. It uses a different concept than `Assembler` class and in fact //! `Assembler` is only used as a backend. Compiler never emits machine code -//! and each instruction you use is stored to instruction array instead. This -//! allows to modify instruction stream later and for example to reorder -//! instructions to make better performance. +//! directly, it stores instructions in a code-stream instead. This allows to +//! 
 modify the code-stream later and to apply various transformations to it.
 //!
-//! `X86Compiler` moves code generation to a higher level. Higher level
-//! constructs allows to write more abstract and extensible code that is not
-//! possible with pure `X86Assembler`.
+//! `X86Compiler` moves the code generation to a higher level. Higher level
+//! constructs allow to write more abstract and extensible code that is not
+//! possible with pure `X86Assembler` solution.
 //!
 //! The Story
 //! ---------
 //!
-//! Before telling you how Compiler works I'd like to write a story. I'd like
-//! to cover reasons why this class was created and why I'm recommending to use
-//! it. When I released the first version of AsmJit (0.1) it was a toy. The
-//! first function I wrote was function which is still available as testjit and
-//! which simply returns 1024. The reason why function works for both 32-bit/
-//! 64-bit mode and for Windows/Unix specific calling conventions is luck, no
-//! arguments usage and no registers usage except returning value in EAX/RAX.
+//! The compiler was created as a solution to bring higher level concepts into
+//! a very low-level code generation. It started as an experiment to unify
+//! the code generator for X86 and X64 architectures. These architectures are
+//! built on the same ground, but use some concepts that are radically different
+//! between X86 and X64. Basically the X64 architecture is a good evolution of
+//! X86, because it offers much more registers and it added support for relative
+//! addressing. Both architectures also use different ABI, which means that
+//! function calling conventions are incompatible between each other (not just
+//! between architectures, but also between OSes).
 //!
-//! Then I started a project called BlitJit which was targetted to generating
-//! JIT code for computer graphics. After writing some lines I decided that I
-//! can't join pieces of code together without abstraction, should be
-//!
pixels source pointer in ESI/RSI or EDI/RDI or it's completelly -//! irrellevant? What about destination pointer and SSE2 register for reading -//! input pixels? The simple answer might be "just pick some one and use it". +//! This is a pain when it comes to low-level code generation. When AsmJit was +//! first published the main author's plan was to use it for a 2D pipeline +//! generation. In this task the main use of AsmJit was to combine several code +//! sections altogether without worrying about "which register should contain +//! what". This meant that a pure `X86Assembler` probably won't do the job +//! itself. Instead of hacking the `X86Assembler` to do more the `X86Compiler` +//! concept started to provide a layer that will consume instructions the same +//! way as `X86Assembler`, transform them, and serialize them to `X86Assembler`. //! -//! Another reason for abstraction is function calling-conventions. It's really -//! not easy to write assembler code for 32-bit and 64-bit platform supporting -//! three calling conventions (32-bit is similar between Windows and Unix, but -//! 64-bit calling conventions are different). +//! The compiler concept evolved rapidly after the initial version and was +//! rewritten several times before it stabilized into the current form. It is +//! at the moment still evolving and it used to be the biggest source of bugs +//! in AsmJit in the past (doing non-trivial transformations has it's down-sides). //! -//! At this time I realized that I can't write code which uses named registers, -//! I need to abstract it. In most cases you don't need specific register, you -//! need to emit instruction that does something with 'virtual' register(s), -//! memory, immediate or label. -//! -//! The first version of AsmJit with Compiler was 0.5 (or 0.6?, can't remember). -//! There was support for 32-bit and 64-bit mode, function calling conventions, -//! but when emitting instructions the developer needed to decide which -//! 
registers are changed, which are only read or completely overwritten. This -//! model helped a lot when generating code, especially when joining more -//! code-sections together, but there was also small possibility for mistakes. -//! Simply the first version of Compiler was great improvement over low-level -//! Assembler class, but the API design wasn't perfect. -//! -//! The second version of Compiler, completelly rewritten and based on -//! different goals, is part of AsmJit starting at version 1.0. This version -//! was designed after the first one and it contains serious improvements over -//! the old one. The first improvement is that you just use instructions with -//! virtual registers - called variables. When using compiler there is no way -//! to use native registers, there are variables instead. AsmJit is smarter -//! than before and it knows which register is needed only for read (r), -//! read/write (w) or overwrite (x). Supported are also instructions which -//! are using some registers in implicit way (these registers are not part of -//! instruction definition in string form). For example to use CPUID instruction -//! you must give it four variables which will be automatically allocated in -//! input/output registers (EAX, EBX, ECX, EDX). -//! -//! Another improvement is algorithm used by a register allocator. In first -//! version the registers were allocated when creating instruction stream. In -//! new version registers are allocated after calling `Compiler::make()`, -//! thus register allocator has information about scope of all variables and -//! statistics of their usage. The algorithm to allocate registers is very -//! simple and it's always called as a 'linear scan register allocator'. When -//! you get out of registers the all possible variables are scored and the worst -//! is spilled. Of course algorithm ignores the variables used for current -//! instruction. -//! -//! 
In addition, because registers are allocated after the code stream is -//! generated, the state switches between jumps are handled by Compiler too. -//! You don't need to worry about jumps, compiler always do this dirty work -//! for you. -//! -//! The nearly last thing I'd like to present is calling other functions from -//! the generated code. AsmJit uses a `FuncPrototype` class to hold function -//! parameters, their position in stack (or register index) and return value. -//! This class is used internally, but it can be used to create your own -//! function calling-convention. All standard function calling conventions are -//! implemented. -//! -//! Please enjoy the new version of Compiler, it was created for writing a -//! low-level code using high-level API, leaving developer to concentrate on -//! real problems and not to solving a register puzzle. +//! The compiler at the moment uses linear-scan register allocation and can +//! look ahead to see which registers it should use. There are many limitations +//! at the moment, but if the resulting code doesn't use so much registers at +//! a same time it's pretty decent. However, please don't expect miracles, it +//! cannot compete with register allocators used in todays C++ compilers. //! //! Code Generation //! --------------- //! -//! First that is needed to know about compiler is that compiler never emits -//! machine code. It's used as a middleware between @c asmjit::Assembler and -//! your code. There is also convenience method @c make() that allows to -//! generate machine code directly without creating @c asmjit::Assembler -//! instance. -//! -//! Comparison of generating machine code through @c Assembler and directly -//! by @c Compiler: -//! -//! ~~~ -//! // Assembler instance is low level code generation class that emits -//! // machine code. -//! Assembler a; -//! -//! // Compiler instance is high level code generation class that stores all -//! // instructions in internal representation. -//! 
Compiler c; -//! -//! // ... put your code here ... -//! -//! // Final step - generate code. asmjit::Compiler::serialize() will send all -//! // instructions into Assembler and this ensures generating real machine code. -//! c.serialize(&a); -//! -//! // Your function -//! void* fn = a.make(); -//! ~~~ -//! -//! Example how to generate machine code using only @c Compiler (preferred): -//! -//! ~~~ -//! // Compiler instance is enough. -//! Compiler c; -//! -//! // ... put your code here ... -//! -//! // Your function -//! void* fn = c.make(); -//! ~~~ -//! -//! You can see that there is @c asmjit::Compiler::serialize() function that -//! emits instructions into @c asmjit::Assembler(). This layered architecture -//! means that each class is used for something different and there is no code -//! duplication. For convenience there is also @c asmjit::Compiler::make() -//! method that can create your function using @c asmjit::Assembler, but -//! internally (this is preferred bahavior when using @c asmjit::Compiler). -//! -//! The @c make() method allocates memory using `Runtime` instance passed -//! into the @c Compiler constructor. If code generator is used to create JIT -//! function then virtual memory allocated by `VMemMgr` is used. +//! The `X86Compiler` uses `X86Assembler` as a backend. It integrates with it, +//! which means that labels created by Assembler can be used by Compiler and +//! vice-versa. The following code shows the preferred and simplest way of +//! creating a compiler: //! //! ~~~ //! JitRuntime runtime; -//! Compiler c(&runtime); +//! X86Assembler a(&runtime); +//! X86Compiler c(&a); //! -//! // ... put your code using Compiler instance ... +//! // ... use the compiler `c` ... //! -//! // Your function -//! void* fn = c.make(); -//! -//! runtime.release(fn); +//! c.finalize(); //! ~~~ //! +//! After the `finalize()` is called the compiler is detached from the assembler +//! and reset (cannot be used after finalization). 
 It can be reattached again by
+//! using `c.attach(&a)`, but the Compiler won't remember anything from the
+//! previous code generation execution - it will be like creating a new instance
+//! of `X86Compiler`.
+//!
 //! Functions
 //! ---------
 //!
-//! To build functions with @c Compiler, see @c asmjit::Compiler::addFunc()
-//! method.
+//! See \ref asmjit::Compiler::addFunc().
 //!
 //! Variables
 //! ---------
 //!
-//! Compiler is able to manage variables and function arguments. Function
-//! arguments are moved to variables by using @c setArg() method, where the
-//! first parameter is argument index and second parameter is the variable
-//! instance. To declare variable use @c newGpVar(), @c newMmVar() and @c
-//! newXmmVar() methods. The @c newXXX() methods accept also parameter
-//! describing the variable type. For example the @c newGpVar() method always
-//! creates variable which size matches the target architecture size (for
-//! 32-bit target the 32-bit variable is created, for 64-bit target the
-//! variable size is 64-bit). To override this behavior the variable type
-//! must be specified.
+//! Compiler has a built-in support for variables and assigning function
+//! arguments. Variables are created by using `newXXX()` methods. If the
+//! method ends with `Var`, like `newXmmVar()`, it accepts a variable type
+//! as a first parameter. Variable type defines the layout and size of the
+//! variable. It's the most important for general purpose registers, where
+//! the variable type affects which instructions are generated when used as
+//! operands. For example "mov eax, edx" is different than "mov rax, rdx",
+//! but it's still the same "mov" instruction. Since the variable types are
+//! verbose, an alternative form to create variables more easily was introduced.
+//!
+//! Instead of using `newGpVar(kVarTypeIntX, ...)` alternative forms like
+//! `newIntX(...)` or `newUIntX(...)` can be used instead. Variables can
+//!
have a name so the code that creates a variable usually looks like +//! `newInt32("a")` or `newIntPtr("pInputBuffer")`, etc... +//! +//! Other register types like MMX or XMM have also alternative forms, so +//! for example `newMm("mmx")`, `newXmm("xmm")`, `newXmmPd("doubles")`, and +//! other forms can be used to create SIMD variables. +//! +//! Function arguments are associated with variables by using `setArg()`, where +//! the first parameter is argument index and the second parameter is the +//! variable instance. Function arguments can be a little bit tricky, because +//! asmjit allows to also define 64-bit arguments on a 32-bit architecture, +//! where the argument itself is split into two - lower-32 bit and higher 32-bit. +//! This applies also to a return value. +//! +//! The following snippet shows how to create a function and associate function +//! arguments with variables: //! //! ~~~ -//! // Compiler and function declaration - void f(int*); -//! Compiler c; -//! X86GpVar a0(c, kVarTypeIntPtr); +//! JitRuntime runtime; +//! X86Assembler a(&runtime); +//! X86Compiler c(&a); //! -//! c.addFunc(kFuncConvHost, FuncBuilder1()); -//! c.setArg(0, a0); +//! // Function prototype is "int function(int*, int*)" by using the host +//! // calling convention, which should be __cdecl in our case (if not +//! // configured to something else). +//! c.addFunc(FuncBuilder2(kCallConvHost)); //! -//! // Create your variables. -//! X86GpVar x0(c, kVarTypeInt32); -//! X86GpVar x1(c, kVarTypeInt32); +//! // Associate function arguments. +//! X86GpVar pX = c.newIntPtr("pX"); +//! X86GpVar pY = c.newIntPtr("pY"); //! -//! // Init your variables. -//! c.mov(x0, 1); -//! c.mov(x1, 2); +//! c.setArg(0, pX); +//! c.setArg(1, pY); //! -//! // ... your code ... -//! c.add(x0, x1); -//! // ... your code ... +//! // Do something useful :) +//! X86GpVar x = c.newInt32("x"); +//! X86GpVar y = c.newInt32("y"); //! -//! // Store result to a given pointer in first argument -//! 
 c.mov(dword_ptr(a0), x0);
+//! c.mov(x, dword_ptr(pX));
+//! c.add(y, dword_ptr(pY));
 //!
-//! // End of function body.
+//! // Return `x`.
+//! c.ret(x);
+//!
+//! // End of the function body.
 //! c.endFunc();
 //!
-//! // Make the function.
+//! // Finalize the compiler.
+//! c.finalize();
+//!
+//! // Use the `X86Assembler` to assemble and relocate the function. It returns
+//! // a pointer to the first byte of the code generated, which is the function
+//! // entry point in our case.
 //! typedef void (*MyFunc)(int*);
-//! MyFunc func = asmjit_cast(c.make());
+//! MyFunc func = asmjit_cast(a.make());
 //! ~~~
 //!
-//! This code snipped needs to be explained. You can see that there are more
-//! variable types that can be used by `Compiler`. Most useful variables can
-//! be allocated using general purpose registers (`X86GpVar`), MMX registers
-//! (`X86MmVar`) or SSE/SSE2 registers (`X86XmmVar`).
+//! The snippet uses methods to create variables, to associate them with
+//! function arguments, and to use them to return from the generated function.
 //!
-//! X86/X64 variable types:
+//! When a variable is created, the initial state is `kVarStateNone`, when
+//! it's allocated to a register or spilled to memory it changes its
+//! state to `kVarStateReg` or `kVarStateMem`, respectively. It's usual
+//! during the variable's lifetime that its state is changed multiple times. To generate
+//! better code, you can explicitly control the allocation and spilling:
 //!
+//! - `alloc()` - Explicit method to alloc variable into register. It can be
+//! used to force allocation of a variable before a loop for example.
+//!
+//! - `spill()` - Explicit method to spill variable. If variable is in
+//! register and you call this method, it's moved to its home memory
+//! location. If the variable is not in register no operation is performed.
+//!
+//! - `unuse()` - Unuse variable (you can use this to end the variable scope
+//! or sub-scope).
+//!
+//! List of X86/X64 variable types:
 //!
- `kVarTypeInt8` - Signed 8-bit integer, mapped to Gpd register (eax, ebx, ...). //! - `kVarTypeUInt8` - Unsigned 8-bit integer, mapped to Gpd register (eax, ebx, ...). -//! //! - `kVarTypeInt16` - Signed 16-bit integer, mapped to Gpd register (eax, ebx, ...). //! - `kVarTypeUInt16` - Unsigned 16-bit integer, mapped to Gpd register (eax, ebx, ...). -//! //! - `kVarTypeInt32` - Signed 32-bit integer, mapped to Gpd register (eax, ebx, ...). //! - `kVarTypeUInt32` - Unsigned 32-bit integer, mapped to Gpd register (eax, ebx, ...). -//! //! - `kVarTypeInt64` - Signed 64-bit integer, mapped to Gpq register (rax, rbx, ...). //! - `kVarTypeUInt64` - Unsigned 64-bit integer, mapped to Gpq register (rax, rbx, ...). -//! //! - `kVarTypeIntPtr` - intptr_t, mapped to Gpd/Gpq register; depends on target, not host! //! - `kVarTypeUIntPtr` - uintptr_t, mapped to Gpd/Gpq register; depends on target, not host! +//! - `kX86VarTypeMm` - 64-bit Mm register (mm0, mm1, ...). +//! - `kX86VarTypeXmm` - 128-bit SSE register. +//! - `kX86VarTypeXmmSs` - 128-bit SSE register that contains a scalar 32-bit SP-FP value. +//! - `kX86VarTypeXmmSd` - 128-bit SSE register that contains a scalar 64-bit DP-FP value. +//! - `kX86VarTypeXmmPs` - 128-bit SSE register that contains 4 packed 32-bit SP-FP values. +//! - `kX86VarTypeXmmPd` - 128-bit SSE register that contains 2 packed 64-bit DP-FP values. +//! - `kX86VarTypeYmm` - 256-bit AVX register. +//! - `kX86VarTypeYmmPs` - 256-bit AVX register that contains 4 packed 32-bit SP-FP values. +//! - `kX86VarTypeYmmPd` - 256-bit AVX register that contains 2 packed 64-bit DP-FP values. //! -//! - `kVarTypeFp32` - 32-bit floating point register (fp0, fp1, ...). -//! - `kVarTypeFp64` - 64-bit floating point register (fp0, fp1, ...). -//! -//! - `kX86VarTypeMm` - 64-bit Mm register (mm0, mm1, ...). -//! -//! - `kX86VarTypeXmm` - 128-bit SSE register. -//! - `kX86VarTypeXmmSs` - 128-bit SSE register that contains a scalar 32-bit SP-FP value. -//! 
- `kX86VarTypeXmmSd` - 128-bit SSE register that contains a scalar 64-bit DP-FP value. -//! - `kX86VarTypeXmmPs` - 128-bit SSE register that contains 4 packed 32-bit SP-FP values. -//! - `kX86VarTypeXmmPd` - 128-bit SSE register that contains 2 packed 64-bit DP-FP values. -//! -//! - `kX86VarTypeYmm` - 256-bit AVX register. -//! - `kX86VarTypeYmmPs` - 256-bit AVX register that contains 4 packed 32-bit SP-FP values. -//! - `kX86VarTypeYmmPd` - 256-bit AVX register that contains 2 packed 64-bit DP-FP values. -//! -//! Variable states: -//! -//! - `kVarStateNone - State that is assigned to newly created variables or -//! to not used variables (dereferenced to zero). +//! List of X86/X64 variable states: +//! - `kVarStateNone - State that is assigned to newly created variables or to +//! not used variables (dereferenced to zero). //! - `kVarStateReg - State that means that variable is currently allocated in //! register. //! - `kVarStateMem - State that means that variable is currently only in //! memory location. //! -//! When you create new variable, initial state is always `kVarStateNone`, -//! allocating it to register or spilling to memory changes this state to -//! `kVarStateReg` or `kVarStateMem`, respectively. During variable lifetime -//! it's usual that its state is changed multiple times. To generate better -//! code, you can control allocating and spilling by using up to four types -//! of methods that allows it (see next list). -//! -//! Explicit variable allocating / spilling methods: -//! -//! - `Compiler::alloc()` - Explicit method to alloc variable into register. -//! It can be used to force allocation a variable before a loop for example. -//! -//! - `Compiler::spill()` - Explicit method to spill variable. If variable -//! is in register and you call this method, it's moved to its home memory -//! location. If variable is not in register no operation is performed. -//! -//! - `Compiler::unuse()` - Unuse variable (you can use this to end the -//! 
variable scope or sub-scope). -//! -//! Please see AsmJit tutorials (testcompiler.cpp and testvariables.cpp) for -//! more complete examples. -//! //! Memory Management //! ----------------- //! //! Compiler Memory management follows these rules: //! -//! - Everything created by `Compiler` is always freed by `Compiler`. -//! - To get decent performance, compiler always uses larger memory buffer -//! for objects to allocate and when compiler instance is destroyed, this -//! buffer is freed. Destructors of active objects are called when -//! destroying compiler instance. Destructors of abadonded compiler -//! objects are called immediately after abadonding them. +//! - Everything created by `X86Compiler` is always freed by `X86Compiler`. +//! - To get a decent performance, compiler always uses large memory buffers +//! to allocate objects. When the compiler is destroyed, it invalidates all +//! objects that it created. //! - This type of memory management is called 'zone memory management'. //! -//! This means that you can't use any `Compiler` object after destructing it, -//! it also means that each object like `Label`, `Var` and others are created -//! and managed by @c Compiler itself. These objects contain ID which is -//! used internally by Compiler to store additional information about these -//! objects. +//! In other words, anything that returns a pointer to something cannot be +//! used after the compiler was destroyed. However, since compiler integrates +//! with assembler, labels created by Compiler can be used by Assembler or +//! another Compiler attached to it. //! //! Control-Flow and State Management //! --------------------------------- //! -//! The `Compiler` automatically manages state of the variables when using -//! control flow instructions like jumps, conditional jumps and calls. There -//! is minimal heuristics for choosing the method how state is saved or restored. +//! 
 The `X86Compiler` automatically manages state of all variables when using
+//! control flow instructions like jumps, conditional jumps and function calls.
 //!
-//! Generally the state can be changed only when using jump or conditional jump
-//! instruction. When using non-conditional jump then state change is embedded
-//! into the instruction stream before the jump. When using conditional jump
-//! the `Compiler` decides whether to restore state before the jump or whether
-//! to use another block where state is restored. The last case is that no-code
-//! have to be emitted and there is no state change (this is of course ideal).
-//!
-//! Choosing whether to embed 'restore-state' section before conditional jump
-//! is quite simple. If jump is likely to be 'taken' then code is embedded, if
-//! jump is unlikely to be taken then the small code section for state-switch
-//! will be generated instead.
-//!
-//! Next example is the situation where the extended code block is used to
-//! do state-change:
+//! In general the internal state can be changed only when using jump or
+//! conditional jump. When using non-conditional jump the state change is
+//! embedded before the jump itself, so there is basically zero overhead.
+//! However, conditional jumps are more complicated and the compiler can
+//! generate in some cases a block at the end of the function that changes
+//! the state of one branch. Usually the "taken" branch is embedded directly
+//! before the jump, and the "not-taken" branch has a separate code block.
+//!
+//! The next example shows the extra code block generated for a state change:
 //!
 //! ~~~
-//! Compiler c;
+//! JitRuntime runtime;
+//! X86Assembler a(&runtime);
+//! X86Compiler c(&a);
 //!
-//! c.addFunc(kFuncConvHost, FuncBuilder0());
+//! c.addFunc(FuncBuilder0(kCallConvHost));
 //!
-//! // Labels.
+//! X86GpVar x = c.newInt32("x");
+//! X86GpVar y = c.newInt32("y");
 //! Label L0(c);
 //!
-//! // Variables.
-//!
X86GpVar var0(c, kVarTypeInt32); -//! X86GpVar var1(c, kVarTypeInt32); +//! // After these two lines, `x` and `y` will be always stored in registers: +//! // x - register. +//! // y - register. +//! c.xor_(x, x); +//! c.xor_(y, y); +//! c.cmp(x, y); //! -//! // Cleanup. After these two lines, the var0 and var1 will be always stored -//! // in registers. Our example is very small, but in larger code the var0 can -//! // be spilled by xor(var1, var1). -//! c.xor_(var0, var0); -//! c.xor_(var1, var1); -//! c.cmp(var0, var1); -//! // State: -//! // var0 - register. -//! // var1 - register. +//! // Manually spill `x` and `y`: +//! // x - memory. +//! // y - memory. +//! c.spill(x); +//! c.spill(y); //! -//! // We manually spill these variables. -//! c.spill(var0); -//! c.spill(var1); -//! // State: -//! // var0 - memory. -//! // var1 - memory. -//! -//! // Conditional jump to L0. It will be always taken, but compiler thinks that -//! // it is unlikely taken so it will embed state change code somewhere. +//! // Conditional jump to L0. It will be always taken, but the compiler thinks +//! // that it is unlikely to be taken so it will embed the state-change code +//! // somewhere else. //! c.je(L0); //! -//! // Do something. The variables var0 and var1 will be allocated again. -//! c.add(var0, 1); -//! c.add(var1, 2); -//! // State: -//! // var0 - register. -//! // var1 - register. +//! // Do something. The variables `x` and `y` will be allocated again. +//! // `x` - register. +//! // `y` - register. +//! c.add(x, 1); +//! c.add(y, 2); //! -//! // Bind label here, the state is not changed. +//! // Bind a label here, the state is not changed. +//! // `x` - register. +//! // `y` - register. //! c.bind(L0); -//! // State: -//! // var0 - register. -//! // var1 - register. //! -//! // We need to use var0 and var1, because if compiler detects that variables -//! // are out of scope then it optimizes the state-change. -//! c.sub(var0, var1); -//! // State: -//! 
// var0 - register. -//! // var1 - register. +//! // Use `x` and `y`, because the compiler knows the life-time and can +//! // eliminate the state change of dead variables. +//! // `x` - register. +//! // `y` - register. +//! c.sub(x, y); //! //! c.endFunc(); //! ~~~ @@ -1825,78 +576,75 @@ ASMJIT_TYPE_ID(X86YmmVar, kX86VarTypeYmm); //! The output: //! //! ~~~ -//! xor eax, eax ; xor var_0, var_0 -//! xor ecx, ecx ; xor var_1, var_1 -//! cmp eax, ecx ; cmp var_0, var_1 -//! mov [esp - 24], eax ; spill var_0 -//! mov [esp - 28], ecx ; spill var_1 +//! xor eax, eax ; xor x, x +//! xor ecx, ecx ; xor y, y +//! cmp eax, ecx ; cmp x, y +//! mov [esp - 24], eax ; spill x +//! mov [esp - 28], ecx ; spill y //! je L0_Switch -//! mov eax, [esp - 24] ; alloc var_0 -//! add eax, 1 ; add var_0, 1 -//! mov ecx, [esp - 28] ; alloc var_1 -//! add ecx, 2 ; add var_1, 2 +//! mov eax, [esp - 24] ; alloc x +//! add eax, 1 ; add x, 1 +//! mov ecx, [esp - 28] ; alloc y +//! add ecx, 2 ; add y, 2 //! L0: -//! sub eax, ecx ; sub var_0, var_1 +//! sub eax, ecx ; sub x, y //! ret //! //! ; state-switch begin //! L0_Switch0: -//! mov eax, [esp - 24] ; alloc var_0 -//! mov ecx, [esp - 28] ; alloc var_1 +//! mov eax, [esp - 24] ; alloc x +//! mov ecx, [esp - 28] ; alloc y //! jmp short L0 //! ; state-switch end //! ~~~ //! -//! You can see that the state-switch section was generated (see L0_Switch0). -//! The compiler is unable to restore state immediately when emitting the -//! forward jump (the code is generated from first to last instruction and -//! the target state is simply not known at this time). +//! As can be seen, the state-switch section was generated (L0_Switch0). The +//! compiler was unable to restore the state immediately when emitting the +//! forward jump (the code is generated from the first to last instruction +//! and the target state is simply not known at this time). //! -//! To tell `Compiler` that you want to embed state-switch code before jump -//! 
it's needed to create backward jump (where also processor expects that it -//! will be taken). To demonstrate the possibility to embed state-switch before -//! jump we use slightly modified code: +//! To tell the compiler to embed the state-switch code before the jump it's +//! needed to create a backward jump (where also processor expects that it +//! will be taken). A slightly modified code is used to demonstrate the +//! possibility to embed the state-switch before the jump: //! //! ~~~ -//! Compiler c; +//! JitRuntime runtime; +//! X86Assembler a(&runtime); +//! X86Compiler c(&a); //! -//! c.addFunc(kFuncConvHost, FuncBuilder0()); +//! c.addFunc(FuncBuilder0(kCallConvHost)); //! -//! // Labels. +//! X86GpVar x = c.newInt32("x"); +//! X86GpVar y = c.newInt32("y"); //! Label L0(c); //! -//! // Variables. -//! X86GpVar var0(c, kVarTypeInt32); -//! X86GpVar var1(c, kVarTypeInt32); +//! // After these two lines, `x` and `y` will be always stored in registers. +//! // `x` - register. +//! // `y` - register. +//! c.xor_(x, x); +//! c.xor_(y, y); //! -//! // Cleanup. After these two lines, the var0 and var1 will be always stored -//! // in registers. Our example is very small, but in larger code the var0 can -//! // be spilled by xor(var1, var1). -//! c.xor_(var0, var0); -//! c.xor_(var1, var1); -//! // State: -//! // var0 - register. -//! // var1 - register. +//! // Manually spill `x` and `y`. +//! // `x` - memory. +//! // `y` - memory. +//! c.spill(x); +//! c.spill(y); //! -//! // We manually spill these variables. -//! c.spill(var0); -//! c.spill(var1); -//! // State: -//! // var0 - memory. -//! // var1 - memory. -//! -//! // Bind our label here. +//! // Bind a label here, the state is not changed. +//! // `x` - memory. +//! // `y` - memory. //! c.bind(L0); //! //! // Do something, the variables will be allocated again. -//! c.add(var0, 1); -//! c.add(var1, 2); +//! c.add(x, 1); +//! c.add(y, 2); //! // State: -//! 
// `x` - register. +//! // `y` - register. //! -//! // Backward conditional jump to L0. The default behavior is that it is taken -//! // so state-change code will be embedded here. +//! // Backward conditional jump to L0. The default behavior is that it +//! // will be taken so the state-change code will be embedded here. //! c.je(L0); //! //! c.endFunc(); @@ -1905,129 +653,93 @@ ASMJIT_TYPE_ID(X86YmmVar, kX86VarTypeYmm); //! The output: //! //! ~~~ -//! xor ecx, ecx ; xor var_0, var_0 -//! xor edx, edx ; xor var_1, var_1 -//! mov [esp - 24], ecx ; spill var_0 -//! mov [esp - 28], edx ; spill var_1 +//! xor ecx, ecx ; xor x, x +//! xor edx, edx ; xor y, y +//! mov [esp - 24], ecx ; spill x +//! mov [esp - 28], edx ; spill y //! L2: -//! mov ecx, [esp - 24] ; alloc var_0 -//! add ecx, 1 ; add var_0, 1 -//! mov edx, [esp - 28] ; alloc var_1 -//! add edx, 2 ; add var_1, 2 +//! mov ecx, [esp - 24] ; alloc x +//! add ecx, 1 ; add x, 1 +//! mov edx, [esp - 28] ; alloc y +//! add edx, 2 ; add y, 2 //! //! ; state-switch begin -//! mov [esp - 24], ecx ; spill var_0 -//! mov [esp - 28], edx ; spill var_1 +//! mov [esp - 24], ecx ; spill x +//! mov [esp - 28], edx ; spill y //! ; state-switch end //! //! je short L2 //! ret //! ~~~ //! -//! Please notice where the state-switch section is located. The `Compiler` -//! decided that jump is likely to be taken so the state change is embedded -//! before the conditional jump. To change this behavior into the previous -//! case it's needed to add an option (kInstOptionTaken/kInstOptionNotTaken). +//! Please note where the state-switch sections are located in both examples. +//! To inform the compiler which branch is likely to be taken use the following +//! options: +//! - `kInstOptionTaken` - The conditional jump is likely to be taken. +//! - `kInstOptionNotTaken` - The conditional jump is unlikely to be taken. //! -//! Replacing the c.je(L0) by c.taken(); c.je(L0) -//! will generate code like this: +//! 
Both options can be used by simply using `taken()` and/or `notTaken()`. The +//! example above could be changed to `c.taken().je(L0)`, which would generate +//! the following output: //! //! ~~~ -//! xor ecx, ecx ; xor var_0, var_0 -//! xor edx, edx ; xor var_1, var_1 -//! mov [esp - 24], ecx ; spill var_0 -//! mov [esp - 28], edx ; spill var_1 +//! xor ecx, ecx ; xor x, x +//! xor edx, edx ; xor y, y +//! mov [esp - 24], ecx ; spill x +//! mov [esp - 28], edx ; spill y //! L0: -//! mov ecx, [esp - 24] ; alloc var_0 -//! add ecx, 1 ; add var_0, a -//! mov edx, [esp - 28] ; alloc var_1 -//! add edx, 2 ; add var_1, 2 +//! mov ecx, [esp - 24] ; alloc x +//! add ecx, 1 ; add x, 1 +//! mov edx, [esp - 28] ; alloc y +//! add edx, 2 ; add y, 2 //! je L0_Switch, 2 //! ret //! //! ; state-switch begin //! L0_Switch: -//! mov [esp - 24], ecx ; spill var_0 -//! mov [esp - 28], edx ; spill var_1 +//! mov [esp - 24], ecx ; spill x +//! mov [esp - 28], edx ; spill y //! jmp short L0 //! ; state-switch end //! ~~~ //! -//! This section provided information about how state-change works. The -//! behavior is deterministic and it can be overridden. +//! This section provided information of how the state-change works. The +//! behavior is deterministic and can be overridden manually if needed. //! //! Advanced Code Generation //! ------------------------ //! -//! This section describes advanced method of code generation available to -//! `Compiler` (but also to `Assembler`). When emitting code to instruction -//! stream the methods like `mov()`, `add()`, `sub()` can be called directly -//! (advantage is static-type control performed also by C++ compiler) or -//! indirectly using `emit()` method. The `emit()` method needs only instruction -//! code and operands. +//! This section describes an advanced method of code generation available in +//! assembler and compiler. Every instruction supported by AsmJit has its ID, +//! 
which can be used with method `emit()` instead of using compiler's intrinsics. +//! For example `mov(x, y)` is an equivalent to `emit(kX86InstIdMov, x, y)`. +//! The latter is, however, not type-safe and C++ compiler won't help you to +//! detect some bugs at compile time. On the other hand the latter allows you +//! to generate some code programmatically without using if/else constructs. //! -//! Example of code generating by standard type-safe API: +//! There are many use-cases where the unsafe API can be used, for example: //! //! ~~~ -//! Compiler c; +//! uint32_t translateOp(const char* op) { +//! if (strcmp(op, "add") == 0) return kX86InstIdAddsd; +//! if (strcmp(op, "sub") == 0) return kX86InstIdSubsd; +//! if (strcmp(op, "mul") == 0) return kX86InstIdMulsd; +//! if (strcmp(op, "div") == 0) return kX86InstIdDivsd; //! -//! X86GpVar var0(c, kVarTypeInt32); -//! X86GpVar var1(c, kVarTypeInt32); +//! return kInstIdNone; +//! } //! -//! ... -//! -//! c.mov(var0, 0); -//! c.add(var0, var1); -//! c.sub(var0, var1); -//! ~~~ -//! -//! The code above can be rewritten as: -//! -//! ~~~ -//! Compiler c; -//! -//! X86GpVar var0(c, kVarTypeInt32); -//! X86GpVar var1(c, kVarTypeInt32); -//! -//! ... -//! -//! c.emit(kX86InstIdMov, var0, 0); -//! c.emit(kX86InstIdAdd, var0, var1); -//! c.emit(kX86InstIdSub, var0, var1); -//! ~~~ -//! -//! The advantage of first snippet is very friendly API and type-safe control -//! that is controlled by the C++ compiler. The advantage of second snippet is -//! availability to replace or generate instruction code in different places. -//! See the next example how the `emit()` method can be used to generate abstract -//! code. -//! -//! Use case: -//! -//! ~~~ -//! bool emitArithmetic(Compiler& c, X86XmmVar& var0, X86XmmVar& var1, const char* op) { -//! uint32_t code = kInstIdNone; -//! -//! if (strcmp(op, "ADD") == 0) -//! code = kX86InstIdAddss; -//! else if (::strcmp(op, "SUBTRACT") == 0) -//! code = kX86InstIdSubss; -//! 
else if (::strcmp(op, "MULTIPLY") == 0) -//! code = kX86InstIdMulss; -//! else if (::strcmp(op, "DIVIDE") == 0) -//! code = kX86InstIdDivss; -//! else -//! // Invalid parameter? -//! return false; -//! -//! c.emit(code, var0, var1); +//! void emitArith(X86Compiler& c, const char* op, const X86XmmVar& a, const X86XmmVar& b) { +//! uint32_t instId = translateOp(op); +//! if (instId != kInstIdNone) +//! c.emit(instId, a, b); //! } //! ~~~ //! -//! Other use cases are waiting for you! Be sure that instruction you are -//! emitting is correct and encodable, because if not, Assembler will set -//! status code to `kErrorUnknownInst`. -struct ASMJIT_VCLASS X86Compiler : public Compiler { +//! Other use cases are waiting for you! Be sure that the instructions that +//! are being emitted are correct and encodable, otherwise the Assembler will +//! fail and set the status code to `kErrorUnknownInst`. +struct ASMJIT_VIRTAPI X86Compiler : public Compiler { ASMJIT_NO_COPY(X86Compiler) // -------------------------------------------------------------------------- @@ -2035,214 +747,211 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { // -------------------------------------------------------------------------- //! Create a `X86Compiler` instance. - ASMJIT_API X86Compiler(Runtime* runtime, uint32_t arch -#if defined(ASMJIT_ARCH_X86) || defined(ASMJIT_ARCH_X64) - = kArchHost -#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - ); - + ASMJIT_API X86Compiler(X86Assembler* assembler = NULL); //! Destroy the `X86Compiler` instance. ASMJIT_API ~X86Compiler(); + // -------------------------------------------------------------------------- + // [Attach / Reset] + // -------------------------------------------------------------------------- + + //! \override + ASMJIT_API virtual Error attach(Assembler* assembler); + //! 
\override + ASMJIT_API virtual void reset(bool releaseMemory); + + // ------------------------------------------------------------------------- + // [Finalize] + // ------------------------------------------------------------------------- + + ASMJIT_API virtual Error finalize(); + + // -------------------------------------------------------------------------- + // [Assembler] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE X86Assembler* getAssembler() const { + return static_cast(_assembler); + } + // -------------------------------------------------------------------------- // [Arch] // -------------------------------------------------------------------------- - //! \internal - //! - //! Set the architecture to `arch`. - ASMJIT_API Error setArch(uint32_t arch); - //! Get count of registers of the current architecture and mode. - ASMJIT_INLINE const X86RegCount& getRegCount() const { - return _regCount; - } + ASMJIT_INLINE const X86RegCount& getRegCount() const { return _regCount; } //! Get Gpd or Gpq register depending on the current architecture. - ASMJIT_INLINE X86GpReg gpz(uint32_t index) const { - return X86GpReg(zax, index); - } + ASMJIT_INLINE X86GpReg gpz(uint32_t index) const { return X86GpReg(zax, index); } //! Create an architecture dependent intptr_t memory operand. ASMJIT_INLINE X86Mem intptr_ptr(const X86GpReg& base, int32_t disp = 0) const { - return x86::ptr(base, disp, _regSize); + return x86::ptr(base, disp, zax.getSize()); } //! \overload ASMJIT_INLINE X86Mem intptr_ptr(const X86GpReg& base, const X86GpReg& index, uint32_t shift = 0, int32_t disp = 0) const { - return x86::ptr(base, index, shift, disp, _regSize); + return x86::ptr(base, index, shift, disp, zax.getSize()); } //! \overload ASMJIT_INLINE X86Mem intptr_ptr(const Label& label, int32_t disp = 0) const { - return x86::ptr(label, disp, _regSize); + return x86::ptr(label, disp, zax.getSize()); } //! 
\overload ASMJIT_INLINE X86Mem intptr_ptr(const Label& label, const X86GpReg& index, uint32_t shift, int32_t disp = 0) const { - return x86::ptr(label, index, shift, disp, _regSize); + return x86::ptr(label, index, shift, disp, zax.getSize()); } //! \overload ASMJIT_INLINE X86Mem intptr_ptr(const X86RipReg& rip, int32_t disp = 0) const { - return x86::ptr(rip, disp, _regSize); + return x86::ptr(rip, disp, zax.getSize()); } //! \overload ASMJIT_INLINE X86Mem intptr_ptr_abs(Ptr pAbs, int32_t disp = 0) const { - return x86::ptr_abs(pAbs, disp, _regSize); + return x86::ptr_abs(pAbs, disp, zax.getSize()); } //! \overload ASMJIT_INLINE X86Mem intptr_ptr_abs(Ptr pAbs, const X86GpReg& index, uint32_t shift, int32_t disp = 0) const { - return x86::ptr_abs(pAbs, index, shift, disp, _regSize); + return x86::ptr_abs(pAbs, index, shift, disp, zax.getSize()); } //! \overload ASMJIT_INLINE X86Mem intptr_ptr(const X86GpVar& base, int32_t disp = 0) { - return x86::ptr(base, disp, _regSize); + return x86::ptr(base, disp, zax.getSize()); } //! \overload ASMJIT_INLINE X86Mem intptr_ptr(const X86GpVar& base, const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) { - return x86::ptr(base, index, shift, disp, _regSize); + return x86::ptr(base, index, shift, disp, zax.getSize()); } //! \overload ASMJIT_INLINE X86Mem intptr_ptr(const Label& label, const X86GpVar& index, uint32_t shift, int32_t disp = 0) { - return x86::ptr(label, index, shift, disp, _regSize); + return x86::ptr(label, index, shift, disp, zax.getSize()); } //! \overload ASMJIT_INLINE X86Mem intptr_ptr_abs(Ptr pAbs, const X86GpVar& index, uint32_t shift, int32_t disp = 0) { - return x86::ptr_abs(pAbs, index, shift, disp, _regSize); + return x86::ptr_abs(pAbs, index, shift, disp, zax.getSize()); } // -------------------------------------------------------------------------- // [Inst / Emit] // -------------------------------------------------------------------------- - //! Create a new `InstNode`. 
- ASMJIT_API InstNode* newInst(uint32_t code); + //! Create a new `HLInst`. + ASMJIT_API HLInst* newInst(uint32_t code); //! \overload - ASMJIT_API InstNode* newInst(uint32_t code, const Operand& o0); + ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0); //! \overload - ASMJIT_API InstNode* newInst(uint32_t code, const Operand& o0, const Operand& o1); + ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1); //! \overload - ASMJIT_API InstNode* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2); + ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2); //! \overload - ASMJIT_API InstNode* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3); + ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3); //! \overload - ASMJIT_API InstNode* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4); + ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4); - //! Add a new `InstNode`. - ASMJIT_API InstNode* emit(uint32_t code); + //! Add a new `HLInst`. + ASMJIT_API HLInst* emit(uint32_t code); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0, const Operand& o1); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2); //! 
\overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, int o0); + ASMJIT_API HLInst* emit(uint32_t code, int o0); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, uint64_t o0); + ASMJIT_API HLInst* emit(uint32_t code, uint64_t o0); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0, int o1); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, int o1); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0, uint64_t o1); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, uint64_t o1); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0, const Operand& o1, int o2); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, int o2); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2); //! \overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3); //! 
\overload - ASMJIT_API InstNode* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3); // -------------------------------------------------------------------------- // [Func] // -------------------------------------------------------------------------- //! Create a new `X86FuncNode`. - ASMJIT_API X86FuncNode* newFunc(uint32_t conv, const FuncPrototype& p); + ASMJIT_API X86FuncNode* newFunc(const FuncPrototype& p); //! Add a new function. //! - //! \param conv Calling convention to use (see \ref FuncConv) - //! \param params Function arguments prototype. + //! \param p Function prototype. //! - //! This method is usually used as a first step when generating functions - //! by `Compiler`. First parameter `cconv` specifies function calling - //! convention to use. Second parameter `params` specifies function - //! arguments. To create function arguments are used templates - //! `FuncBuilder0<...>`, `FuncBuilder1<...>`, `FuncBuilder2<...>`, etc... + //! This method is usually used as a first step used to generate a dynamic + //! function. The prototype `p` contains a function calling convention, + //! return value, and parameters. There are some helper classes that simplify + //! function prototype building, see `FuncBuilder0<...>`, `FuncBuilder1<...>`, + //! `FuncBuilder2<...>`, etc... //! - //! Templates with FuncBuilder prefix are used to generate argument IDs - //! based on real C++ types. See next example how to generate function with - //! two 32-bit integer arguments. + //! Templates with `FuncBuilder` prefix are used to generate a function + //! prototype based on real C++ types. See the next example that shows how + //! to generate a function with two 32-bit integer arguments. //! //! ~~~ - //! // Building function using asmjit::Compiler example. + //! JitRuntime runtime; + //! X86Assembler a(&runtime); + //! 
X86Compiler c(&a); //! - //! // Compiler instance - //! Compiler c; + //! // Add a function - . + //! c.addFunc(FuncBuilder2(kCallConvHost)); //! - //! // Begin of function, also emits function prolog. - //! c.addFunc( - //! // Default calling convention (32-bit cdecl or 64-bit for host OS) - //! kFuncConvHost, - //! // Using function builder to generate arguments list - //! FuncBuilder2()); + //! // ... body ... //! - //! // End of function, also emits function epilog. + //! // End of the function. //! c.endFunc(); //! ~~~ //! - //! You can see that building functions is really easy. Previous code snipped - //! will generate code for function with two 32-bit integer arguments. You - //! can access arguments by `asmjit::Function::getArg()` method. Arguments - //! are indexed from 0 (like everything in C). + //! Building functions is really easy! The code snippet above can be used + //! to generate a function with two `int32_t` arguments. To assign a variable + //! to a function argument use `c.setArg(index, variable)`. //! //! ~~~ - //! // Accessing function arguments through asmjit::Function example. + //! JitRuntime runtime; + //! X86Assembler a(&runtime); + //! X86Compiler c(&a); //! - //! // Compiler instance - //! Compiler c; - //! X86GpVar a0(c, kVarTypeInt32); - //! X86GpVar a1(c, kVarTypeInt32); + //! X86GpVar arg0 = c.newInt32("arg0"); + //! X86GpVar arg1 = c.newInt32("arg1"); //! - //! // Begin of function (also emits function prolog) - //! c.addFunc( - //! // Default calling convention (32-bit cdecl or 64-bit for host OS) - //! kFuncConvHost, - //! // Using function builder to generate arguments list - //! FuncBuilder2()); + //! // Add a function - . + //! c.addFunc(FuncBuilder2(kCallConvHost)); //! - //! c.setArg(0, a0); - //! c.setArg(1, a1); + //! c.setArg(0, arg0); + //! c.setArg(1, arg1); //! - //! // Use them. - //! c.add(a0, a1); + //! // ... do something ... + //! c.add(arg0, arg1); //! - //! 
// End of function - emits function epilog and return instruction. + //! // End of the function. //! c.endFunc(); //! ~~~ //! //! Arguments are like variables. How to manipulate with variables is - //! documented in `Compiler`, variables section. + //! documented in a variables section of `X86Compiler` documentation. //! - //! \note To get current function use `getFunc()` method or save pointer to - //! `FuncNode` returned by `Compiler::addFunc<>` method. The recommended way - //! is saving the pointer and using it to specify function arguments and - //! return value. + //! \note To get the current function use `getFunc()` method. //! - //! \sa FuncBuilder0, FuncBuilder1, FuncBuilder2, ... - ASMJIT_API X86FuncNode* addFunc(uint32_t conv, const FuncPrototype& p); + //! \sa \ref FuncBuilder0, \ref FuncBuilder1, \ref FuncBuilder2. + ASMJIT_API X86FuncNode* addFunc(const FuncPrototype& p); - //! End of current function. - ASMJIT_API EndNode* endFunc(); + //! Emit a sentinel that marks the end of the current function. + ASMJIT_API HLSentinel* endFunc(); - //! Get current function as `X86FuncNode`. + //! Get the current function node casted to `X86FuncNode`. //! //! This method can be called within `addFunc()` and `endFunc()` block to get - //! current function you are working with. It's recommended to store `FuncNode` + //! current function you are working with. It's recommended to store `HLFunc` //! pointer returned by `addFunc<>` method, because this allows you in future //! implement function sections outside of function itself. ASMJIT_INLINE X86FuncNode* getFunc() const { @@ -2253,68 +962,114 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { // [Ret] // -------------------------------------------------------------------------- - //! Create a new `RetNode`. - ASMJIT_API RetNode* newRet(const Operand& o0, const Operand& o1); - //! Add a new `RetNode`. - ASMJIT_API RetNode* addRet(const Operand& o0, const Operand& o1); + //! Create a new `HLRet`. 
+ ASMJIT_API HLRet* newRet(const Operand& o0, const Operand& o1); + //! Add a new `HLRet`. + ASMJIT_API HLRet* addRet(const Operand& o0, const Operand& o1); // -------------------------------------------------------------------------- // [Call] // -------------------------------------------------------------------------- //! Create a new `X86CallNode`. - ASMJIT_API X86CallNode* newCall(const Operand& o0, uint32_t conv, const FuncPrototype& p); + ASMJIT_API X86CallNode* newCall(const Operand& o0, const FuncPrototype& p); //! Add a new `X86CallNode`. - ASMJIT_API X86CallNode* addCall(const Operand& o0, uint32_t conv, const FuncPrototype& p); + ASMJIT_API X86CallNode* addCall(const Operand& o0, const FuncPrototype& p); + + // -------------------------------------------------------------------------- + // [Args] + // -------------------------------------------------------------------------- + + //! Set function argument to `var`. + ASMJIT_API Error setArg(uint32_t argIndex, const Var& var); // -------------------------------------------------------------------------- // [Vars] // -------------------------------------------------------------------------- - //! Set function argument to `var`. - ASMJIT_API Error setArg(uint32_t argIndex, Var& var); + ASMJIT_API virtual Error _newVar(Var* var, uint32_t type, const char* name, va_list ap); - ASMJIT_API virtual Error _newVar(Var* var, uint32_t type, const char* name); - - //! Create a new Gp variable. 
- ASMJIT_INLINE X86GpVar newGpVar(uint32_t vType = kVarTypeIntPtr, const char* name = NULL) { - ASMJIT_ASSERT(vType < kX86VarTypeCount); - ASMJIT_ASSERT(IntUtil::inInterval(vType, _kVarTypeIntStart, _kVarTypeIntEnd)); - - X86GpVar var(NoInit); - _newVar(&var, vType, name); - return var; +#if !defined(ASMJIT_DISABLE_LOGGER) +#define ASMJIT_NEW_VAR_TYPE(func, type, typeFirst, typeLast) \ + ASMJIT_INLINE type func(uint32_t vType) { \ + ASMJIT_ASSERT(vType < kX86VarTypeCount); \ + ASMJIT_ASSERT(Utils::inInterval(vType, typeFirst, typeLast)); \ + \ + type var(NoInit); \ + _newVar(&var, vType, NULL, NULL); \ + return var; \ + } \ + ASMJIT_NOINLINE type func(uint32_t vType, const char* name, ...) { \ + ASMJIT_ASSERT(vType < kX86VarTypeCount); \ + ASMJIT_ASSERT(Utils::inInterval(vType, typeFirst, typeLast)); \ + \ + type var(NoInit); \ + va_list ap; \ + va_start(ap, name); \ + \ + _newVar(&var, vType, name, ap); \ + \ + va_end(ap); \ + return var; \ } - - //! Create a new Mm variable. - ASMJIT_INLINE X86MmVar newMmVar(uint32_t vType = kX86VarTypeMm, const char* name = NULL) { - ASMJIT_ASSERT(vType < kX86VarTypeCount); - ASMJIT_ASSERT(IntUtil::inInterval(vType, _kX86VarTypeMmStart, _kX86VarTypeMmEnd)); - - X86MmVar var(NoInit); - _newVar(&var, vType, name); - return var; +#define ASMJIT_NEW_VAR_AUTO(func, type, typeId) \ + ASMJIT_INLINE type func() { \ + type var(NoInit); \ + _newVar(&var, typeId, NULL, NULL); \ + return var; \ + } \ + \ + ASMJIT_NOINLINE type func(const char* name, ...) { \ + type var(NoInit); \ + va_list ap; \ + va_start(ap, name); \ + \ + _newVar(&var, typeId, name, ap); \ + \ + va_end(ap); \ + return var; \ } - - //! Create a new Xmm variable. 
- ASMJIT_INLINE X86XmmVar newXmmVar(uint32_t vType = kX86VarTypeXmm, const char* name = NULL) { - ASMJIT_ASSERT(vType < kX86VarTypeCount); - ASMJIT_ASSERT(IntUtil::inInterval(vType, _kX86VarTypeXmmStart, _kX86VarTypeXmmEnd)); - - X86XmmVar var(NoInit); - _newVar(&var, vType, name); - return var; +#else + ASMJIT_INLINE type func() { \ + type var(NoInit); \ + _newVar(&var, typeId, NULL, NULL); \ + return var; \ + } \ + \ + ASMJIT_NOINLINE type func(const char* name, ...) { \ + type var(NoInit); \ + _newVar(&var, typeId, NULL, NULL); \ + return var; \ } +#endif - //! Create a new Ymm variable. - ASMJIT_INLINE X86YmmVar newYmmVar(uint32_t vType = kX86VarTypeYmm, const char* name = NULL) { - ASMJIT_ASSERT(vType < kX86VarTypeCount); - ASMJIT_ASSERT(IntUtil::inInterval(vType, _kX86VarTypeYmmStart, _kX86VarTypeYmmEnd)); + ASMJIT_NEW_VAR_TYPE(newGpVar , X86GpVar , _kVarTypeIntStart , _kVarTypeIntEnd ) + ASMJIT_NEW_VAR_TYPE(newMmVar , X86MmVar , _kX86VarTypeMmStart , _kX86VarTypeMmEnd ) + ASMJIT_NEW_VAR_TYPE(newXmmVar , X86XmmVar, _kX86VarTypeXmmStart, _kX86VarTypeXmmEnd) + ASMJIT_NEW_VAR_TYPE(newYmmVar , X86YmmVar, _kX86VarTypeYmmStart, _kX86VarTypeYmmEnd) - X86YmmVar var(NoInit); - _newVar(&var, vType, name); - return var; - } + ASMJIT_NEW_VAR_AUTO(newInt8 , X86GpVar , kVarTypeInt8 ) + ASMJIT_NEW_VAR_AUTO(newInt16 , X86GpVar , kVarTypeInt16 ) + ASMJIT_NEW_VAR_AUTO(newInt32 , X86GpVar , kVarTypeInt32 ) + ASMJIT_NEW_VAR_AUTO(newInt64 , X86GpVar , kVarTypeInt64 ) + ASMJIT_NEW_VAR_AUTO(newIntPtr , X86GpVar , kVarTypeIntPtr ) + ASMJIT_NEW_VAR_AUTO(newUInt8 , X86GpVar , kVarTypeUInt8 ) + ASMJIT_NEW_VAR_AUTO(newUInt16 , X86GpVar , kVarTypeUInt16 ) + ASMJIT_NEW_VAR_AUTO(newUInt32 , X86GpVar , kVarTypeUInt32 ) + ASMJIT_NEW_VAR_AUTO(newUInt64 , X86GpVar , kVarTypeUInt64 ) + ASMJIT_NEW_VAR_AUTO(newUIntPtr, X86GpVar , kVarTypeUIntPtr ) + ASMJIT_NEW_VAR_AUTO(newMm , X86MmVar , kX86VarTypeMm ) + ASMJIT_NEW_VAR_AUTO(newXmm , X86XmmVar, kX86VarTypeXmm ) + ASMJIT_NEW_VAR_AUTO(newXmmSs , 
X86XmmVar, kX86VarTypeXmmSs) + ASMJIT_NEW_VAR_AUTO(newXmmSd , X86XmmVar, kX86VarTypeXmmSd) + ASMJIT_NEW_VAR_AUTO(newXmmPs , X86XmmVar, kX86VarTypeXmmPs) + ASMJIT_NEW_VAR_AUTO(newXmmPd , X86XmmVar, kX86VarTypeXmmPd) + ASMJIT_NEW_VAR_AUTO(newYmm , X86YmmVar, kX86VarTypeYmm ) + ASMJIT_NEW_VAR_AUTO(newYmmPs , X86YmmVar, kX86VarTypeYmmPs) + ASMJIT_NEW_VAR_AUTO(newYmmPd , X86YmmVar, kX86VarTypeYmmPd) + +#undef ASMJIT_NEW_VAR_AUTO +#undef ASMJIT_NEW_VAR_TYPE // -------------------------------------------------------------------------- // [Stack] @@ -2381,70 +1136,52 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { // -------------------------------------------------------------------------- //! Add 8-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* db(uint8_t x) { return embed(&x, 1); } + ASMJIT_INLINE Error db(uint8_t x) { return embed(&x, 1); } //! Add 16-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* dw(uint16_t x) { return embed(&x, 2); } + ASMJIT_INLINE Error dw(uint16_t x) { return embed(&x, 2); } //! Add 32-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* dd(uint32_t x) { return embed(&x, 4); } + ASMJIT_INLINE Error dd(uint32_t x) { return embed(&x, 4); } //! Add 64-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* dq(uint64_t x) { return embed(&x, 8); } + ASMJIT_INLINE Error dq(uint64_t x) { return embed(&x, 8); } //! Add 8-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* dint8(int8_t x) { return embed(&x, static_cast(sizeof(int8_t))); } + ASMJIT_INLINE Error dint8(int8_t x) { return embed(&x, static_cast(sizeof(int8_t))); } //! Add 8-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* duint8(uint8_t x) { return embed(&x, static_cast(sizeof(uint8_t))); } + ASMJIT_INLINE Error duint8(uint8_t x) { return embed(&x, static_cast(sizeof(uint8_t))); } //! Add 16-bit integer data to the instruction stream. 
- ASMJIT_INLINE EmbedNode* dint16(int16_t x) { return embed(&x, static_cast(sizeof(int16_t))); } + ASMJIT_INLINE Error dint16(int16_t x) { return embed(&x, static_cast(sizeof(int16_t))); } //! Add 16-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* duint16(uint16_t x) { return embed(&x, static_cast(sizeof(uint16_t))); } + ASMJIT_INLINE Error duint16(uint16_t x) { return embed(&x, static_cast(sizeof(uint16_t))); } //! Add 32-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* dint32(int32_t x) { return embed(&x, static_cast(sizeof(int32_t))); } + ASMJIT_INLINE Error dint32(int32_t x) { return embed(&x, static_cast(sizeof(int32_t))); } //! Add 32-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* duint32(uint32_t x) { return embed(&x, static_cast(sizeof(uint32_t))); } + ASMJIT_INLINE Error duint32(uint32_t x) { return embed(&x, static_cast(sizeof(uint32_t))); } //! Add 64-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* dint64(int64_t x) { return embed(&x, static_cast(sizeof(int64_t))); } + ASMJIT_INLINE Error dint64(int64_t x) { return embed(&x, static_cast(sizeof(int64_t))); } //! Add 64-bit integer data to the instruction stream. - ASMJIT_INLINE EmbedNode* duint64(uint64_t x) { return embed(&x, static_cast(sizeof(uint64_t))); } + ASMJIT_INLINE Error duint64(uint64_t x) { return embed(&x, static_cast(sizeof(uint64_t))); } //! Add float data to the instruction stream. - ASMJIT_INLINE EmbedNode* dfloat(float x) { return embed(&x, static_cast(sizeof(float))); } + ASMJIT_INLINE Error dfloat(float x) { return embed(&x, static_cast(sizeof(float))); } //! Add double data to the instruction stream. - ASMJIT_INLINE EmbedNode* ddouble(double x) { return embed(&x, static_cast(sizeof(double))); } + ASMJIT_INLINE Error ddouble(double x) { return embed(&x, static_cast(sizeof(double))); } //! Add Mm data to the instruction stream. 
- ASMJIT_INLINE EmbedNode* dmm(const Vec64& x) { return embed(&x, static_cast(sizeof(Vec64))); } + ASMJIT_INLINE Error dmm(const Vec64& x) { return embed(&x, static_cast(sizeof(Vec64))); } //! Add Xmm data to the instruction stream. - ASMJIT_INLINE EmbedNode* dxmm(const Vec128& x) { return embed(&x, static_cast(sizeof(Vec128))); } + ASMJIT_INLINE Error dxmm(const Vec128& x) { return embed(&x, static_cast(sizeof(Vec128))); } //! Add Ymm data to the instruction stream. - ASMJIT_INLINE EmbedNode* dymm(const Vec256& x) { return embed(&x, static_cast(sizeof(Vec256))); } + ASMJIT_INLINE Error dymm(const Vec256& x) { return embed(&x, static_cast(sizeof(Vec256))); } //! Add data in a given structure instance to the instruction stream. template - ASMJIT_INLINE EmbedNode* dstruct(const T& x) { return embed(&x, static_cast(sizeof(T))); } - - // -------------------------------------------------------------------------- - // [Make] - // -------------------------------------------------------------------------- - - ASMJIT_API virtual void* make(); + ASMJIT_INLINE Error dstruct(const T& x) { return embed(&x, static_cast(sizeof(T))); } // ------------------------------------------------------------------------- - // [Assembler] - // ------------------------------------------------------------------------- - - ASMJIT_API virtual Assembler* _newAssembler(); - - // ------------------------------------------------------------------------- - // [Serialize] - // ------------------------------------------------------------------------- - - ASMJIT_API virtual Error serialize(Assembler* assembler); - - // ------------------------------------------------------------------------- - // [Options] + // [Instruction Options] // ------------------------------------------------------------------------- ASMJIT_X86_EMIT_OPTIONS(X86Compiler) @@ -2484,256 +1221,256 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { // -------------------------------------------------------------------------- 
#define INST_0x(_Inst_, _Code_) \ - ASMJIT_INLINE InstNode* _Inst_() { \ + ASMJIT_INLINE HLInst* _Inst_() { \ return emit(_Code_); \ } #define INST_1x(_Inst_, _Code_, _Op0_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0) { \ return emit(_Code_, o0); \ } #define INST_1x_(_Inst_, _Code_, _Op0_, _Cond_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0) { \ ASMJIT_ASSERT(_Cond_); \ return emit(_Code_, o0); \ } #define INST_1i(_Inst_, _Code_, _Op0_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0) { \ return emit(_Code_, o0); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(int o0) { \ + ASMJIT_INLINE HLInst* _Inst_(int o0) { \ return emit(_Code_, o0); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(unsigned int o0) { \ + ASMJIT_INLINE HLInst* _Inst_(unsigned int o0) { \ return emit(_Code_, static_cast(o0)); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(int64_t o0) { \ + ASMJIT_INLINE HLInst* _Inst_(int64_t o0) { \ return emit(_Code_, static_cast(o0)); \ } \ /*! 
\overload */ \ - ASMJIT_INLINE InstNode* _Inst_(uint64_t o0) { \ + ASMJIT_INLINE HLInst* _Inst_(uint64_t o0) { \ return emit(_Code_, o0); \ } #define INST_1cc(_Inst_, _Code_, _Translate_, _Op0_) \ - ASMJIT_INLINE InstNode* _Inst_(uint32_t cc, const _Op0_& o0) { \ + ASMJIT_INLINE HLInst* _Inst_(uint32_t cc, const _Op0_& o0) { \ return emit(_Translate_(cc), o0); \ } \ \ - ASMJIT_INLINE InstNode* _Inst_##a(const _Op0_& o0) { return emit(_Code_##a, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##ae(const _Op0_& o0) { return emit(_Code_##ae, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##b(const _Op0_& o0) { return emit(_Code_##b, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##be(const _Op0_& o0) { return emit(_Code_##be, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##c(const _Op0_& o0) { return emit(_Code_##c, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##e(const _Op0_& o0) { return emit(_Code_##e, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##g(const _Op0_& o0) { return emit(_Code_##g, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##ge(const _Op0_& o0) { return emit(_Code_##ge, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##l(const _Op0_& o0) { return emit(_Code_##l, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##le(const _Op0_& o0) { return emit(_Code_##le, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##na(const _Op0_& o0) { return emit(_Code_##na, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##nae(const _Op0_& o0) { return emit(_Code_##nae, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##nb(const _Op0_& o0) { return emit(_Code_##nb, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##nbe(const _Op0_& o0) { return emit(_Code_##nbe, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##nc(const _Op0_& o0) { return emit(_Code_##nc, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##ne(const _Op0_& o0) { return emit(_Code_##ne, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##ng(const _Op0_& o0) { return emit(_Code_##ng, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##nge(const _Op0_& o0) { return emit(_Code_##nge, o0); } \ - ASMJIT_INLINE InstNode* 
_Inst_##nl(const _Op0_& o0) { return emit(_Code_##nl, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##nle(const _Op0_& o0) { return emit(_Code_##nle, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##no(const _Op0_& o0) { return emit(_Code_##no, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##np(const _Op0_& o0) { return emit(_Code_##np, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##ns(const _Op0_& o0) { return emit(_Code_##ns, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##nz(const _Op0_& o0) { return emit(_Code_##nz, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##o(const _Op0_& o0) { return emit(_Code_##o, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##p(const _Op0_& o0) { return emit(_Code_##p, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##pe(const _Op0_& o0) { return emit(_Code_##pe, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##po(const _Op0_& o0) { return emit(_Code_##po, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##s(const _Op0_& o0) { return emit(_Code_##s, o0); } \ - ASMJIT_INLINE InstNode* _Inst_##z(const _Op0_& o0) { return emit(_Code_##z, o0); } + ASMJIT_INLINE HLInst* _Inst_##a(const _Op0_& o0) { return emit(_Code_##a, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##ae(const _Op0_& o0) { return emit(_Code_##ae, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##b(const _Op0_& o0) { return emit(_Code_##b, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##be(const _Op0_& o0) { return emit(_Code_##be, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##c(const _Op0_& o0) { return emit(_Code_##c, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##e(const _Op0_& o0) { return emit(_Code_##e, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##g(const _Op0_& o0) { return emit(_Code_##g, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##ge(const _Op0_& o0) { return emit(_Code_##ge, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##l(const _Op0_& o0) { return emit(_Code_##l, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##le(const _Op0_& o0) { return emit(_Code_##le, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##na(const _Op0_& o0) { return emit(_Code_##na, o0); } \ + ASMJIT_INLINE 
HLInst* _Inst_##nae(const _Op0_& o0) { return emit(_Code_##nae, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nb(const _Op0_& o0) { return emit(_Code_##nb, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nbe(const _Op0_& o0) { return emit(_Code_##nbe, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nc(const _Op0_& o0) { return emit(_Code_##nc, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##ne(const _Op0_& o0) { return emit(_Code_##ne, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##ng(const _Op0_& o0) { return emit(_Code_##ng, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nge(const _Op0_& o0) { return emit(_Code_##nge, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nl(const _Op0_& o0) { return emit(_Code_##nl, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nle(const _Op0_& o0) { return emit(_Code_##nle, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##no(const _Op0_& o0) { return emit(_Code_##no, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##np(const _Op0_& o0) { return emit(_Code_##np, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##ns(const _Op0_& o0) { return emit(_Code_##ns, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nz(const _Op0_& o0) { return emit(_Code_##nz, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##o(const _Op0_& o0) { return emit(_Code_##o, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##p(const _Op0_& o0) { return emit(_Code_##p, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##pe(const _Op0_& o0) { return emit(_Code_##pe, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##po(const _Op0_& o0) { return emit(_Code_##po, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##s(const _Op0_& o0) { return emit(_Code_##s, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##z(const _Op0_& o0) { return emit(_Code_##z, o0); } #define INST_2x(_Inst_, _Code_, _Op0_, _Op1_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1) { \ return emit(_Code_, o0, o1); \ } #define INST_2x_(_Inst_, _Code_, _Op0_, _Op1_, _Cond_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1) { \ + ASMJIT_INLINE 
HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1) { \ ASMJIT_ASSERT(_Cond_); \ return emit(_Code_, o0, o1); \ } #define INST_2i(_Inst_, _Code_, _Op0_, _Op1_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1) { \ return emit(_Code_, o0, o1); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, int o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int o1) { \ return emit(_Code_, o0, o1); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, unsigned int o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, unsigned int o1) { \ return emit(_Code_, o0, static_cast(o1)); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, int64_t o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int64_t o1) { \ return emit(_Code_, o0, static_cast(o1)); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, uint64_t o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, uint64_t o1) { \ return emit(_Code_, o0, o1); \ } #define INST_2cc(_Inst_, _Code_, _Translate_, _Op0_, _Op1_) \ - ASMJIT_INLINE InstNode* _Inst_(uint32_t cc, const _Op0_& o0, const _Op1_& o1) { \ + ASMJIT_INLINE HLInst* _Inst_(uint32_t cc, const _Op0_& o0, const _Op1_& o1) { \ return emit(_Translate_(cc), o0, o1); \ } \ \ - ASMJIT_INLINE InstNode* _Inst_##a(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##a, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##ae(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ae, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##b(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##b, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##be(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##be, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##c(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##c, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##e(const _Op0_& o0, const _Op1_& o1) { return 
emit(_Code_##e, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##g(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##g, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##ge(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ge, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##l(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##l, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##le(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##le, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##na(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##na, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##nae(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nae, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##nb(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nb, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##nbe(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nbe, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##nc(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nc, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##ne(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ne, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##ng(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ng, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##nge(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nge, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##nl(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nl, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##nle(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nle, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##no(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##no, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##np(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##np, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##ns(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ns, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##nz(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nz, 
o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##o(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##o, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##p(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##p, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##pe(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##pe, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##po(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##po, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##s(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##s, o0, o1); } \ - ASMJIT_INLINE InstNode* _Inst_##z(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##z, o0, o1); } + ASMJIT_INLINE HLInst* _Inst_##a(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##a, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##ae(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ae, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##b(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##b, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##be(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##be, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##c(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##c, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##e(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##e, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##g(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##g, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##ge(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ge, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##l(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##l, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##le(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##le, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##na(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##na, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nae(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nae, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nb(const _Op0_& 
o0, const _Op1_& o1) { return emit(_Code_##nb, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nbe(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nbe, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nc(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nc, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##ne(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ne, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##ng(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ng, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nge(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nge, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nl(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nl, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nle(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nle, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##no(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##no, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##np(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##np, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##ns(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ns, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nz(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nz, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##o(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##o, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##p(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##p, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##pe(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##pe, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##po(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##po, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##s(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##s, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##z(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##z, o0, o1); } #define INST_3x(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, 
const _Op1_& o1, const _Op2_& o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { \ return emit(_Code_, o0, o1, o2); \ } #define INST_3x_(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Cond_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { \ ASMJIT_ASSERT(_Cond_); \ return emit(_Code_, o0, o1, o2); \ } #define INST_3i(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { \ return emit(_Code_, o0, o1, o2); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, int o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int o2) { \ return emit(_Code_, o0, o1, o2); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2) { \ return emit(_Code_, o0, o1, static_cast(o2)); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2) { \ return emit(_Code_, o0, o1, static_cast(o2)); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2) { \ return emit(_Code_, o0, o1, o2); \ } #define INST_3ii(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { \ return emit(_Code_, o0, o1, o2); \ } \ /*! 
\overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, int o1, int o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int o1, int o2) { \ Imm o1Imm(o1); \ return emit(_Code_, o0, o1Imm, o2); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, unsigned int o1, unsigned int o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, unsigned int o1, unsigned int o2) { \ Imm o1Imm(o1); \ return emit(_Code_, o0, o1Imm, static_cast(o2)); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, int64_t o1, int64_t o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int64_t o1, int64_t o2) { \ Imm o1Imm(o1); \ return emit(_Code_, o0, o1Imm, static_cast(o2)); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, uint64_t o1, uint64_t o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, uint64_t o1, uint64_t o2) { \ Imm o1Imm(o1); \ return emit(_Code_, o0, o1Imm, o2); \ } #define INST_4x(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ return emit(_Code_, o0, o1, o2, o3); \ } #define INST_4x_(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Cond_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ ASMJIT_ASSERT(_Cond_); \ return emit(_Code_, o0, o1, o2, o3); \ } #define INST_4i(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ return emit(_Code_, o0, o1, o2, o3); \ } \ /*! 
\overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int o3) { \ return emit(_Code_, o0, o1, o2, o3); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, unsigned int o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, unsigned int o3) { \ return emit(_Code_, o0, o1, o2, static_cast(o3)); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int64_t o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int64_t o3) { \ return emit(_Code_, o0, o1, o2, static_cast(o3)); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, uint64_t o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, uint64_t o3) { \ return emit(_Code_, o0, o1, o2, o3); \ } #define INST_4ii(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ return emit(_Code_, o0, o1, o2, o3); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, int o2, int o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int o2, int o3) { \ Imm o2Imm(o2); \ return emit(_Code_, o0, o1, o2Imm, o3); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2, unsigned int o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2, unsigned int o3) { \ Imm o2Imm(o2); \ return emit(_Code_, o0, o1, o2Imm, static_cast(o3)); \ } \ /*! 
\overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2, int64_t o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2, int64_t o3) { \ Imm o2Imm(o2); \ return emit(_Code_, o0, o1, o2Imm, static_cast(o3)); \ } \ /*! \overload */ \ - ASMJIT_INLINE InstNode* _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2, uint64_t o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2, uint64_t o3) { \ Imm o2Imm(o2); \ return emit(_Code_, o0, o1, o2Imm, o3); \ } @@ -2825,24 +1562,24 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { INST_2i(bts, kX86InstIdBts, X86Mem, Imm) //! Call a function. - ASMJIT_INLINE X86CallNode* call(const X86GpVar& dst, uint32_t conv, const FuncPrototype& p) { - return addCall(dst, conv, p); + ASMJIT_INLINE X86CallNode* call(const X86GpVar& dst, const FuncPrototype& p) { + return addCall(dst, p); } //! \overload - ASMJIT_INLINE X86CallNode* call(const X86Mem& dst, uint32_t conv, const FuncPrototype& p) { - return addCall(dst, conv, p); + ASMJIT_INLINE X86CallNode* call(const X86Mem& dst, const FuncPrototype& p) { + return addCall(dst, p); } //! \overload - ASMJIT_INLINE X86CallNode* call(const Label& label, uint32_t conv, const FuncPrototype& p) { - return addCall(label, conv, p); + ASMJIT_INLINE X86CallNode* call(const Label& label, const FuncPrototype& p) { + return addCall(label, p); } //! \overload - ASMJIT_INLINE X86CallNode* call(const Imm& dst, uint32_t conv, const FuncPrototype& p) { - return addCall(dst, conv, p); + ASMJIT_INLINE X86CallNode* call(const Imm& dst, const FuncPrototype& p) { + return addCall(dst, p); } //! \overload - ASMJIT_INLINE X86CallNode* call(Ptr dst, uint32_t conv, const FuncPrototype& p) { - return addCall(Imm(dst), conv, p); + ASMJIT_INLINE X86CallNode* call(Ptr dst, const FuncPrototype& p) { + return addCall(Imm(dst), p); } //! 
Clear carry flag @@ -2896,7 +1633,7 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { INST_3x(cmpxchg, kX86InstIdCmpxchg, X86GpVar /* eax */, X86Mem, X86GpVar) //! Compare and exchange 128-bit value in RDX:RAX with `x_mem` (X64 Only). - ASMJIT_INLINE InstNode* cmpxchg16b( + ASMJIT_INLINE HLInst* cmpxchg16b( const X86GpVar& r_edx, const X86GpVar& r_eax, const X86GpVar& r_ecx, const X86GpVar& r_ebx, const X86Mem& x_mem) { @@ -2905,7 +1642,7 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { } //! Compare and exchange 64-bit value in EDX:EAX with `x_mem` (Pentium). - ASMJIT_INLINE InstNode* cmpxchg8b( + ASMJIT_INLINE HLInst* cmpxchg8b( const X86GpVar& r_edx, const X86GpVar& r_eax, const X86GpVar& r_ecx, const X86GpVar& r_ebx, const X86Mem& x_mem) { @@ -2914,7 +1651,7 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { } //! CPU identification (i486). - ASMJIT_INLINE InstNode* cpuid( + ASMJIT_INLINE HLInst* cpuid( const X86GpVar& x_eax, const X86GpVar& w_ebx, const X86GpVar& x_ecx, @@ -2979,7 +1716,7 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { //! Interrupt. INST_1i(int_, kX86InstIdInt, Imm) //! Interrupt 3 - trap to debugger. - ASMJIT_INLINE InstNode* int3() { return int_(3); } + ASMJIT_INLINE HLInst* int3() { return int_(3); } //! Jump to label `label` if condition `cc` is met. INST_1cc(j, kX86InstIdJ, X86Util::condToJcc, Label) @@ -2996,7 +1733,7 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { //! \overload INST_1x(jmp, kX86InstIdJmp, Imm) //! \overload - ASMJIT_INLINE InstNode* jmp(Ptr dst) { return jmp(Imm(dst)); } + ASMJIT_INLINE HLInst* jmp(Ptr dst) { return jmp(Imm(dst)); } //! Load AH from flags. INST_1x(lahf, kX86InstIdLahf, X86GpVar) @@ -3036,7 +1773,7 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { //! Move (AL|AX|EAX|RAX <- absolute address in immediate). INST_2x(mov_ptr, kX86InstIdMovPtr, X86GpReg, Imm); //! 
\overload - ASMJIT_INLINE InstNode* mov_ptr(const X86GpReg& o0, Ptr o1) { + ASMJIT_INLINE HLInst* mov_ptr(const X86GpReg& o0, Ptr o1) { ASMJIT_ASSERT(o0.getRegIndex() == 0); return emit(kX86InstIdMovPtr, o0, Imm(o1)); } @@ -3044,7 +1781,7 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { //! Move (absolute address in immediate <- AL|AX|EAX|RAX). INST_2x(mov_ptr, kX86InstIdMovPtr, Imm, X86GpReg); //! \overload - ASMJIT_INLINE InstNode* mov_ptr(Ptr o0, const X86GpReg& o1) { + ASMJIT_INLINE HLInst* mov_ptr(Ptr o0, const X86GpReg& o1) { ASMJIT_ASSERT(o1.getRegIndex() == 0); return emit(kX86InstIdMovPtr, Imm(o0), o1); } @@ -3118,7 +1855,7 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { //! Push WORD or DWORD/QWORD on the stack. INST_1x_(push, kX86InstIdPush, X86GpVar, o0.getSize() == 2 || o0.getSize() == _regSize) //! Push WORD or DWORD/QWORD on the stack. - INST_1x_(push, kX86InstIdPush, X86Mem,o0.getSize() == 2 || o0.getSize() == _regSize) + INST_1x_(push, kX86InstIdPush, X86Mem, o0.getSize() == 2 || o0.getSize() == _regSize) //! Push segment register on the stack. INST_1x(push, kX86InstIdPush, X86SegReg) //! Push WORD or DWORD/QWORD on the stack. @@ -3214,15 +1951,15 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler { INST_3x_(repne_scasw, kX86InstIdRepneScasW, X86GpVar, X86GpVar, X86GpVar, o0.getId() != o1.getId() && o1.getId() != o2.getId()) //! Return. - ASMJIT_INLINE RetNode* ret() { return addRet(noOperand, noOperand); } + ASMJIT_INLINE HLRet* ret() { return addRet(noOperand, noOperand); } //! \overload - ASMJIT_INLINE RetNode* ret(const X86GpVar& o0) { return addRet(o0, noOperand); } + ASMJIT_INLINE HLRet* ret(const X86GpVar& o0) { return addRet(o0, noOperand); } //! \overload - ASMJIT_INLINE RetNode* ret(const X86GpVar& o0, const X86GpVar& o1) { return addRet(o0, o1); } + ASMJIT_INLINE HLRet* ret(const X86GpVar& o0, const X86GpVar& o1) { return addRet(o0, o1); } //! 
\overload - ASMJIT_INLINE RetNode* ret(const X86XmmVar& o0) { return addRet(o0, noOperand); } + ASMJIT_INLINE HLRet* ret(const X86XmmVar& o0) { return addRet(o0, noOperand); } //! \overload - ASMJIT_INLINE RetNode* ret(const X86XmmVar& o0, const X86XmmVar& o1) { return addRet(o0, o1); } + ASMJIT_INLINE HLRet* ret(const X86XmmVar& o0, const X86XmmVar& o1) { return addRet(o0, o1); } //! Rotate bits left. INST_2x(rol, kX86InstIdRol, X86GpVar, X86GpVar) diff --git a/src/asmjit/x86/x86context.cpp b/src/asmjit/x86/x86compilercontext.cpp similarity index 81% rename from src/asmjit/x86/x86context.cpp rename to src/asmjit/x86/x86compilercontext.cpp index fedd32c..d4fe39a 100644 --- a/src/asmjit/x86/x86context.cpp +++ b/src/asmjit/x86/x86compilercontext.cpp @@ -12,11 +12,11 @@ #if !defined(ASMJIT_DISABLE_COMPILER) && (defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)) // [Dependencies - AsmJit] -#include "../base/intutil.h" -#include "../base/string.h" +#include "../base/containers.h" +#include "../base/utils.h" #include "../x86/x86assembler.h" #include "../x86/x86compiler.h" -#include "../x86/x86context_p.h" +#include "../x86/x86compilercontext_p.h" #include "../x86/x86cpuinfo.h" #include "../x86/x86scheduler_p.h" @@ -113,12 +113,11 @@ static void X86Context_annotateOperand(X86Context* self, } sb.appendChar(prefix); - /* - if ((loggerOptions & (1 << kLoggerOptionHexDisplacement)) != 0 && dispOffset > 9) { - sb.appendString("0x", 2); - base = 16; - } - */ + // TODO: Enable again: + // if ((loggerOptions & (1 << kLoggerOptionHexDisplacement)) != 0 && dispOffset > 9) { + // sb.appendString("0x", 2); + // base = 16; + // } sb.appendUInt(static_cast(dispOffset), base); } @@ -158,32 +157,32 @@ static bool X86Context_annotateInstruction(X86Context* self, #endif // !ASMJIT_DISABLE_LOGGER #if defined(ASMJIT_TRACE) -static void X86Context_traceNode(X86Context* self, Node* node_) { - StringBuilderT<256> sb; +static void X86Context_traceNode(X86Context* self, HLNode* node_, const 
char* prefix) { + StringBuilderTmp<256> sb; switch (node_->getType()) { - case kNodeTypeAlign: { - AlignNode* node = static_cast(node_); + case kHLNodeTypeAlign: { + HLAlign* node = static_cast(node_); sb.appendFormat(".align %u (%s)", node->getOffset(), - node->getMode() == kAlignCode ? "code" : "data"); + node->getAlignMode() == kAlignCode ? "code" : "data"); break; } - case kNodeTypeEmbed: { - EmbedNode* node = static_cast(node_); + case kHLNodeTypeData: { + HLData* node = static_cast(node_); sb.appendFormat(".embed (%u bytes)", node->getSize()); break; } - case kNodeTypeComment: { - CommentNode* node = static_cast(node_); + case kHLNodeTypeComment: { + HLComment* node = static_cast(node_); sb.appendFormat("; %s", node->getComment()); break; } - case kNodeTypeHint: { - HintNode* node = static_cast(node_); + case kHLNodeTypeHint: { + HLHint* node = static_cast(node_); static const char* hint[16] = { "alloc", "spill", @@ -196,47 +195,47 @@ static void X86Context_traceNode(X86Context* self, Node* node_) { break; } - case kNodeTypeTarget: { - TargetNode* node = static_cast(node_); + case kHLNodeTypeLabel: { + HLLabel* node = static_cast(node_); sb.appendFormat("L%u: (NumRefs=%u)", node->getLabelId(), node->getNumRefs()); break; } - case kNodeTypeInst: { - InstNode* node = static_cast(node_); + case kHLNodeTypeInst: { + HLInst* node = static_cast(node_); X86Context_annotateInstruction(self, sb, node->getInstId(), node->getOpList(), node->getOpCount()); break; } - case kNodeTypeFunc: { - FuncNode* node = static_cast(node_); + case kHLNodeTypeFunc: { + HLFunc* node = static_cast(node_); sb.appendFormat("[func]"); break; } - case kNodeTypeEnd: { - EndNode* node = static_cast(node_); + case kHLNodeTypeSentinel: { + HLSentinel* node = static_cast(node_); sb.appendFormat("[end]"); break; } - case kNodeTypeRet: { - RetNode* node = static_cast(node_); + case kHLNodeTypeRet: { + HLRet* node = static_cast(node_); sb.appendFormat("[ret]"); break; } - case kNodeTypeCall: { - 
CallNode* node = static_cast(node_); + case kHLNodeTypeCall: { + HLCall* node = static_cast(node_); sb.appendFormat("[call]"); break; } - case kNodeTypeSArg: { - SArgNode* node = static_cast(node_); + case kHLNodeTypeCallArg: { + HLCallArg* node = static_cast(node_); sb.appendFormat("[sarg]"); break; } @@ -247,7 +246,7 @@ static void X86Context_traceNode(X86Context* self, Node* node_) { } } - ASMJIT_TLOG("[%05u] %s\n", node_->getFlowId(), sb.getData()); + ASMJIT_TLOG("%s[%05u] %s\n", prefix, node_->getFlowId(), sb.getData()); } #endif // ASMJIT_TRACE @@ -266,7 +265,7 @@ X86Context::X86Context(X86Compiler* compiler) : Context(compiler) { _memSlot.setGpdBase(compiler->getArch() == kArchX86); #if !defined(ASMJIT_DISABLE_LOGGER) - _emitComments = compiler->hasLogger(); + _emitComments = compiler->getAssembler()->hasLogger(); #endif // !ASMJIT_DISABLE_LOGGER _state = &_x86State; @@ -286,10 +285,10 @@ void X86Context::reset(bool releaseMemory) { _clobberedRegs.reset(); _stackFrameCell = NULL; - _gaRegs[kX86RegClassGp ] = IntUtil::bits(_regCount.getGp()) & ~IntUtil::mask(kX86RegIndexSp); - _gaRegs[kX86RegClassMm ] = IntUtil::bits(_regCount.getMm()); - _gaRegs[kX86RegClassK ] = IntUtil::bits(_regCount.getK()); - _gaRegs[kX86RegClassXyz] = IntUtil::bits(_regCount.getXyz()); + _gaRegs[kX86RegClassGp ] = Utils::bits(_regCount.getGp()) & ~Utils::mask(kX86RegIndexSp); + _gaRegs[kX86RegClassMm ] = Utils::bits(_regCount.getMm()); + _gaRegs[kX86RegClassK ] = Utils::bits(_regCount.getK()); + _gaRegs[kX86RegClassXyz] = Utils::bits(_regCount.getXyz()); _argBaseReg = kInvalidReg; // Used by patcher. _varBaseReg = kInvalidReg; // Used by patcher. 
@@ -312,136 +311,136 @@ struct X86SpecialInst { }; static const X86SpecialInst x86SpecialInstCpuid[] = { - { kX86RegIndexAx, kX86RegIndexAx, kVarAttrInOutReg }, - { kInvalidReg , kX86RegIndexBx, kVarAttrOutReg }, - { kInvalidReg , kX86RegIndexCx, kVarAttrOutReg }, - { kInvalidReg , kX86RegIndexDx, kVarAttrOutReg } + { kX86RegIndexAx, kX86RegIndexAx, kVarAttrXReg }, + { kInvalidReg , kX86RegIndexBx, kVarAttrWReg }, + { kInvalidReg , kX86RegIndexCx, kVarAttrWReg }, + { kInvalidReg , kX86RegIndexDx, kVarAttrWReg } }; static const X86SpecialInst x86SpecialInstCbwCdqeCwde[] = { - { kX86RegIndexAx, kX86RegIndexAx, kVarAttrInOutReg } + { kX86RegIndexAx, kX86RegIndexAx, kVarAttrXReg } }; static const X86SpecialInst x86SpecialInstCdqCwdCqo[] = { - { kInvalidReg , kX86RegIndexDx, kVarAttrOutReg }, - { kX86RegIndexAx, kInvalidReg , kVarAttrInReg } + { kInvalidReg , kX86RegIndexDx, kVarAttrWReg }, + { kX86RegIndexAx, kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstCmpxchg[] = { - { kX86RegIndexAx, kX86RegIndexAx, kVarAttrInOutReg }, - { kInvalidReg , kInvalidReg , kVarAttrInOutReg }, - { kInvalidReg , kInvalidReg , kVarAttrInReg } + { kX86RegIndexAx, kX86RegIndexAx, kVarAttrXReg }, + { kInvalidReg , kInvalidReg , kVarAttrXReg }, + { kInvalidReg , kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstCmpxchg8b16b[] = { - { kX86RegIndexDx, kX86RegIndexDx, kVarAttrInOutReg }, - { kX86RegIndexAx, kX86RegIndexAx, kVarAttrInOutReg }, - { kX86RegIndexCx, kInvalidReg , kVarAttrInReg }, - { kX86RegIndexBx, kInvalidReg , kVarAttrInReg } + { kX86RegIndexDx, kX86RegIndexDx, kVarAttrXReg }, + { kX86RegIndexAx, kX86RegIndexAx, kVarAttrXReg }, + { kX86RegIndexCx, kInvalidReg , kVarAttrRReg }, + { kX86RegIndexBx, kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstDaaDas[] = { - { kX86RegIndexAx, kX86RegIndexAx, kVarAttrInOutReg } + { kX86RegIndexAx, kX86RegIndexAx, kVarAttrXReg } }; static const X86SpecialInst 
x86SpecialInstDiv[] = { - { kInvalidReg , kX86RegIndexDx, kVarAttrInOutReg }, - { kX86RegIndexAx, kX86RegIndexAx, kVarAttrInOutReg }, - { kInvalidReg , kInvalidReg , kVarAttrInReg } + { kInvalidReg , kX86RegIndexDx, kVarAttrXReg }, + { kX86RegIndexAx, kX86RegIndexAx, kVarAttrXReg }, + { kInvalidReg , kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstJecxz[] = { - { kX86RegIndexCx, kInvalidReg , kVarAttrInReg } + { kX86RegIndexCx, kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstLods[] = { - { kInvalidReg , kX86RegIndexAx, kVarAttrOutReg }, - { kX86RegIndexSi, kX86RegIndexSi, kVarAttrInOutReg }, - { kX86RegIndexCx, kX86RegIndexCx, kVarAttrInOutReg } + { kInvalidReg , kX86RegIndexAx, kVarAttrWReg }, + { kX86RegIndexSi, kX86RegIndexSi, kVarAttrXReg }, + { kX86RegIndexCx, kX86RegIndexCx, kVarAttrXReg } }; static const X86SpecialInst x86SpecialInstMul[] = { - { kInvalidReg , kX86RegIndexDx, kVarAttrOutReg }, - { kX86RegIndexAx, kX86RegIndexAx, kVarAttrInOutReg }, - { kInvalidReg , kInvalidReg , kVarAttrInReg } + { kInvalidReg , kX86RegIndexDx, kVarAttrWReg }, + { kX86RegIndexAx, kX86RegIndexAx, kVarAttrXReg }, + { kInvalidReg , kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstMovPtr[] = { - { kInvalidReg , kX86RegIndexAx, kVarAttrOutReg }, - { kX86RegIndexAx, kInvalidReg , kVarAttrInReg } + { kInvalidReg , kX86RegIndexAx, kVarAttrWReg }, + { kX86RegIndexAx, kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstMovsCmps[] = { - { kX86RegIndexDi, kX86RegIndexDi, kVarAttrInOutReg }, - { kX86RegIndexSi, kX86RegIndexSi, kVarAttrInOutReg }, - { kX86RegIndexCx, kX86RegIndexCx, kVarAttrInOutReg } + { kX86RegIndexDi, kX86RegIndexDi, kVarAttrXReg }, + { kX86RegIndexSi, kX86RegIndexSi, kVarAttrXReg }, + { kX86RegIndexCx, kX86RegIndexCx, kVarAttrXReg } }; static const X86SpecialInst x86SpecialInstLahf[] = { - { kInvalidReg , kX86RegIndexAx, kVarAttrOutReg } + { kInvalidReg , 
kX86RegIndexAx, kVarAttrWReg } }; static const X86SpecialInst x86SpecialInstSahf[] = { - { kX86RegIndexAx, kInvalidReg , kVarAttrInReg } + { kX86RegIndexAx, kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstMaskmovqMaskmovdqu[] = { - { kInvalidReg , kX86RegIndexDi, kVarAttrInReg }, - { kInvalidReg , kInvalidReg , kVarAttrInReg }, - { kInvalidReg , kInvalidReg , kVarAttrInReg } + { kInvalidReg , kX86RegIndexDi, kVarAttrRReg }, + { kInvalidReg , kInvalidReg , kVarAttrRReg }, + { kInvalidReg , kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstRdtscRdtscp[] = { - { kInvalidReg , kX86RegIndexDx, kVarAttrOutReg }, - { kInvalidReg , kX86RegIndexAx, kVarAttrOutReg }, - { kInvalidReg , kX86RegIndexCx, kVarAttrOutReg } + { kInvalidReg , kX86RegIndexDx, kVarAttrWReg }, + { kInvalidReg , kX86RegIndexAx, kVarAttrWReg }, + { kInvalidReg , kX86RegIndexCx, kVarAttrWReg } }; static const X86SpecialInst x86SpecialInstRot[] = { - { kInvalidReg , kInvalidReg , kVarAttrInOutReg }, - { kX86RegIndexCx, kInvalidReg , kVarAttrInReg } + { kInvalidReg , kInvalidReg , kVarAttrXReg }, + { kX86RegIndexCx, kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstScas[] = { - { kX86RegIndexDi, kX86RegIndexDi, kVarAttrInOutReg }, - { kX86RegIndexAx, kInvalidReg , kVarAttrInReg }, - { kX86RegIndexCx, kX86RegIndexCx, kVarAttrInOutReg } + { kX86RegIndexDi, kX86RegIndexDi, kVarAttrXReg }, + { kX86RegIndexAx, kInvalidReg , kVarAttrRReg }, + { kX86RegIndexCx, kX86RegIndexCx, kVarAttrXReg } }; static const X86SpecialInst x86SpecialInstShlrd[] = { - { kInvalidReg , kInvalidReg , kVarAttrInOutReg }, - { kInvalidReg , kInvalidReg , kVarAttrInReg }, - { kX86RegIndexCx, kInvalidReg , kVarAttrInReg } + { kInvalidReg , kInvalidReg , kVarAttrXReg }, + { kInvalidReg , kInvalidReg , kVarAttrRReg }, + { kX86RegIndexCx, kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstStos[] = { - { kX86RegIndexDi, kInvalidReg , kVarAttrInReg 
}, - { kX86RegIndexAx, kInvalidReg , kVarAttrInReg }, - { kX86RegIndexCx, kX86RegIndexCx, kVarAttrInOutReg } + { kX86RegIndexDi, kInvalidReg , kVarAttrRReg }, + { kX86RegIndexAx, kInvalidReg , kVarAttrRReg }, + { kX86RegIndexCx, kX86RegIndexCx, kVarAttrXReg } }; static const X86SpecialInst x86SpecialInstBlend[] = { - { kInvalidReg , kInvalidReg , kVarAttrOutReg }, - { kInvalidReg , kInvalidReg , kVarAttrInReg }, - { 0 , kInvalidReg , kVarAttrInReg } + { kInvalidReg , kInvalidReg , kVarAttrWReg }, + { kInvalidReg , kInvalidReg , kVarAttrRReg }, + { 0 , kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstXsaveXrstor[] = { - { kInvalidReg , kInvalidReg , 0 }, - { kX86RegIndexDx, kInvalidReg , kVarAttrInReg }, - { kX86RegIndexAx, kInvalidReg , kVarAttrInReg } + { kInvalidReg , kInvalidReg , 0 }, + { kX86RegIndexDx, kInvalidReg , kVarAttrRReg }, + { kX86RegIndexAx, kInvalidReg , kVarAttrRReg } }; static const X86SpecialInst x86SpecialInstXgetbv[] = { - { kX86RegIndexCx, kInvalidReg , kVarAttrInReg }, - { kInvalidReg , kX86RegIndexDx, kVarAttrOutReg }, - { kInvalidReg , kX86RegIndexAx, kVarAttrOutReg } + { kX86RegIndexCx, kInvalidReg , kVarAttrRReg }, + { kInvalidReg , kX86RegIndexDx, kVarAttrWReg }, + { kInvalidReg , kX86RegIndexAx, kVarAttrWReg } }; static const X86SpecialInst x86SpecialInstXsetbv[] = { - { kX86RegIndexCx, kInvalidReg , kVarAttrInReg }, - { kX86RegIndexDx, kInvalidReg , kVarAttrInReg }, - { kX86RegIndexAx, kInvalidReg , kVarAttrInReg } + { kX86RegIndexCx, kInvalidReg , kVarAttrRReg }, + { kX86RegIndexDx, kInvalidReg , kVarAttrRReg }, + { kX86RegIndexAx, kInvalidReg , kVarAttrRReg } }; static ASMJIT_INLINE const X86SpecialInst* X86SpecialInst_get(uint32_t instId, const Operand* opList, uint32_t opCount) { @@ -647,7 +646,7 @@ void X86Context::emitLoad(VarData* vd, uint32_t regIndex, const char* reason) { X86Compiler* compiler = getCompiler(); X86Mem m = getVarMem(vd); - Node* node = NULL; + HLNode* node = NULL; switch 
(vd->getType()) { case kVarTypeInt8: @@ -706,7 +705,7 @@ void X86Context::emitLoad(VarData* vd, uint32_t regIndex, const char* reason) { if (!_emitComments) return; - node->setComment(compiler->_stringZone.sformat("[%s] %s", reason, vd->getName())); + node->setComment(compiler->_stringAllocator.sformat("[%s] %s", reason, vd->getName())); } // ============================================================================ @@ -719,7 +718,7 @@ void X86Context::emitSave(VarData* vd, uint32_t regIndex, const char* reason) { X86Compiler* compiler = getCompiler(); X86Mem m = getVarMem(vd); - Node* node = NULL; + HLNode* node = NULL; switch (vd->getType()) { case kVarTypeInt8: @@ -777,7 +776,7 @@ void X86Context::emitSave(VarData* vd, uint32_t regIndex, const char* reason) { if (!_emitComments) return; - node->setComment(compiler->_stringZone.sformat("[%s] %s", reason, vd->getName())); + node->setComment(compiler->_stringAllocator.sformat("[%s] %s", reason, vd->getName())); } // ============================================================================ @@ -789,7 +788,7 @@ void X86Context::emitMove(VarData* vd, uint32_t toRegIndex, uint32_t fromRegInde ASMJIT_ASSERT(fromRegIndex != kInvalidReg); X86Compiler* compiler = getCompiler(); - Node* node = NULL; + HLNode* node = NULL; switch (vd->getType()) { case kVarTypeInt8: @@ -838,7 +837,7 @@ void X86Context::emitMove(VarData* vd, uint32_t toRegIndex, uint32_t fromRegInde if (!_emitComments) return; - node->setComment(compiler->_stringZone.sformat("[%s] %s", reason, vd->getName())); + node->setComment(compiler->_stringAllocator.sformat("[%s] %s", reason, vd->getName())); } // ============================================================================ @@ -850,10 +849,10 @@ void X86Context::emitSwapGp(VarData* aVd, VarData* bVd, uint32_t aIndex, uint32_ ASMJIT_ASSERT(bIndex != kInvalidReg); X86Compiler* compiler = getCompiler(); - Node* node = NULL; + HLNode* node = NULL; #if defined(ASMJIT_BUILD_X64) - uint32_t vType = 
IntUtil::iMax(aVd->getType(), bVd->getType()); + uint32_t vType = Utils::iMax(aVd->getType(), bVd->getType()); if (vType == kVarTypeInt64 || vType == kVarTypeUInt64) { node = compiler->emit(kX86InstIdXchg, x86::gpq(aIndex), x86::gpq(bIndex)); } @@ -866,7 +865,7 @@ void X86Context::emitSwapGp(VarData* aVd, VarData* bVd, uint32_t aIndex, uint32_ if (!_emitComments) return; - node->setComment(compiler->_stringZone.sformat("[%s] %s, %s", reason, aVd->getName(), bVd->getName())); + node->setComment(compiler->_stringAllocator.sformat("[%s] %s, %s", reason, aVd->getName(), bVd->getName())); } // ============================================================================ @@ -936,7 +935,7 @@ void X86Context::emitConvertVarToVar(uint32_t dstType, uint32_t dstIndex, uint32 return; } - if (IntUtil::inInterval(srcType, _kVarTypeIntStart, _kVarTypeIntEnd)) { + if (Utils::inInterval(srcType, _kVarTypeIntStart, _kVarTypeIntEnd)) { // TODO: [COMPILER] Variable conversion not supported. ASMJIT_ASSERT(!"Reached"); } @@ -955,7 +954,7 @@ void X86Context::emitConvertVarToVar(uint32_t dstType, uint32_t dstIndex, uint32 return; } - if (IntUtil::inInterval(srcType, _kVarTypeIntStart, _kVarTypeIntEnd)) { + if (Utils::inInterval(srcType, _kVarTypeIntStart, _kVarTypeIntEnd)) { // TODO: [COMPILER] Variable conversion not supported. ASMJIT_ASSERT(!"Reached"); } @@ -984,23 +983,22 @@ void X86Context::emitMoveVarOnStack( case kVarTypeInt8: case kVarTypeUInt8: // Move DWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt8, kVarTypeUInt64)) + if (Utils::inInterval(srcType, kVarTypeInt8, kVarTypeUInt64)) goto _MovGpD; // Move DWORD (Mm). - if (IntUtil::inInterval(srcType, kX86VarTypeMm, kX86VarTypeMm)) + if (Utils::inInterval(srcType, kX86VarTypeMm, kX86VarTypeMm)) goto _MovMmD; // Move DWORD (Xmm). 
- if (IntUtil::inInterval(srcType, kX86VarTypeXmm, kX86VarTypeXmmPd)) + if (Utils::inInterval(srcType, kX86VarTypeXmm, kX86VarTypeXmmPd)) goto _MovXmmD; - break; case kVarTypeInt16: case kVarTypeUInt16: // Extend BYTE->WORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt8, kVarTypeUInt8)) { + if (Utils::inInterval(srcType, kVarTypeInt8, kVarTypeUInt8)) { r1.setSize(1); r1.setCode(kX86RegTypeGpbLo, srcIndex); @@ -1009,23 +1007,22 @@ void X86Context::emitMoveVarOnStack( } // Move DWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt16, kVarTypeUInt64)) + if (Utils::inInterval(srcType, kVarTypeInt16, kVarTypeUInt64)) goto _MovGpD; // Move DWORD (Mm). - if (IntUtil::inInterval(srcType, kX86VarTypeMm, kX86VarTypeMm)) + if (Utils::inInterval(srcType, kX86VarTypeMm, kX86VarTypeMm)) goto _MovMmD; // Move DWORD (Xmm). - if (IntUtil::inInterval(srcType, kX86VarTypeXmm, kX86VarTypeXmmPd)) + if (Utils::inInterval(srcType, kX86VarTypeXmm, kX86VarTypeXmmPd)) goto _MovXmmD; - break; case kVarTypeInt32: case kVarTypeUInt32: // Extend BYTE->DWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt8, kVarTypeUInt8)) { + if (Utils::inInterval(srcType, kVarTypeInt8, kVarTypeUInt8)) { r1.setSize(1); r1.setCode(kX86RegTypeGpbLo, srcIndex); @@ -1034,7 +1031,7 @@ void X86Context::emitMoveVarOnStack( } // Extend WORD->DWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt16, kVarTypeUInt16)) { + if (Utils::inInterval(srcType, kVarTypeInt16, kVarTypeUInt16)) { r1.setSize(2); r1.setCode(kX86RegTypeGpw, srcIndex); @@ -1043,22 +1040,22 @@ void X86Context::emitMoveVarOnStack( } // Move DWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt32, kVarTypeUInt64)) + if (Utils::inInterval(srcType, kVarTypeInt32, kVarTypeUInt64)) goto _MovGpD; // Move DWORD (Mm). - if (IntUtil::inInterval(srcType, kX86VarTypeMm, kX86VarTypeMm)) + if (Utils::inInterval(srcType, kX86VarTypeMm, kX86VarTypeMm)) goto _MovMmD; // Move DWORD (Xmm). 
- if (IntUtil::inInterval(srcType, kX86VarTypeXmm, kX86VarTypeXmmPd)) + if (Utils::inInterval(srcType, kX86VarTypeXmm, kX86VarTypeXmmPd)) goto _MovXmmD; break; case kVarTypeInt64: case kVarTypeUInt64: // Extend BYTE->QWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt8, kVarTypeUInt8)) { + if (Utils::inInterval(srcType, kVarTypeInt8, kVarTypeUInt8)) { r1.setSize(1); r1.setCode(kX86RegTypeGpbLo, srcIndex); @@ -1067,7 +1064,7 @@ void X86Context::emitMoveVarOnStack( } // Extend WORD->QWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt16, kVarTypeUInt16)) { + if (Utils::inInterval(srcType, kVarTypeInt16, kVarTypeUInt16)) { r1.setSize(2); r1.setCode(kX86RegTypeGpw, srcIndex); @@ -1076,7 +1073,7 @@ void X86Context::emitMoveVarOnStack( } // Extend DWORD->QWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt32, kVarTypeUInt32)) { + if (Utils::inInterval(srcType, kVarTypeInt32, kVarTypeUInt32)) { r1.setSize(4); r1.setCode(kX86RegTypeGpd, srcIndex); @@ -1088,21 +1085,21 @@ void X86Context::emitMoveVarOnStack( } // Move QWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt64, kVarTypeUInt64)) + if (Utils::inInterval(srcType, kVarTypeInt64, kVarTypeUInt64)) goto _MovGpQ; // Move QWORD (Mm). - if (IntUtil::inInterval(srcType, kX86VarTypeMm, kX86VarTypeMm)) + if (Utils::inInterval(srcType, kX86VarTypeMm, kX86VarTypeMm)) goto _MovMmQ; // Move QWORD (Xmm). - if (IntUtil::inInterval(srcType, kX86VarTypeXmm, kX86VarTypeXmmPd)) + if (Utils::inInterval(srcType, kX86VarTypeXmm, kX86VarTypeXmmPd)) goto _MovXmmQ; break; case kX86VarTypeMm: // Extend BYTE->QWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt8, kVarTypeUInt8)) { + if (Utils::inInterval(srcType, kVarTypeInt8, kVarTypeUInt8)) { r1.setSize(1); r1.setCode(kX86RegTypeGpbLo, srcIndex); @@ -1111,7 +1108,7 @@ void X86Context::emitMoveVarOnStack( } // Extend WORD->QWORD (Gp). 
- if (IntUtil::inInterval(srcType, kVarTypeInt16, kVarTypeUInt16)) { + if (Utils::inInterval(srcType, kVarTypeInt16, kVarTypeUInt16)) { r1.setSize(2); r1.setCode(kX86RegTypeGpw, srcIndex); @@ -1120,19 +1117,19 @@ void X86Context::emitMoveVarOnStack( } // Extend DWORD->QWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt32, kVarTypeUInt32)) + if (Utils::inInterval(srcType, kVarTypeInt32, kVarTypeUInt32)) goto _ExtendMovGpDQ; // Move QWORD (Gp). - if (IntUtil::inInterval(srcType, kVarTypeInt64, kVarTypeUInt64)) + if (Utils::inInterval(srcType, kVarTypeInt64, kVarTypeUInt64)) goto _MovGpQ; // Move QWORD (Mm). - if (IntUtil::inInterval(srcType, kX86VarTypeMm, kX86VarTypeMm)) + if (Utils::inInterval(srcType, kX86VarTypeMm, kX86VarTypeMm)) goto _MovMmQ; // Move QWORD (Xmm). - if (IntUtil::inInterval(srcType, kX86VarTypeXmm, kX86VarTypeXmmPd)) + if (Utils::inInterval(srcType, kX86VarTypeXmm, kX86VarTypeXmmPd)) goto _MovXmmQ; break; @@ -1530,11 +1527,11 @@ VarState* X86Context::saveState() { VarData** vdArray = _contextVd.getData(); uint32_t vdCount = static_cast(_contextVd.getLength()); - size_t size = IntUtil::alignTo( + size_t size = Utils::alignTo( sizeof(X86VarState) + vdCount * sizeof(X86StateCell), sizeof(void*)); X86VarState* cur = getState(); - X86VarState* dst = _baseZone.allocT(size); + X86VarState* dst = _zoneAllocator.allocT(size); if (dst == NULL) return NULL; @@ -1584,7 +1581,7 @@ static ASMJIT_INLINE void X86Context_switchStateVars(X86Context* self, X86VarSta continue; if (dVd != NULL) { - const X86StateCell& cell = cells[dVd->getContextId()]; + const X86StateCell& cell = cells[dVd->getLocalId()]; if (cell.getState() != kVarStateReg) { if (cell.getState() == kVarStateMem) @@ -1612,7 +1609,7 @@ _MoveOrLoad: } if (dVd != NULL) { - const X86StateCell& cell = cells[dVd->getContextId()]; + const X86StateCell& cell = cells[dVd->getLocalId()]; if (sVd == NULL) { if (cell.getState() == kVarStateReg) continue; @@ -1750,8 +1747,8 @@ static ASMJIT_INLINE void 
X86Context_intersectStateVars(X86Context* self, X86Var continue; if (dVd != NULL) { - const X86StateCell& aCell = aCells[dVd->getContextId()]; - const X86StateCell& bCell = bCells[dVd->getContextId()]; + const X86StateCell& aCell = aCells[dVd->getLocalId()]; + const X86StateCell& bCell = bCells[dVd->getLocalId()]; if (aCell.getState() != kVarStateReg && bCell.getState() != kVarStateReg) { if (aCell.getState() == kVarStateMem || bCell.getState() == kVarStateMem) @@ -1778,8 +1775,8 @@ static ASMJIT_INLINE void X86Context_intersectStateVars(X86Context* self, X86Var } if (dVd != NULL) { - const X86StateCell& aCell = aCells[dVd->getContextId()]; - const X86StateCell& bCell = bCells[dVd->getContextId()]; + const X86StateCell& aCell = aCells[dVd->getLocalId()]; + const X86StateCell& bCell = bCells[dVd->getLocalId()]; if (aVd == NULL) { if (aCell.getState() == kVarStateReg || bCell.getState() == kVarStateReg) @@ -1817,7 +1814,7 @@ static ASMJIT_INLINE void X86Context_intersectStateVars(X86Context* self, X86Var if (vd == NULL) continue; - const X86StateCell& aCell = aCells[vd->getContextId()]; + const X86StateCell& aCell = aCells[vd->getLocalId()]; if ((dModified & regMask) && !(aModified & regMask) && aCell.getState() == kVarStateReg) self->save(vd); } @@ -1843,7 +1840,7 @@ void X86Context::intersectStates(VarState* a_, VarState* b_) { // ============================================================================ //! \internal -static ASMJIT_INLINE Node* X86Context_getJccFlow(JumpNode* jNode) { +static ASMJIT_INLINE HLNode* X86Context_getJccFlow(HLJump* jNode) { if (jNode->isTaken()) return jNode->getTarget(); else @@ -1851,7 +1848,7 @@ static ASMJIT_INLINE Node* X86Context_getJccFlow(JumpNode* jNode) { } //! 
\internal -static ASMJIT_INLINE Node* X86Context_getOppositeJccFlow(JumpNode* jNode) { +static ASMJIT_INLINE HLNode* X86Context_getOppositeJccFlow(HLJump* jNode) { if (jNode->isTaken()) return jNode->getNext(); else @@ -1877,7 +1874,7 @@ static void X86Context_prepareSingleVarInst(uint32_t instId, VarAttr* va) { case kX86InstIdPsubsb : case kX86InstIdPsubsw : case kX86InstIdPsubusb : case kX86InstIdPsubusw : case kX86InstIdPcmpeqb : case kX86InstIdPcmpeqw : case kX86InstIdPcmpeqd : case kX86InstIdPcmpeqq : case kX86InstIdPcmpgtb : case kX86InstIdPcmpgtw : case kX86InstIdPcmpgtd : case kX86InstIdPcmpgtq : - va->andNotFlags(kVarAttrInReg); + va->andNotFlags(kVarAttrRReg); break; // - and reg, reg ; Nop. @@ -1886,7 +1883,7 @@ static void X86Context_prepareSingleVarInst(uint32_t instId, VarAttr* va) { case kX86InstIdAnd : case kX86InstIdAndpd : case kX86InstIdAndps : case kX86InstIdPand : case kX86InstIdOr : case kX86InstIdOrpd : case kX86InstIdOrps : case kX86InstIdPor : case kX86InstIdXchg : - va->andNotFlags(kVarAttrOutReg); + va->andNotFlags(kVarAttrWReg); break; } } @@ -1903,13 +1900,13 @@ static ASMJIT_INLINE X86RegMask X86Context_getUsedArgs(X86Context* self, X86Call regs.reset(); uint32_t i; - uint32_t argCount = decl->getArgCount(); + uint32_t argCount = decl->getNumArgs(); for (i = 0; i < argCount; i++) { const FuncInOut& arg = decl->getArg(i); if (!arg.hasRegIndex()) continue; - regs.or_(x86VarTypeToClass(arg.getVarType()), IntUtil::mask(arg.getRegIndex())); + regs.or_(x86VarTypeToClass(arg.getVarType()), Utils::mask(arg.getRegIndex())); } return regs; @@ -1922,7 +1919,7 @@ static ASMJIT_INLINE X86RegMask X86Context_getUsedArgs(X86Context* self, X86Call struct SArgData { VarData* sVd; VarData* cVd; - SArgNode* sArg; + HLCallArg* sArg; uint32_t aType; }; @@ -1969,7 +1966,7 @@ static ASMJIT_INLINE bool X86Context_mustConvertSArg(X86Context* self, uint32_t static ASMJIT_INLINE uint32_t X86Context_typeOfConvertedSArg(X86Context* self, uint32_t aType, uint32_t 
sType) { ASMJIT_ASSERT(X86Context_mustConvertSArg(self, aType, sType)); - if (IntUtil::inInterval(aType, _kVarTypeIntStart, _kVarTypeIntEnd)) + if (Utils::inInterval(aType, _kVarTypeIntStart, _kVarTypeIntEnd)) return aType; if (aType == kVarTypeFp32) @@ -1978,17 +1975,17 @@ static ASMJIT_INLINE uint32_t X86Context_typeOfConvertedSArg(X86Context* self, u if (aType == kVarTypeFp64) return kX86VarTypeXmmSd; - if (IntUtil::inInterval(aType, _kX86VarTypeXmmStart, _kX86VarTypeXmmEnd)) + if (Utils::inInterval(aType, _kX86VarTypeXmmStart, _kX86VarTypeXmmEnd)) return aType; - if (IntUtil::inInterval(aType, _kX86VarTypeYmmStart, _kX86VarTypeYmmEnd)) + if (Utils::inInterval(aType, _kX86VarTypeYmmStart, _kX86VarTypeYmmEnd)) return aType; ASMJIT_ASSERT(!"Reached"); return aType; } -static ASMJIT_INLINE Error X86Context_insertSArgNode( +static ASMJIT_INLINE Error X86Context_insertHLCallArg( X86Context* self, X86CallNode* call, VarData* sVd, const uint32_t* gaRegs, const FuncInOut& arg, uint32_t argIndex, @@ -2033,7 +2030,7 @@ static ASMJIT_INLINE Error X86Context_insertSArgNode( if (sArgData->cVd->getType() != cType || sArgData->aType != aType) continue; - sArgData->sArg->_args |= IntUtil::mask(argIndex); + sArgData->sArg->_args |= Utils::mask(argIndex); return kErrorOk; } @@ -2041,7 +2038,7 @@ static ASMJIT_INLINE Error X86Context_insertSArgNode( if (cVd == NULL) return kErrorNoHeapMemory; - SArgNode* sArg = compiler->newNode(call, sVd, cVd); + HLCallArg* sArg = compiler->newNode(call, sVd, cVd); if (sArg == NULL) return kErrorNoHeapMemory; @@ -2063,18 +2060,18 @@ static ASMJIT_INLINE Error X86Context_insertSArgNode( map->_clobberedRegs.reset(); if (sClass <= cClass) { - map->_list[0].setup(sVd, kVarAttrInReg , 0, gaRegs[sClass]); - map->_list[1].setup(cVd, kVarAttrOutReg, 0, gaRegs[cClass]); + map->_list[0].setup(sVd, kVarAttrRReg, 0, gaRegs[sClass]); + map->_list[1].setup(cVd, kVarAttrWReg, 0, gaRegs[cClass]); map->_start.set(cClass, sClass != cClass); } else { - 
map->_list[0].setup(cVd, kVarAttrOutReg, 0, gaRegs[cClass]); - map->_list[1].setup(sVd, kVarAttrInReg , 0, gaRegs[sClass]); + map->_list[0].setup(cVd, kVarAttrWReg, 0, gaRegs[cClass]); + map->_list[1].setup(sVd, kVarAttrRReg, 0, gaRegs[sClass]); map->_start.set(sClass, 1); } sArg->setMap(map); - sArg->_args |= IntUtil::mask(argIndex); + sArg->_args |= Utils::mask(argIndex); compiler->addNodeBefore(sArg, call); ::memmove(sArgData + 1, sArgData, (sArgCount - i) * sizeof(SArgData)); @@ -2088,11 +2085,11 @@ static ASMJIT_INLINE Error X86Context_insertSArgNode( return kErrorOk; } else { - SArgNode* sArg = sArgData->sArg; + HLCallArg* sArg = sArgData->sArg; ASMJIT_PROPAGATE_ERROR(self->_registerContextVar(sVd)); if (sArg == NULL) { - sArg = compiler->newNode(call, sVd, (VarData*)NULL); + sArg = compiler->newNode(call, sVd, (VarData*)NULL); if (sArg == NULL) return kErrorNoHeapMemory; @@ -2107,7 +2104,7 @@ static ASMJIT_INLINE Error X86Context_insertSArgNode( map->_inRegs.reset(); map->_outRegs.reset(); map->_clobberedRegs.reset(); - map->_list[0].setup(sVd, kVarAttrInReg, 0, gaRegs[sClass]); + map->_list[0].setup(sVd, kVarAttrRReg, 0, gaRegs[sClass]); sArg->setMap(map); sArgData->sArg = sArg; @@ -2115,7 +2112,7 @@ static ASMJIT_INLINE Error X86Context_insertSArgNode( compiler->addNodeBefore(sArg, call); } - sArg->_args |= IntUtil::mask(argIndex); + sArg->_args |= Utils::mask(argIndex); return kErrorOk; } } @@ -2132,47 +2129,45 @@ static ASMJIT_INLINE Error X86Context_insertSArgNode( //! - Create and assign groupId and flowId. //! - Collect all variables and merge them to vaList. 
Error X86Context::fetch() { - ASMJIT_TLOG("[Fetch] === Begin ===\n"); + ASMJIT_TLOG("[F] ======= Fetch (Begin)\n"); X86Compiler* compiler = getCompiler(); X86FuncNode* func = getFunc(); uint32_t arch = compiler->getArch(); - Node* node_ = func; - Node* next = NULL; - Node* stop = getStop(); + HLNode* node_ = func; + HLNode* next = NULL; + HLNode* stop = getStop(); uint32_t flowId = 0; VarAttr vaTmpList[80]; SArgData sArgList[80]; - PodList::Link* jLink = NULL; + PodList::Link* jLink = NULL; // Function flags. func->clearFuncFlags( kFuncFlagIsNaked | - kX86FuncFlagPushPop | - kX86FuncFlagEmms | - kX86FuncFlagSFence | - kX86FuncFlagLFence ); + kFuncFlagX86Emms | + kFuncFlagX86SFence | + kFuncFlagX86LFence ); - if (func->getHint(kFuncHintNaked ) != 0) func->addFuncFlags(kFuncFlagIsNaked ); - if (func->getHint(kFuncHintCompact ) != 0) func->addFuncFlags(kX86FuncFlagPushPop | kX86FuncFlagEnter | kX86FuncFlagLeave); - if (func->getHint(kX86FuncHintPushPop) != 0) func->addFuncFlags(kX86FuncFlagPushPop); - if (func->getHint(kX86FuncHintEmms ) != 0) func->addFuncFlags(kX86FuncFlagEmms ); - if (func->getHint(kX86FuncHintSFence ) != 0) func->addFuncFlags(kX86FuncFlagSFence ); - if (func->getHint(kX86FuncHintLFence ) != 0) func->addFuncFlags(kX86FuncFlagLFence ); + if (func->getHint(kFuncHintNaked ) != 0) func->addFuncFlags(kFuncFlagIsNaked); + if (func->getHint(kFuncHintCompact ) != 0) func->addFuncFlags(kFuncFlagX86Leave); + if (func->getHint(kFuncHintX86Emms ) != 0) func->addFuncFlags(kFuncFlagX86Emms); + if (func->getHint(kFuncHintX86SFence) != 0) func->addFuncFlags(kFuncFlagX86SFence); + if (func->getHint(kFuncHintX86LFence) != 0) func->addFuncFlags(kFuncFlagX86LFence); // Global allocable registers. uint32_t* gaRegs = _gaRegs; if (!func->hasFuncFlag(kFuncFlagIsNaked)) - gaRegs[kX86RegClassGp] &= ~IntUtil::mask(kX86RegIndexBp); + gaRegs[kX86RegClassGp] &= ~Utils::mask(kX86RegIndexBp); // Allowed index registers (Gp/Xmm/Ymm). 
- const uint32_t indexMask = IntUtil::bits(_regCount.getGp()) & ~(IntUtil::mask(4, 12)); + const uint32_t indexMask = Utils::bits(_regCount.getGp()) & ~(Utils::mask(4, 12)); // -------------------------------------------------------------------------- // [VI Macros] @@ -2223,7 +2218,7 @@ Error X86Context::fetch() { if (va->_inRegs) \ va->_allocableRegs = va->_inRegs; \ else if (va->_outRegIndex != kInvalidReg) \ - va->_allocableRegs = IntUtil::mask(va->_outRegIndex); \ + va->_allocableRegs = Utils::mask(va->_outRegIndex); \ else \ va->_allocableRegs &= ~inRegs.get(class_); \ \ @@ -2284,7 +2279,7 @@ _NextGroup: if (jLink == NULL) goto _Done; - node_ = X86Context_getOppositeJccFlow(static_cast(jLink->getValue())); + node_ = X86Context_getOppositeJccFlow(static_cast(jLink->getValue())); } flowId++; @@ -2293,7 +2288,7 @@ _NextGroup: node_->setFlowId(flowId); ASMJIT_TSEC({ - X86Context_traceNode(this, node_); + X86Context_traceNode(this, node_, "[F] "); }); switch (node_->getType()) { @@ -2301,21 +2296,21 @@ _NextGroup: // [Align/Embed] // ---------------------------------------------------------------------- - case kNodeTypeAlign: - case kNodeTypeEmbed: + case kHLNodeTypeAlign: + case kHLNodeTypeData: break; // ---------------------------------------------------------------------- // [Hint] // ---------------------------------------------------------------------- - case kNodeTypeHint: { - HintNode* node = static_cast(node_); + case kHLNodeTypeHint: { + HLHint* node = static_cast(node_); VI_BEGIN(); if (node->getHint() == kVarHintAlloc) { uint32_t remain[_kX86RegClassManagedCount]; - HintNode* cur = node; + HLHint* cur = node; remain[kX86RegClassGp ] = _regCount.getGp() - 1 - func->hasFuncFlag(kFuncFlagIsNaked); remain[kX86RegClassMm ] = _regCount.getMm(); @@ -2333,14 +2328,14 @@ _NextGroup: // We handle both kInvalidReg and kInvalidValue. 
if (regIndex < kInvalidReg) - regMask = IntUtil::mask(regIndex); + regMask = Utils::mask(regIndex); if (va == NULL) { if (inRegs.has(regClass, regMask)) break; if (remain[regClass] == 0) break; - VI_ADD_VAR(vd, va, kVarAttrInReg, gaRegs[regClass]); + VI_ADD_VAR(vd, va, kVarAttrRReg, gaRegs[regClass]); if (regMask != 0) { inRegs.xor_(regClass, regMask); @@ -2362,8 +2357,8 @@ _NextGroup: if (cur != node) compiler->removeNode(cur); - cur = static_cast(node->getNext()); - if (cur == NULL || cur->getType() != kNodeTypeHint || cur->getHint() != kVarHintAlloc) + cur = static_cast(node->getNext()); + if (cur == NULL || cur->getType() != kHLNodeTypeHint || cur->getHint() != kVarHintAlloc) break; } @@ -2377,13 +2372,13 @@ _NextGroup: switch (node->getHint()) { case kVarHintSpill: - flags = kVarAttrInMem | kVarAttrSpill; + flags = kVarAttrRMem | kVarAttrSpill; break; case kVarHintSave: - flags = kVarAttrInMem; + flags = kVarAttrRMem; break; case kVarHintSaveAndUnuse: - flags = kVarAttrInMem | kVarAttrUnuse; + flags = kVarAttrRMem | kVarAttrUnuse; break; case kVarHintUnuse: flags = kVarAttrUnuse; @@ -2401,7 +2396,11 @@ _NextGroup: // [Target] // ---------------------------------------------------------------------- - case kNodeTypeTarget: { + case kHLNodeTypeLabel: { + if (node_ == func->getExitNode()) { + ASMJIT_PROPAGATE_ERROR(addReturningNode(node_)); + goto _NextGroup; + } break; } @@ -2409,8 +2408,8 @@ _NextGroup: // [Inst] // ---------------------------------------------------------------------- - case kNodeTypeInst: { - InstNode* node = static_cast(node_); + case kHLNodeTypeInst: { + HLInst* node = static_cast(node_); uint32_t instId = node->getInstId(); uint32_t flags = node->getFlags(); @@ -2425,10 +2424,10 @@ _NextGroup: // Collect instruction flags and merge all 'VarAttr's. 
if (extendedInfo.isFp()) - flags |= kNodeFlagIsFp; + flags |= kHLNodeFlagIsFp; if (extendedInfo.isSpecial() && (special = X86SpecialInst_get(instId, opList, opCount)) != NULL) - flags |= kNodeFlagIsSpecial; + flags |= kHLNodeFlagIsSpecial; uint32_t gpAllowedMask = 0xFFFFFFFF; @@ -2442,7 +2441,7 @@ _NextGroup: VI_MERGE_VAR(vd, va, 0, gaRegs[vd->getClass()] & gpAllowedMask); if (static_cast(op)->isGpb()) { - va->orFlags(static_cast(op)->isGpbLo() ? kX86VarAttrGpbLo : kX86VarAttrGpbHi); + va->orFlags(static_cast(op)->isGpbLo() ? kVarAttrX86GpbLo : kVarAttrX86GpbHi); if (arch == kArchX86) { // If a byte register is accessed in 32-bit mode we have to limit // all allocable registers for that variable to eax/ebx/ecx/edx. @@ -2461,7 +2460,7 @@ _NextGroup: if (gpAllowedMask != 0xFF) { for (uint32_t j = 0; j < i; j++) - vaTmpList[j]._allocableRegs &= vaTmpList[j].hasFlag(kX86VarAttrGpbHi) ? 0x0F : 0xFF; + vaTmpList[j]._allocableRegs &= vaTmpList[j].hasFlag(kVarAttrX86GpbHi) ? 0x0F : 0xFF; gpAllowedMask = 0xFF; } } @@ -2479,13 +2478,13 @@ _NextGroup: c = kX86RegClassXyz; if (inReg != kInvalidReg) { - uint32_t mask = IntUtil::mask(inReg); + uint32_t mask = Utils::mask(inReg); inRegs.or_(c, mask); va->addInRegs(mask); } if (outReg != kInvalidReg) { - uint32_t mask = IntUtil::mask(outReg); + uint32_t mask = Utils::mask(outReg); outRegs.or_(c, mask); va->setOutRegIndex(outReg); } @@ -2493,8 +2492,8 @@ _NextGroup: va->orFlags(special[i].flags); } else { - uint32_t inFlags = kVarAttrInReg; - uint32_t outFlags = kVarAttrOutReg; + uint32_t inFlags = kVarAttrRReg; + uint32_t outFlags = kVarAttrWReg; uint32_t combinedFlags; if (i == 0) { @@ -2567,11 +2566,11 @@ _NextGroup: if (!vd->isStack()) { VI_MERGE_VAR(vd, va, 0, gaRegs[vd->getClass()] & gpAllowedMask); if (m->getMemType() == kMemTypeBaseIndex) { - va->orFlags(kVarAttrInReg); + va->orFlags(kVarAttrRReg); } else { - uint32_t inFlags = kVarAttrInMem; - uint32_t outFlags = kVarAttrOutMem; + uint32_t inFlags = kVarAttrRMem; + 
uint32_t outFlags = kVarAttrWMem; uint32_t combinedFlags; if (i == 0) { @@ -2582,7 +2581,7 @@ _NextGroup: // as if it's just move to the register. It's just a bit // simpler as there are no special cases. if (extendedInfo.isMove()) { - uint32_t movSize = IntUtil::iMax(extendedInfo.getWriteSize(), m->getSize()); + uint32_t movSize = Utils::iMax(extendedInfo.getWriteSize(), m->getSize()); uint32_t varSize = vd->getSize(); if (movSize >= varSize) @@ -2612,7 +2611,7 @@ _NextGroup: vd = compiler->getVdById(m->getIndex()); VI_MERGE_VAR(vd, va, 0, gaRegs[kX86RegClassGp] & gpAllowedMask); va->andAllocableRegs(indexMask); - va->orFlags(kVarAttrInReg); + va->orFlags(kVarAttrRReg); } } } @@ -2630,8 +2629,8 @@ _NextGroup: // Handle conditional/unconditional jump. if (node->isJmpOrJcc()) { - JumpNode* jNode = static_cast(node); - TargetNode* jTarget = jNode->getTarget(); + HLJump* jNode = static_cast(node); + HLLabel* jTarget = jNode->getTarget(); // If this jump is unconditional we put next node to unreachable node // list so we can eliminate possible dead code. We have to do this in @@ -2660,11 +2659,11 @@ _NextGroup: if (jTarget->isFetched()) { uint32_t jTargetFlowId = jTarget->getFlowId(); - // Update kNodeFlagIsTaken flag to true if this is a conditional + // Update kHLNodeFlagIsTaken flag to true if this is a conditional // backward jump. This behavior can be overridden by using // `kInstOptionTaken` when the instruction is created. 
if (!jNode->isTaken() && opCount == 1 && jTargetFlowId <= flowId) { - jNode->orFlags(kNodeFlagIsTaken); + jNode->orFlags(kHLNodeFlagIsTaken); } } else if (next->isFetched()) { @@ -2685,12 +2684,12 @@ _NextGroup: // [Func] // ---------------------------------------------------------------------- - case kNodeTypeFunc: { + case kHLNodeTypeFunc: { ASMJIT_ASSERT(node_ == func); X86FuncDecl* decl = func->getDecl(); VI_BEGIN(); - for (uint32_t i = 0, argCount = decl->getArgCount(); i < argCount; i++) { + for (uint32_t i = 0, argCount = decl->getNumArgs(); i < argCount; i++) { const FuncInOut& arg = decl->getArg(i); VarData* vd = func->getArg(i); @@ -2701,7 +2700,7 @@ _NextGroup: // Overlapped function arguments. if (vd->getVa() != NULL) - return compiler->setError(kErrorOverlappedArgs); + return compiler->setLastError(kErrorOverlappedArgs); VI_ADD_VAR(vd, va, 0, 0); uint32_t aType = arg.getVarType(); @@ -2709,18 +2708,18 @@ _NextGroup: if (arg.hasRegIndex()) { if (x86VarTypeToClass(aType) == vd->getClass()) { - va->orFlags(kVarAttrOutReg); + va->orFlags(kVarAttrWReg); va->setOutRegIndex(arg.getRegIndex()); } else { - va->orFlags(kVarAttrOutConv); + va->orFlags(kVarAttrWConv); } } else { if ((x86VarTypeToClass(aType) == vd->getClass()) || (vType == kX86VarTypeXmmSs && aType == kVarTypeFp32) || (vType == kX86VarTypeXmmSd && aType == kVarTypeFp64)) { - va->orFlags(kVarAttrOutMem); + va->orFlags(kVarAttrWMem); } else { // TODO: [COMPILER] Not implemented. 
@@ -2736,7 +2735,7 @@ _NextGroup: // [End] // ---------------------------------------------------------------------- - case kNodeTypeEnd: { + case kHLNodeTypeSentinel: { ASMJIT_PROPAGATE_ERROR(addReturningNode(node_)); goto _NextGroup; } @@ -2745,8 +2744,8 @@ _NextGroup: // [Ret] // ---------------------------------------------------------------------- - case kNodeTypeRet: { - RetNode* node = static_cast(node_); + case kHLNodeTypeRet: { + HLRet* node = static_cast(node_); ASMJIT_PROPAGATE_ERROR(addReturningNode(node)); X86FuncDecl* decl = func->getDecl(); @@ -2765,14 +2764,14 @@ _NextGroup: VI_MERGE_VAR(vd, va, 0, 0); if (retClass == vd->getClass()) { - // TODO: [COMPILER] Fix RetNode fetch. - va->orFlags(kVarAttrInReg); - va->setInRegs(i == 0 ? IntUtil::mask(kX86RegIndexAx) : IntUtil::mask(kX86RegIndexDx)); + // TODO: [COMPILER] Fix HLRet fetch. + va->orFlags(kVarAttrRReg); + va->setInRegs(i == 0 ? Utils::mask(kX86RegIndexAx) : Utils::mask(kX86RegIndexDx)); inRegs.or_(retClass, va->getInRegs()); } else if (retClass == kX86RegClassFp) { - uint32_t fldFlag = ret.getVarType() == kVarTypeFp32 ? kX86VarAttrFld4 : kX86VarAttrFld8; - va->orFlags(kVarAttrInMem | fldFlag); + uint32_t fldFlag = ret.getVarType() == kVarTypeFp32 ? kVarAttrX86Fld4 : kVarAttrX86Fld8; + va->orFlags(kVarAttrRMem | fldFlag); } else { // TODO: Fix possible other return type conversions. 
@@ -2792,20 +2791,20 @@ _NextGroup: // [Call] // ---------------------------------------------------------------------- - case kNodeTypeCall: { + case kHLNodeTypeCall: { X86CallNode* node = static_cast(node_); X86FuncDecl* decl = node->getDecl(); Operand* target = &node->_target; - Operand* argList = node->_args; - Operand* retList = node->_ret; + Operand* args = node->_args; + Operand* rets = node->_ret; func->addFuncFlags(kFuncFlagIsCaller); func->mergeCallStackSize(node->_x86Decl.getArgStackSize()); node->_usedArgs = X86Context_getUsedArgs(this, node, decl); uint32_t i; - uint32_t argCount = decl->getArgCount(); + uint32_t argCount = decl->getNumArgs(); uint32_t sArgCount = 0; uint32_t gpAllocableMask = gaRegs[kX86RegClassGp] & ~node->_usedArgs.get(kX86RegClassGp); @@ -2819,7 +2818,7 @@ _NextGroup: vd = compiler->getVdById(target->getId()); VI_MERGE_VAR(vd, va, 0, 0); - va->orFlags(kVarAttrInReg | kVarAttrInCall); + va->orFlags(kVarAttrRReg | kVarAttrRCall); if (va->getInRegs() == 0) va->addAllocableRegs(gpAllocableMask); } @@ -2831,12 +2830,12 @@ _NextGroup: if (!vd->isStack()) { VI_MERGE_VAR(vd, va, 0, 0); if (m->getMemType() == kMemTypeBaseIndex) { - va->orFlags(kVarAttrInReg | kVarAttrInCall); + va->orFlags(kVarAttrRReg | kVarAttrRCall); if (va->getInRegs() == 0) va->addAllocableRegs(gpAllocableMask); } else { - va->orFlags(kVarAttrInMem | kVarAttrInCall); + va->orFlags(kVarAttrRMem | kVarAttrRCall); } } } @@ -2846,7 +2845,7 @@ _NextGroup: vd = compiler->getVdById(m->getIndex()); VI_MERGE_VAR(vd, va, 0, 0); - va->orFlags(kVarAttrInReg | kVarAttrInCall); + va->orFlags(kVarAttrRReg | kVarAttrRCall); if ((va->getInRegs() & ~indexMask) == 0) va->andAllocableRegs(gpAllocableMask & indexMask); } @@ -2854,7 +2853,7 @@ _NextGroup: // Function-call arguments. 
for (i = 0; i < argCount; i++) { - Operand* op = &argList[i]; + Operand* op = &args[i]; if (!op->isVar()) continue; @@ -2868,28 +2867,28 @@ _NextGroup: uint32_t argClass = x86VarTypeToClass(argType); if (vd->getClass() == argClass) { - va->addInRegs(IntUtil::mask(arg.getRegIndex())); - va->orFlags(kVarAttrInReg | kVarAttrInArg); + va->addInRegs(Utils::mask(arg.getRegIndex())); + va->orFlags(kVarAttrRReg | kVarAttrRFunc); } else { - va->orFlags(kVarAttrInConv | kVarAttrInArg); + va->orFlags(kVarAttrRConv | kVarAttrRFunc); } } - // If this is a stack-based argument we insert SArgNode instead of + // If this is a stack-based argument we insert HLCallArg instead of // using VarAttr. It improves the code, because the argument can be // moved onto stack as soon as it is ready and the register used by // the variable can be reused for something else. It is also much // easier to handle argument conversions, because there will be at // most only one node per conversion. else { - if (X86Context_insertSArgNode(this, node, vd, gaRegs, arg, i, sArgList, sArgCount) != kErrorOk) + if (X86Context_insertHLCallArg(this, node, vd, gaRegs, arg, i, sArgList, sArgCount) != kErrorOk) goto _NoMemory; } } // Function-call return(s). for (i = 0; i < 2; i++) { - Operand* op = &retList[i]; + Operand* op = &rets[i]; if (!op->isVar()) continue; @@ -2903,19 +2902,19 @@ _NextGroup: if (vd->getClass() == retClass) { va->setOutRegIndex(ret.getRegIndex()); - va->orFlags(kVarAttrOutReg | kVarAttrOutRet); + va->orFlags(kVarAttrWReg | kVarAttrWFunc); } else { - va->orFlags(kVarAttrOutConv | kVarAttrOutRet); + va->orFlags(kVarAttrWConv | kVarAttrWFunc); } } } // Init clobbered. 
- clobberedRegs.set(kX86RegClassGp , IntUtil::bits(_regCount.getGp()) & (~decl->getPreserved(kX86RegClassGp ))); - clobberedRegs.set(kX86RegClassMm , IntUtil::bits(_regCount.getMm()) & (~decl->getPreserved(kX86RegClassMm ))); - clobberedRegs.set(kX86RegClassK , IntUtil::bits(_regCount.getK()) & (~decl->getPreserved(kX86RegClassK ))); - clobberedRegs.set(kX86RegClassXyz, IntUtil::bits(_regCount.getXyz()) & (~decl->getPreserved(kX86RegClassXyz))); + clobberedRegs.set(kX86RegClassGp , Utils::bits(_regCount.getGp()) & (~decl->getPreserved(kX86RegClassGp ))); + clobberedRegs.set(kX86RegClassMm , Utils::bits(_regCount.getMm()) & (~decl->getPreserved(kX86RegClassMm ))); + clobberedRegs.set(kX86RegClassK , Utils::bits(_regCount.getK()) & (~decl->getPreserved(kX86RegClassK ))); + clobberedRegs.set(kX86RegClassXyz, Utils::bits(_regCount.getXyz()) & (~decl->getPreserved(kX86RegClassXyz))); VI_END(node_); break; @@ -2937,7 +2936,7 @@ _Done: node_->setFlowId(++flowId); } - ASMJIT_TLOG("[Fetch] === Done ===\n\n"); + ASMJIT_TLOG("[F] ======= Fetch (Done)\n"); return kErrorOk; // -------------------------------------------------------------------------- @@ -2945,8 +2944,8 @@ _Done: // -------------------------------------------------------------------------- _NoMemory: - ASMJIT_TLOG("[Fetch] === Out of Memory ===\n"); - return compiler->setError(kErrorNoHeapMemory); + ASMJIT_TLOG("[F] ======= Fetch (Out of Memory)\n"); + return compiler->setLastError(kErrorNoHeapMemory); } // ============================================================================ @@ -2955,23 +2954,23 @@ _NoMemory: Error X86Context::annotate() { #if !defined(ASMJIT_DISABLE_LOGGER) - FuncNode* func = getFunc(); + HLFunc* func = getFunc(); - Node* node_ = func; - Node* end = func->getEnd(); + HLNode* node_ = func; + HLNode* end = func->getEnd(); - Zone& sa = _compiler->_stringZone; - StringBuilderT<128> sb; + Zone& sa = _compiler->_stringAllocator; + StringBuilderTmp<128> sb; uint32_t maxLen = 0; while (node_ != 
end) { if (node_->getComment() == NULL) { - if (node_->getType() == kNodeTypeInst) { - InstNode* node = static_cast(node_); + if (node_->getType() == kHLNodeTypeInst) { + HLInst* node = static_cast(node_); X86Context_annotateInstruction(this, sb, node->getInstId(), node->getOpList(), node->getOpCount()); node_->setComment(static_cast(sa.dup(sb.getData(), sb.getLength() + 1))); - maxLen = IntUtil::iMax(maxLen, static_cast(sb.getLength())); + maxLen = Utils::iMax(maxLen, static_cast(sb.getLength())); sb.clear(); } @@ -3010,29 +3009,29 @@ struct X86BaseAlloc { ASMJIT_INLINE X86VarState* getState() const { return _context->getState(); } //! Get the node. - ASMJIT_INLINE Node* getNode() const { return _node; } + ASMJIT_INLINE HLNode* getNode() const { return _node; } //! Get VarAttr list (all). ASMJIT_INLINE VarAttr* getVaList() const { return _vaList[0]; } //! Get VarAttr list (per class). - ASMJIT_INLINE VarAttr* getVaListByClass(uint32_t c) const { return _vaList[c]; } + ASMJIT_INLINE VarAttr* getVaListByClass(uint32_t rc) const { return _vaList[rc]; } //! Get VarAttr count (all). ASMJIT_INLINE uint32_t getVaCount() const { return _vaCount; } //! Get VarAttr count (per class). - ASMJIT_INLINE uint32_t getVaCountByClass(uint32_t c) const { return _count.get(c); } + ASMJIT_INLINE uint32_t getVaCountByClass(uint32_t rc) const { return _count.get(rc); } //! Get whether all variables of class `c` are done. - ASMJIT_INLINE bool isVaDone(uint32_t c) const { return _done.get(c) == _count.get(c); } + ASMJIT_INLINE bool isVaDone(uint32_t rc) const { return _done.get(rc) == _count.get(rc); } //! Get how many variables have been allocated. - ASMJIT_INLINE uint32_t getVaDone(uint32_t c) const { return _done.get(c); } - - ASMJIT_INLINE void addVaDone(uint32_t c, uint32_t n = 1) { _done.add(c, n); } + ASMJIT_INLINE uint32_t getVaDone(uint32_t rc) const { return _done.get(rc); } + //! Add to the count of variables allocated. 
+ ASMJIT_INLINE void addVaDone(uint32_t rc, uint32_t n = 1) { _done.add(rc, n); } //! Get number of allocable registers per class. - ASMJIT_INLINE uint32_t getGaRegs(uint32_t c) const { - return _context->_gaRegs[c]; + ASMJIT_INLINE uint32_t getGaRegs(uint32_t rc) const { + return _context->_gaRegs[rc]; } // -------------------------------------------------------------------------- @@ -3042,7 +3041,7 @@ struct X86BaseAlloc { protected: // Just to prevent calling these methods by X86Context::translate(). - ASMJIT_INLINE void init(Node* node, X86VarMap* map); + ASMJIT_INLINE void init(HLNode* node, X86VarMap* map); ASMJIT_INLINE void cleanup(); // -------------------------------------------------------------------------- @@ -3065,7 +3064,7 @@ protected: X86Compiler* _compiler; //! Node. - Node* _node; + HLNode* _node; //! Variable map. X86VarMap* _map; @@ -3085,7 +3084,7 @@ protected: // [asmjit::X86BaseAlloc - Init / Cleanup] // ============================================================================ -ASMJIT_INLINE void X86BaseAlloc::init(Node* node, X86VarMap* map) { +ASMJIT_INLINE void X86BaseAlloc::init(HLNode* node, X86VarMap* map) { _node = node; _map = map; @@ -3138,16 +3137,16 @@ ASMJIT_INLINE void X86BaseAlloc::unuseBefore() { uint32_t count = getVaCountByClass(C); const uint32_t checkFlags = - kVarAttrInOutReg | - kVarAttrInMem | - kVarAttrInArg | - kVarAttrInCall | - kVarAttrInConv ; + kVarAttrXReg | + kVarAttrRMem | + kVarAttrRFunc | + kVarAttrRCall | + kVarAttrRConv ; for (uint32_t i = 0; i < count; i++) { VarAttr* va = &list[i]; - if ((va->getFlags() & checkFlags) == kVarAttrOutReg) { + if ((va->getFlags() & checkFlags) == kVarAttrWReg) { _context->unuse(va->getVd()); } } @@ -3185,7 +3184,7 @@ struct X86VarAlloc : public X86BaseAlloc { // [Run] // -------------------------------------------------------------------------- - ASMJIT_INLINE Error run(Node* node); + ASMJIT_INLINE Error run(HLNode* node); // 
-------------------------------------------------------------------------- // [Init / Cleanup] @@ -3194,7 +3193,7 @@ struct X86VarAlloc : public X86BaseAlloc { protected: // Just to prevent calling these methods by X86Context::translate(). - ASMJIT_INLINE void init(Node* node, X86VarMap* map); + ASMJIT_INLINE void init(HLNode* node, X86VarMap* map); ASMJIT_INLINE void cleanup(); // -------------------------------------------------------------------------- @@ -3250,7 +3249,7 @@ protected: // [asmjit::X86VarAlloc - Run] // ============================================================================ -ASMJIT_INLINE Error X86VarAlloc::run(Node* node_) { +ASMJIT_INLINE Error X86VarAlloc::run(HLNode* node_) { // Initialize. X86VarMap* map = node_->getMap(); if (map == NULL) @@ -3260,33 +3259,33 @@ ASMJIT_INLINE Error X86VarAlloc::run(Node* node_) { init(node_, map); // Unuse overwritten variables. - unuseBefore(); - unuseBefore(); + unuseBefore(); + unuseBefore(); unuseBefore(); // Plan the allocation. Planner assigns input/output registers for each // variable and decides whether to allocate it in register or stack. - plan(); - plan(); + plan(); + plan(); plan(); // Spill all variables marked by plan(). - spill(); - spill(); + spill(); + spill(); spill(); // Alloc all variables marked by plan(). - alloc(); - alloc(); + alloc(); + alloc(); alloc(); // Translate node operands. 
- if (node_->getType() == kNodeTypeInst) { - InstNode* node = static_cast(node_); + if (node_->getType() == kHLNodeTypeInst) { + HLInst* node = static_cast(node_); ASMJIT_PROPAGATE_ERROR(X86Context_translateOperands(_context, node->getOpList(), node->getOpCount())); } - else if (node_->getType() == kNodeTypeSArg) { - SArgNode* node = static_cast(node_); + else if (node_->getType() == kHLNodeTypeCallArg) { + HLCallArg* node = static_cast(node_); X86CallNode* call = static_cast(node->getCall()); X86FuncDecl* decl = call->getDecl(); @@ -3323,8 +3322,8 @@ ASMJIT_INLINE Error X86VarAlloc::run(Node* node_) { } // Mark variables as modified. - modified(); - modified(); + modified(); + modified(); modified(); // Cleanup; disconnect Vd->Va. @@ -3335,8 +3334,8 @@ ASMJIT_INLINE Error X86VarAlloc::run(Node* node_) { _context->_clobberedRegs.or_(map->_clobberedRegs); // Unuse. - unuseAfter(); - unuseAfter(); + unuseAfter(); + unuseAfter(); unuseAfter(); return kErrorOk; @@ -3346,7 +3345,7 @@ ASMJIT_INLINE Error X86VarAlloc::run(Node* node_) { // [asmjit::X86VarAlloc - Init / Cleanup] // ============================================================================ -ASMJIT_INLINE void X86VarAlloc::init(Node* node, X86VarMap* map) { +ASMJIT_INLINE void X86VarAlloc::init(HLNode* node, X86VarMap* map) { X86BaseAlloc::init(node, map); // These will block planner from assigning them during planning. Planner will @@ -3388,9 +3387,9 @@ ASMJIT_INLINE void X86VarAlloc::plan() { uint32_t vaFlags = va->getFlags(); uint32_t regIndex = vd->getRegIndex(); - uint32_t regMask = (regIndex != kInvalidReg) ? IntUtil::mask(regIndex) : 0; + uint32_t regMask = (regIndex != kInvalidReg) ? Utils::mask(regIndex) : 0; - if ((vaFlags & kVarAttrInOutReg) != 0) { + if ((vaFlags & kVarAttrXReg) != 0) { // Planning register allocation. First check whether the variable is // already allocated in register and if it can stay allocated there. 
// @@ -3401,22 +3400,22 @@ ASMJIT_INLINE void X86VarAlloc::plan() { uint32_t mandatoryRegs = va->getInRegs(); uint32_t allocableRegs = va->getAllocableRegs(); - ASMJIT_TLOG("[RA-PLAN ] %s (%s)\n", + ASMJIT_TLOG("[RA-PLAN] %s (%s)\n", vd->getName(), - (vaFlags & kVarAttrInOutReg) == kVarAttrOutReg ? "Out Reg" : "In/Out Reg"); + (vaFlags & kVarAttrXReg) == kVarAttrWReg ? "R-Reg" : "X-Reg"); - ASMJIT_TLOG("[RA-PLAN ] RegMask=%08X Mandatory=%08X Allocable=%08X\n", + ASMJIT_TLOG("[RA-PLAN] RegMask=%08X Mandatory=%08X Allocable=%08X\n", regMask, mandatoryRegs, allocableRegs); if (regMask != 0) { // Special path for planning output-only registers. - if ((vaFlags & kVarAttrInOutReg) == kVarAttrOutReg) { + if ((vaFlags & kVarAttrXReg) == kVarAttrWReg) { uint32_t outRegIndex = va->getOutRegIndex(); - mandatoryRegs = (outRegIndex != kInvalidReg) ? IntUtil::mask(outRegIndex) : 0; + mandatoryRegs = (outRegIndex != kInvalidReg) ? Utils::mask(outRegIndex) : 0; if ((mandatoryRegs | allocableRegs) & regMask) { va->setOutRegIndex(regIndex); - va->orFlags(kVarAttrAllocOutDone); + va->orFlags(kVarAttrAllocWDone); if (mandatoryRegs & regMask) { // Case 'a' - 'willAlloc' contains initially all inRegs from all VarAttr's. @@ -3428,7 +3427,7 @@ ASMJIT_INLINE void X86VarAlloc::plan() { willAlloc |= regMask; } - ASMJIT_TLOG("[RA-PLAN ] WillAlloc\n"); + ASMJIT_TLOG("[RA-PLAN] WillAlloc\n"); addVaDone(C); continue; @@ -3437,7 +3436,7 @@ ASMJIT_INLINE void X86VarAlloc::plan() { else { if ((mandatoryRegs | allocableRegs) & regMask) { va->setInRegIndex(regIndex); - va->orFlags(kVarAttrAllocInDone); + va->orFlags(kVarAttrAllocRDone); if (mandatoryRegs & regMask) { // Case 'a' - 'willAlloc' contains initially all inRegs from all VarAttr's. 
@@ -3449,7 +3448,7 @@ ASMJIT_INLINE void X86VarAlloc::plan() { willAlloc |= regMask; } - ASMJIT_TLOG("[RA-PLAN ] WillAlloc\n"); + ASMJIT_TLOG("[RA-PLAN] WillAlloc\n"); addVaDone(C); continue; @@ -3457,7 +3456,7 @@ ASMJIT_INLINE void X86VarAlloc::plan() { } // Trace it here so we don't pollute log by `WillFree` of zero regMask. - ASMJIT_TLOG("[RA-PLAN ] WillFree\n"); + ASMJIT_TLOG("[RA-PLAN] WillFree\n"); } // Variable is not allocated or allocated in register that doesn't @@ -3469,23 +3468,23 @@ ASMJIT_INLINE void X86VarAlloc::plan() { // here since now we have no information about the registers that // will be freed. So instead of finding register here, we just mark // the current register (if variable is allocated) as `willFree` so - // the planner can use this information in second step to plan other - // allocation of other variables. + // the planner can use this information in the second step to plan the + // allocation as a whole. willFree |= regMask; continue; } else { // Memory access - if variable is allocated it has to be freed. - ASMJIT_TLOG("[RA-PLAN ] %s (Memory)\n", vd->getName()); + ASMJIT_TLOG("[RA-PLAN] %s (Memory)\n", vd->getName()); if (regMask != 0) { - ASMJIT_TLOG("[RA-PLAN ] WillFree\n"); + ASMJIT_TLOG("[RA-PLAN] WillFree\n"); willFree |= regMask; continue; } else { - ASMJIT_TLOG("[RA-PLAN ] Done\n"); - va->orFlags(kVarAttrAllocInDone); + ASMJIT_TLOG("[RA-PLAN] Done\n"); + va->orFlags(kVarAttrAllocRDone); addVaDone(C); continue; } @@ -3505,20 +3504,20 @@ ASMJIT_INLINE void X86VarAlloc::plan() { uint32_t vaFlags = va->getFlags(); - if ((vaFlags & kVarAttrInOutReg) != 0) { - if ((vaFlags & kVarAttrInOutReg) == kVarAttrOutReg) { - if (vaFlags & kVarAttrAllocOutDone) + if ((vaFlags & kVarAttrXReg) != 0) { + if ((vaFlags & kVarAttrXReg) == kVarAttrWReg) { + if (vaFlags & kVarAttrAllocWDone) continue; // Skip all registers that have assigned outRegIndex. Spill if occupied. 
if (va->hasOutRegIndex()) { - uint32_t outRegs = IntUtil::mask(va->getOutRegIndex()); + uint32_t outRegs = Utils::mask(va->getOutRegIndex()); willSpill |= occupied & outRegs; continue; } } else { - if (vaFlags & kVarAttrAllocInDone) + if (vaFlags & kVarAttrAllocRDone) continue; // We skip all registers that have assigned inRegIndex, indicates that @@ -3532,7 +3531,7 @@ ASMJIT_INLINE void X86VarAlloc::plan() { uint32_t m = va->getInRegs(); if (va->hasOutRegIndex()) - m |= IntUtil::mask(va->getOutRegIndex()); + m |= Utils::mask(va->getOutRegIndex()); m = va->getAllocableRegs() & ~(willAlloc ^ m); m = guessAlloc(vd, m); @@ -3550,13 +3549,14 @@ ASMJIT_INLINE void X86VarAlloc::plan() { candidateRegs = m; } + // printf("CANDIDATE: %s %08X\n", vd->getName(), homeMask); if (candidateRegs & homeMask) candidateRegs &= homeMask; - regIndex = IntUtil::findFirstBit(candidateRegs); - regMask = IntUtil::mask(regIndex); + regIndex = Utils::findFirstBit(candidateRegs); + regMask = Utils::mask(regIndex); - if ((vaFlags & kVarAttrInOutReg) == kVarAttrOutReg) { + if ((vaFlags & kVarAttrXReg) == kVarAttrWReg) { va->setOutRegIndex(regIndex); } else { @@ -3571,10 +3571,10 @@ ASMJIT_INLINE void X86VarAlloc::plan() { continue; } - else if ((vaFlags & kVarAttrInOutMem) != 0) { + else if ((vaFlags & kVarAttrXMem) != 0) { uint32_t regIndex = vd->getRegIndex(); - if (regIndex != kInvalidReg && (vaFlags & kVarAttrInOutMem) != kVarAttrOutMem) { - willSpill |= IntUtil::mask(regIndex); + if (regIndex != kInvalidReg && (vaFlags & kVarAttrXMem) != kVarAttrWMem) { + willSpill |= Utils::mask(regIndex); } } } @@ -3600,7 +3600,7 @@ ASMJIT_INLINE void X86VarAlloc::spill() { do { // We always advance one more to destroy the bit that we have found. 
- uint32_t bitIndex = IntUtil::findFirstBit(m) + 1; + uint32_t bitIndex = Utils::findFirstBit(m) + 1; i += bitIndex; m >>= bitIndex; @@ -3609,7 +3609,7 @@ ASMJIT_INLINE void X86VarAlloc::spill() { ASMJIT_ASSERT(vd != NULL); VarAttr* va = vd->getVa(); - ASMJIT_ASSERT(va == NULL || !va->hasFlag(kVarAttrInOutReg)); + ASMJIT_ASSERT(va == NULL || !va->hasFlag(kVarAttrXReg)); if (vd->isModified() && availableRegs) { // Don't check for alternatives if the variable has to be spilled. @@ -3617,8 +3617,8 @@ ASMJIT_INLINE void X86VarAlloc::spill() { uint32_t altRegs = guessSpill(vd, availableRegs); if (altRegs != 0) { - uint32_t regIndex = IntUtil::findFirstBit(altRegs); - uint32_t regMask = IntUtil::mask(regIndex); + uint32_t regIndex = Utils::findFirstBit(altRegs); + uint32_t regMask = Utils::mask(regIndex); _context->move(vd, regIndex); availableRegs ^= regMask; @@ -3652,7 +3652,7 @@ ASMJIT_INLINE void X86VarAlloc::alloc() { VarAttr* aVa = &list[i]; VarData* aVd = aVa->getVd(); - if ((aVa->getFlags() & (kVarAttrInReg | kVarAttrAllocInDone)) != kVarAttrInReg) + if ((aVa->getFlags() & (kVarAttrRReg | kVarAttrAllocRDone)) != kVarAttrRReg) continue; uint32_t aIndex = aVd->getRegIndex(); @@ -3671,12 +3671,12 @@ ASMJIT_INLINE void X86VarAlloc::alloc() { VarAttr* bVa = bVd->getVa(); _context->swapGp(aVd, bVd); - aVa->orFlags(kVarAttrAllocInDone); + aVa->orFlags(kVarAttrAllocRDone); addVaDone(C); // Doublehit, two registers allocated by a single swap. 
if (bVa != NULL && bVa->getInRegIndex() == aIndex) { - bVa->orFlags(kVarAttrAllocInDone); + bVa->orFlags(kVarAttrAllocRDone); addVaDone(C); } @@ -3687,7 +3687,7 @@ ASMJIT_INLINE void X86VarAlloc::alloc() { else if (aIndex != kInvalidReg) { _context->move(aVd, bIndex); - aVa->orFlags(kVarAttrAllocInDone); + aVa->orFlags(kVarAttrAllocRDone); addVaDone(C); didWork = true; @@ -3696,7 +3696,7 @@ ASMJIT_INLINE void X86VarAlloc::alloc() { else { _context->alloc(aVd, bIndex); - aVa->orFlags(kVarAttrAllocInDone); + aVa->orFlags(kVarAttrAllocRDone); addVaDone(C); didWork = true; @@ -3710,7 +3710,7 @@ ASMJIT_INLINE void X86VarAlloc::alloc() { VarAttr* va = &list[i]; VarData* vd = va->getVd(); - if ((va->getFlags() & (kVarAttrInOutReg | kVarAttrAllocOutDone)) != kVarAttrOutReg) + if ((va->getFlags() & (kVarAttrXReg | kVarAttrAllocWDone)) != kVarAttrWReg) continue; uint32_t regIndex = va->getOutRegIndex(); @@ -3721,7 +3721,7 @@ ASMJIT_INLINE void X86VarAlloc::alloc() { _context->attach(vd, regIndex, false); } - va->orFlags(kVarAttrAllocOutDone); + va->orFlags(kVarAttrAllocWDone); addVaDone(C); } } @@ -3730,40 +3730,267 @@ ASMJIT_INLINE void X86VarAlloc::alloc() { // [asmjit::X86VarAlloc - GuessAlloc / GuessSpill] // ============================================================================ +#if 0 +// TODO: This works, but should be improved a bit. The idea is to follow code +// flow and to restrict the possible registers where to allocate as much as +// possible so we won't allocate to a register which is home of some variable +// that's gonna be used together with `vd`. The previous implementation didn't +// care about it and produced suboptimal results even in code which didn't +// require any allocs & spills. +enum { kMaxGuessFlow = 10 }; + +struct GuessFlowData { + ASMJIT_INLINE void init(HLNode* node, uint32_t counter, uint32_t safeRegs) { + _node = node; + _counter = counter; + _safeRegs = safeRegs; + } + + //! Node to start. + HLNode* _node; + //! 
Number of instructions processed from the beginning. + uint32_t _counter; + //! Safe registers, which can be used for the allocation. + uint32_t _safeRegs; +}; + +template +ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRegs) { + ASMJIT_TLOG("[RA-GUESS] === %s (Input=%08X) ===\n", vd->getName(), allocableRegs); + ASMJIT_ASSERT(allocableRegs != 0); + + return allocableRegs; + + // Stop now if there is only one bit (register) set in `allocableRegs` mask. + uint32_t safeRegs = allocableRegs; + if (Utils::isPowerOf2(safeRegs)) + return safeRegs; + + uint32_t counter = 0; + uint32_t maxInst = _compiler->getMaxLookAhead(); + + uint32_t cId = vd->getLocalId(); + uint32_t localToken = _compiler->_generateUniqueToken(); + + uint32_t gfIndex = 0; + GuessFlowData gfArray[kMaxGuessFlow]; + + HLNode* node = _node; + + // Mark this node and also exit node, it will terminate the loop if encountered. + node->setTokenId(localToken); + _context->getFunc()->getExitNode()->setTokenId(localToken); + + // TODO: I don't like this jump, maybe some refactor would help to eliminate it. + goto _Advance; + + // Look ahead and calculate mask of special registers on both - input/output. + for (;;) { + do { + ASMJIT_TSEC({ + X86Context_traceNode(_context, node, " "); + }); + + // Terminate if we have seen this node already. + if (node->hasTokenId(localToken)) + break; + + node->setTokenId(localToken); + counter++; + + // Terminate if the variable is dead here. + if (node->hasLiveness() && !node->getLiveness()->getBit(cId)) { + ASMJIT_TLOG("[RA-GUESS] %s (Terminating, Not alive here)\n", vd->getName()); + break; + } + + if (node->hasState()) { + // If this node contains a state, we have to consider only the state + // and then we can terminate safely - this happens if we jumped to a + // label that is backward (i.e. start of the loop). If we survived + // the liveness check it means that the variable is actually used. 
+ X86VarState* state = node->getState(); + uint32_t homeRegs = 0; + uint32_t tempRegs = 0; + + VarData** vdArray = state->getListByClass(C); + uint32_t vdCount = _compiler->getRegCount().get(C); + + for (uint32_t vdIndex = 0; vdIndex < vdCount; vdIndex++) { + if (vdArray[vdIndex] != NULL) + tempRegs |= Utils::mask(vdIndex); + + if (vdArray[vdIndex] == vd) + homeRegs = Utils::mask(vdIndex); + } + + tempRegs = safeRegs & ~tempRegs; + if (!tempRegs) + goto _Done; + safeRegs = tempRegs; + + tempRegs = safeRegs & homeRegs; + if (!tempRegs) + goto _Done; + safeRegs = tempRegs; + + goto _Done; + } + else { + // Process the current node if it has any variables associated in. + X86VarMap* map = node->getMap(); + if (map != NULL) { + VarAttr* vaList = map->getVaListByClass(C); + uint32_t vaCount = map->getVaCountByClass(C); + + uint32_t homeRegs = 0; + uint32_t tempRegs = safeRegs; + bool found = false; + + for (uint32_t vaIndex = 0; vaIndex < vaCount; vaIndex++) { + VarAttr* va = &vaList[vaIndex]; + + if (va->getVd() == vd) { + found = true; + + // Terminate if the variable is overwritten here. + if (!(va->getFlags() & kVarAttrRAll)) + goto _Done; + + uint32_t mask = va->getAllocableRegs(); + if (mask != 0) { + tempRegs &= mask; + if (!tempRegs) + goto _Done; + safeRegs = tempRegs; + } + + mask = va->getInRegs(); + if (mask != 0) { + tempRegs &= mask; + if (!tempRegs) + goto _Done; + + safeRegs = tempRegs; + goto _Done; + } + } + else { + // It happens often that one variable is used across many blocks of + // assembly code. It can sometimes cause one variable to be allocated + // in a different register, which can cause state switch to generate + // moves in case of jumps and state intersections. We try to prevent + // this case by also considering variables' home registers. 
+ homeRegs |= va->getVd()->getHomeMask(); + } + } + + tempRegs &= ~(map->_outRegs.get(C) | map->_clobberedRegs.get(C)); + if (!found) + tempRegs &= ~map->_inRegs.get(C); + + if (!tempRegs) + goto _Done; + safeRegs = tempRegs; + + if (homeRegs) { + tempRegs = safeRegs & ~homeRegs; + if (!tempRegs) + goto _Done; + safeRegs = tempRegs; + } + } + } + +_Advance: + // Terminate if this is a return node. + if (node->hasFlag(kHLNodeFlagIsRet)) + goto _Done; + + // Advance on non-conditional jump. + if (node->hasFlag(kHLNodeFlagIsJmp)) { + // Stop on a jump that is not followed. + node = static_cast(node)->getTarget(); + if (node == NULL) + break; + continue; + } + + // Split flow on a conditional jump. + if (node->hasFlag(kHLNodeFlagIsJcc)) { + // Put the next node on the stack and follow the target if possible. + HLNode* next = node->getNext(); + if (next != NULL && gfIndex < kMaxGuessFlow) + gfArray[gfIndex++].init(next, counter, safeRegs); + + node = static_cast(node)->getTarget(); + if (node == NULL) + break; + continue; + } + + node = node->getNext(); + ASMJIT_ASSERT(node != NULL); + } while (counter < maxInst); + +_Done: + for (;;) { + if (gfIndex == 0) + goto _Ret; + + GuessFlowData* data = &gfArray[--gfIndex]; + node = data->_node; + counter = data->_counter; + + uint32_t tempRegs = safeRegs & data->_safeRegs; + if (!tempRegs) + continue; + + safeRegs = tempRegs; + break; + } + } + +_Ret: + ASMJIT_TLOG("[RA-GUESS] === %s (Output=%08X) ===\n", vd->getName(), safeRegs); + return safeRegs; +} +#endif + template ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRegs) { ASMJIT_ASSERT(allocableRegs != 0); // Stop now if there is only one bit (register) set in `allocableRegs` mask. 
- if (IntUtil::isPowerOf2(allocableRegs)) + if (Utils::isPowerOf2(allocableRegs)) return allocableRegs; - uint32_t cId = vd->getContextId(); + uint32_t cId = vd->getLocalId(); uint32_t safeRegs = allocableRegs; uint32_t i; uint32_t maxLookAhead = _compiler->getMaxLookAhead(); // Look ahead and calculate mask of special registers on both - input/output. - Node* node = _node; + HLNode* node = _node; for (i = 0; i < maxLookAhead; i++) { - VarBits* liveness = node->getLiveness(); + BitArray* liveness = node->getLiveness(); // If the variable becomes dead it doesn't make sense to continue. if (liveness != NULL && !liveness->getBit(cId)) break; - // Stop on 'RetNode' and 'EndNode. - if (node->hasFlag(kNodeFlagIsRet)) + // Stop on `HLSentinel` and `HLRet`. + if (node->hasFlag(kHLNodeFlagIsRet)) break; // Stop on conditional jump, we don't follow them. - if (node->hasFlag(kNodeFlagIsJcc)) + if (node->hasFlag(kHLNodeFlagIsJcc)) break; // Advance on non-conditional jump. - if (node->hasFlag(kNodeFlagIsJmp)) { - node = static_cast(node)->getTarget(); + if (node->hasFlag(kHLNodeFlagIsJmp)) { + node = static_cast(node)->getTarget(); // Stop on jump that is not followed. if (node == NULL) break; @@ -3779,7 +4006,7 @@ ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRe if (va != NULL) { // If the variable is overwritten it doesn't mase sense to continue. 
- if (!(va->getFlags() & kVarAttrInAll)) + if (!(va->getFlags() & kVarAttrRAll)) break; mask = va->getAllocableRegs(); @@ -3816,6 +4043,7 @@ ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRe return safeRegs; } + template ASMJIT_INLINE uint32_t X86VarAlloc::guessSpill(VarData* vd, uint32_t allocableRegs) { ASMJIT_ASSERT(allocableRegs != 0); @@ -3835,11 +4063,11 @@ ASMJIT_INLINE void X86VarAlloc::modified() { for (uint32_t i = 0; i < count; i++) { VarAttr* va = &list[i]; - if (va->hasFlag(kVarAttrOutReg)) { + if (va->hasFlag(kVarAttrWReg)) { VarData* vd = va->getVd(); uint32_t regIndex = vd->getRegIndex(); - uint32_t regMask = IntUtil::mask(regIndex); + uint32_t regMask = Utils::mask(regIndex); vd->setModified(true); _context->_x86State._modified.or_(C, regMask); @@ -4066,9 +4294,9 @@ ASMJIT_INLINE void X86CallAlloc::plan() { uint32_t vaFlags = va->getFlags(); uint32_t regIndex = vd->getRegIndex(); - uint32_t regMask = (regIndex != kInvalidReg) ? IntUtil::mask(regIndex) : 0; + uint32_t regMask = (regIndex != kInvalidReg) ? Utils::mask(regIndex) : 0; - if ((vaFlags & kVarAttrInReg) != 0) { + if ((vaFlags & kVarAttrRReg) != 0) { // Planning register allocation. First check whether the variable is // already allocated in register and if it can stay there. Function // arguments are passed either in a specific register or in stack so @@ -4084,7 +4312,7 @@ ASMJIT_INLINE void X86CallAlloc::plan() { // is not clobbered (i.e. it will survive function call). 
if ((regMask & inRegs) != 0 || ((regMask & ~clobbered) != 0 && (vaFlags & kVarAttrUnuse) == 0)) { va->setInRegIndex(regIndex); - va->orFlags(kVarAttrAllocInDone); + va->orFlags(kVarAttrAllocRDone); addVaDone(C); } else { @@ -4097,7 +4325,7 @@ ASMJIT_INLINE void X86CallAlloc::plan() { willFree |= regMask; } else { - va->orFlags(kVarAttrAllocInDone); + va->orFlags(kVarAttrAllocRDone); addVaDone(C); } } @@ -4116,14 +4344,14 @@ ASMJIT_INLINE void X86CallAlloc::plan() { VarData* vd = va->getVd(); uint32_t vaFlags = va->getFlags(); - if ((vaFlags & kVarAttrAllocInDone) != 0 || (vaFlags & kVarAttrInReg) == 0) + if ((vaFlags & kVarAttrAllocRDone) != 0 || (vaFlags & kVarAttrRReg) == 0) continue; // All registers except Gp used by call itself must have inRegIndex. uint32_t m = va->getInRegs(); if (C != kX86RegClassGp || m) { ASMJIT_ASSERT(m != 0); - va->setInRegIndex(IntUtil::findFirstBit(m)); + va->setInRegIndex(Utils::findFirstBit(m)); willSpill |= occupied & m; continue; } @@ -4139,11 +4367,11 @@ ASMJIT_INLINE void X86CallAlloc::plan() { candidateRegs = m; } - if (!(vaFlags & (kVarAttrOutReg | kVarAttrUnuse)) && (candidateRegs & ~clobbered)) + if (!(vaFlags & (kVarAttrWReg | kVarAttrUnuse)) && (candidateRegs & ~clobbered)) candidateRegs &= ~clobbered; - uint32_t regIndex = IntUtil::findFirstBit(candidateRegs); - uint32_t regMask = IntUtil::mask(regIndex); + uint32_t regIndex = Utils::findFirstBit(candidateRegs); + uint32_t regMask = Utils::mask(regIndex); va->setInRegIndex(regIndex); va->setInRegs(regMask); @@ -4177,7 +4405,7 @@ ASMJIT_INLINE void X86CallAlloc::spill() { do { // We always advance one more to destroy the bit that we have found. 
- uint32_t bitIndex = IntUtil::findFirstBit(m) + 1; + uint32_t bitIndex = Utils::findFirstBit(m) + 1; i += bitIndex; m >>= bitIndex; @@ -4190,8 +4418,8 @@ ASMJIT_INLINE void X86CallAlloc::spill() { uint32_t m = guessSpill(vd, availableRegs); if (m != 0) { - uint32_t regIndex = IntUtil::findFirstBit(m); - uint32_t regMask = IntUtil::mask(regIndex); + uint32_t regIndex = Utils::findFirstBit(m); + uint32_t regMask = Utils::mask(regIndex); _context->move(vd, regIndex); availableRegs ^= regMask; @@ -4223,7 +4451,7 @@ ASMJIT_INLINE void X86CallAlloc::alloc() { VarAttr* aVa = &list[i]; VarData* aVd = aVa->getVd(); - if ((aVa->getFlags() & (kVarAttrInReg | kVarAttrAllocInDone)) != kVarAttrInReg) + if ((aVa->getFlags() & (kVarAttrRReg | kVarAttrAllocRDone)) != kVarAttrRReg) continue; uint32_t aIndex = aVd->getRegIndex(); @@ -4243,12 +4471,12 @@ ASMJIT_INLINE void X86CallAlloc::alloc() { if (C == kX86RegClassGp) { _context->swapGp(aVd, bVd); - aVa->orFlags(kVarAttrAllocInDone); + aVa->orFlags(kVarAttrAllocRDone); addVaDone(C); // Doublehit, two registers allocated by a single swap. 
if (bVa != NULL && bVa->getInRegIndex() == aIndex) { - bVa->orFlags(kVarAttrAllocInDone); + bVa->orFlags(kVarAttrAllocRDone); addVaDone(C); } @@ -4259,7 +4487,7 @@ ASMJIT_INLINE void X86CallAlloc::alloc() { else if (aIndex != kInvalidReg) { _context->move(aVd, bIndex); - aVa->orFlags(kVarAttrAllocInDone); + aVa->orFlags(kVarAttrAllocRDone); addVaDone(C); didWork = true; @@ -4268,7 +4496,7 @@ ASMJIT_INLINE void X86CallAlloc::alloc() { else { _context->alloc(aVd, bIndex); - aVa->orFlags(kVarAttrAllocInDone); + aVa->orFlags(kVarAttrAllocRDone); addVaDone(C); didWork = true; @@ -4286,11 +4514,11 @@ ASMJIT_INLINE void X86CallAlloc::allocImmsOnStack() { X86CallNode* node = getNode(); X86FuncDecl* decl = node->getDecl(); - uint32_t argCount = decl->getArgCount(); - Operand* argList = node->_args; + uint32_t argCount = decl->getNumArgs(); + Operand* args = node->_args; for (uint32_t i = 0; i < argCount; i++) { - Operand& op = argList[i]; + Operand& op = args[i]; if (!op.isImm()) continue; @@ -4320,7 +4548,7 @@ ASMJIT_INLINE void X86CallAlloc::duplicate() { for (uint32_t i = 0; i < count; i++) { VarAttr* va = &list[i]; - if (!va->hasFlag(kVarAttrInReg)) + if (!va->hasFlag(kVarAttrRReg)) continue; uint32_t inRegs = va->getInRegs(); @@ -4332,14 +4560,14 @@ ASMJIT_INLINE void X86CallAlloc::duplicate() { ASMJIT_ASSERT(regIndex != kInvalidReg); - inRegs &= ~IntUtil::mask(regIndex); + inRegs &= ~Utils::mask(regIndex); if (!inRegs) continue; for (uint32_t dupIndex = 0; inRegs != 0; dupIndex++, inRegs >>= 1) { if (inRegs & 0x1) { _context->emitMove(vd, dupIndex, regIndex, "Duplicate"); - _context->_clobberedRegs.or_(C, IntUtil::mask(dupIndex)); + _context->_clobberedRegs.or_(C, Utils::mask(dupIndex)); } } } @@ -4354,7 +4582,7 @@ ASMJIT_INLINE uint32_t X86CallAlloc::guessAlloc(VarData* vd, uint32_t allocableR ASMJIT_ASSERT(allocableRegs != 0); // Stop now if there is only one bit (register) set in 'allocableRegs' mask. 
- if (IntUtil::isPowerOf2(allocableRegs)) + if (Utils::isPowerOf2(allocableRegs)) return allocableRegs; uint32_t i; @@ -4362,19 +4590,19 @@ ASMJIT_INLINE uint32_t X86CallAlloc::guessAlloc(VarData* vd, uint32_t allocableR uint32_t maxLookAhead = _compiler->getMaxLookAhead(); // Look ahead and calculate mask of special registers on both - input/output. - Node* node = _node; + HLNode* node = _node; for (i = 0; i < maxLookAhead; i++) { - // Stop on 'RetNode' and 'EndNode. - if (node->hasFlag(kNodeFlagIsRet)) + // Stop on 'HLRet' and 'HLSentinel. + if (node->hasFlag(kHLNodeFlagIsRet)) break; // Stop on conditional jump, we don't follow them. - if (node->hasFlag(kNodeFlagIsJcc)) + if (node->hasFlag(kHLNodeFlagIsJcc)) break; // Advance on non-conditional jump. - if (node->hasFlag(kNodeFlagIsJmp)) { - node = static_cast(node)->getTarget(); + if (node->hasFlag(kHLNodeFlagIsJmp)) { + node = static_cast(node)->getTarget(); // Stop on jump that is not followed. if (node == NULL) break; @@ -4437,7 +4665,7 @@ ASMJIT_INLINE void X86CallAlloc::save() { ASMJIT_ASSERT(vd->isModified()); VarAttr* va = vd->getVa(); - if (va == NULL || (va->getFlags() & (kVarAttrOutReg | kVarAttrUnuse)) == 0) { + if (va == NULL || (va->getFlags() & (kVarAttrWReg | kVarAttrUnuse)) == 0) { _context->save(vd); } } @@ -4464,7 +4692,7 @@ ASMJIT_INLINE void X86CallAlloc::clobber() { VarAttr* va = vd->getVa(); uint32_t vdState = kVarStateNone; - if (!vd->isModified() || (va != NULL && (va->getFlags() & (kVarAttrOutAll | kVarAttrUnuse)) != 0)) { + if (!vd->isModified() || (va != NULL && (va->getFlags() & (kVarAttrWAll | kVarAttrUnuse)) != 0)) { vdState = kVarStateMem; } @@ -4482,11 +4710,11 @@ ASMJIT_INLINE void X86CallAlloc::ret() { X86FuncDecl* decl = node->getDecl(); uint32_t i; - Operand* retList = node->_ret; + Operand* rets = node->_ret; for (i = 0; i < 2; i++) { const FuncInOut& ret = decl->getRet(i); - Operand* op = &retList[i]; + Operand* op = &rets[i]; if (!ret.hasRegIndex() || !op->isVar()) 
continue; @@ -4603,11 +4831,11 @@ static Error X86Context_initFunc(X86Context* self, X86FuncNode* func) { func->_saveRestoreRegs.set(kX86RegClassK , 0); func->_saveRestoreRegs.set(kX86RegClassXyz, clobberedRegs.get(kX86RegClassXyz) & decl->getPreserved(kX86RegClassXyz)); - ASMJIT_ASSERT(!func->_saveRestoreRegs.has(kX86RegClassGp, IntUtil::mask(kX86RegIndexSp))); + ASMJIT_ASSERT(!func->_saveRestoreRegs.has(kX86RegClassGp, Utils::mask(kX86RegIndexSp))); // Setup required stack alignment and kFuncFlagIsStackMisaligned. { - uint32_t requiredStackAlignment = IntUtil::iMax(self->_memMaxAlign, self->getRegSize()); + uint32_t requiredStackAlignment = Utils::iMax(self->_memMaxAlign, self->getRegSize()); if (requiredStackAlignment < 16) { // Require 16-byte alignment if 8-byte vars are used. @@ -4615,7 +4843,7 @@ static Error X86Context_initFunc(X86Context* self, X86FuncNode* func) { requiredStackAlignment = 16; else if (func->_saveRestoreRegs.get(kX86RegClassMm) || func->_saveRestoreRegs.get(kX86RegClassXyz)) requiredStackAlignment = 16; - else if (IntUtil::inInterval(func->getRequiredStackAlignment(), 8, 16)) + else if (Utils::inInterval(func->getRequiredStackAlignment(), 8, 16)) requiredStackAlignment = 16; } @@ -4633,23 +4861,23 @@ static Error X86Context_initFunc(X86Context* self, X86FuncNode* func) { // Adjust stack pointer if manual stack alignment is needed. if (func->isStackMisaligned() && func->isNaked()) { // Get a memory cell where the original stack frame will be stored. - MemCell* cell = self->_newStackCell(regSize, regSize); + VarCell* cell = self->_newStackCell(regSize, regSize); if (cell == NULL) - return self->getError(); // The error has already been set. + return self->getLastError(); // The error has already been set. 
func->addFuncFlags(kFuncFlagIsStackAdjusted); self->_stackFrameCell = cell; if (decl->getArgStackSize() > 0) { - func->addFuncFlags(kX86FuncFlagMoveArgs); + func->addFuncFlags(kFuncFlagX86MoveArgs); func->setExtraStackSize(decl->getArgStackSize()); } // Get temporary register which will be used to align the stack frame. - uint32_t fRegMask = IntUtil::bits(self->_regCount.getGp()); + uint32_t fRegMask = Utils::bits(self->_regCount.getGp()); uint32_t stackFrameCopyRegs; - fRegMask &= ~(decl->getUsed(kX86RegClassGp) | IntUtil::mask(kX86RegIndexSp)); + fRegMask &= ~(decl->getUsed(kX86RegClassGp) | Utils::mask(kX86RegIndexSp)); stackFrameCopyRegs = fRegMask; // Try to remove modified registers from the mask. @@ -4664,32 +4892,32 @@ static Error X86Context_initFunc(X86Context* self, X86FuncNode* func) { ASMJIT_ASSERT(fRegMask != 0); - uint32_t fRegIndex = IntUtil::findFirstBit(fRegMask); + uint32_t fRegIndex = Utils::findFirstBit(fRegMask); func->_stackFrameRegIndex = static_cast(fRegIndex); // We have to save the register on the stack (it will be the part of prolog // and epilog), however we shouldn't save it twice, so we will remove it // from '_saveRestoreRegs' in case that it is preserved. 
- fRegMask = IntUtil::mask(fRegIndex); + fRegMask = Utils::mask(fRegIndex); if ((fRegMask & decl->getPreserved(kX86RegClassGp)) != 0) { func->_saveRestoreRegs.andNot(kX86RegClassGp, fRegMask); func->_isStackFrameRegPreserved = true; } - if (func->hasFuncFlag(kX86FuncFlagMoveArgs)) { + if (func->hasFuncFlag(kFuncFlagX86MoveArgs)) { uint32_t maxRegs = (func->getArgStackSize() + regSize - 1) / regSize; stackFrameCopyRegs &= ~fRegMask; tRegMask = stackFrameCopyRegs & self->getClobberedRegs(kX86RegClassGp); - uint32_t tRegCnt = IntUtil::bitCount(tRegMask); + uint32_t tRegCnt = Utils::bitCount(tRegMask); if (tRegCnt > 1 || (tRegCnt > 0 && tRegCnt <= maxRegs)) stackFrameCopyRegs = tRegMask; else - stackFrameCopyRegs = IntUtil::keepNOnesFromRight(stackFrameCopyRegs, IntUtil::iMin(maxRegs, 2)); + stackFrameCopyRegs = Utils::keepNOnesFromRight(stackFrameCopyRegs, Utils::iMin(maxRegs, 2)); func->_saveRestoreRegs.or_(kX86RegClassGp, stackFrameCopyRegs & decl->getPreserved(kX86RegClassGp)); - IntUtil::indexNOnesFromRight(func->_stackFrameCopyGpIndex, stackFrameCopyRegs, maxRegs); + Utils::indexNOnesFromRight(func->_stackFrameCopyGpIndex, stackFrameCopyRegs, maxRegs); } } // If function is not naked we generate standard "EBP/RBP" stack frame. @@ -4703,24 +4931,18 @@ static Error X86Context_initFunc(X86Context* self, X86FuncNode* func) { ASMJIT_PROPAGATE_ERROR(self->resolveCellOffsets()); // Adjust stack pointer if requested memory can't fit into "Red Zone" or "Spill Zone". - if (self->_memAllTotal > IntUtil::iMax(func->getRedZoneSize(), func->getSpillZoneSize())) { + if (self->_memAllTotal > Utils::iMax(func->getRedZoneSize(), func->getSpillZoneSize())) { func->addFuncFlags(kFuncFlagIsStackAdjusted); } // Setup stack size used to save preserved registers. 
{ - uint32_t memGpSize = IntUtil::bitCount(func->_saveRestoreRegs.get(kX86RegClassGp )) * regSize; - uint32_t memMmSize = IntUtil::bitCount(func->_saveRestoreRegs.get(kX86RegClassMm )) * 8; - uint32_t memXmmSize = IntUtil::bitCount(func->_saveRestoreRegs.get(kX86RegClassXyz)) * 16; + uint32_t memGpSize = Utils::bitCount(func->_saveRestoreRegs.get(kX86RegClassGp )) * regSize; + uint32_t memMmSize = Utils::bitCount(func->_saveRestoreRegs.get(kX86RegClassMm )) * 8; + uint32_t memXmmSize = Utils::bitCount(func->_saveRestoreRegs.get(kX86RegClassXyz)) * 16; - if (func->hasFuncFlag(kX86FuncFlagPushPop)) { - func->_pushPopStackSize = memGpSize; - func->_moveStackSize = memXmmSize + IntUtil::alignTo(memMmSize, 16); - } - else { - func->_pushPopStackSize = 0; - func->_moveStackSize = memXmmSize + IntUtil::alignTo(memMmSize + memGpSize, 16); - } + func->_pushPopStackSize = memGpSize; + func->_moveStackSize = memXmmSize + Utils::alignTo(memMmSize, 16); } // Setup adjusted stack size. @@ -4741,12 +4963,12 @@ static Error X86Context_initFunc(X86Context* self, X86FuncNode* func) { v += func->getPushPopStackSize(); // Calculate the final offset to keep stack alignment. - func->_alignStackSize = IntUtil::deltaTo(v, func->getRequiredStackAlignment()); + func->_alignStackSize = Utils::alignDiff(v, func->getRequiredStackAlignment()); } // Memory stack size. func->_memStackSize = self->_memAllTotal; - func->_alignedMemStackSize = IntUtil::alignTo(func->_memStackSize, func->_requiredStackAlignment); + func->_alignedMemStackSize = Utils::alignTo(func->_memStackSize, func->_requiredStackAlignment); if (func->isNaked()) { self->_argBaseReg = kX86RegIndexSp; @@ -4794,13 +5016,13 @@ static Error X86Context_initFunc(X86Context* self, X86FuncNode* func) { } //! 
\internal -static Error X86Context_patchFuncMem(X86Context* self, X86FuncNode* func, Node* stop) { +static Error X86Context_patchFuncMem(X86Context* self, X86FuncNode* func, HLNode* stop) { X86Compiler* compiler = self->getCompiler(); - Node* node = func; + HLNode* node = func; do { - if (node->getType() == kNodeTypeInst) { - InstNode* iNode = static_cast(node); + if (node->getType() == kHLNodeTypeInst) { + HLInst* iNode = static_cast(node); if (iNode->hasMemOp()) { X86Mem* m = iNode->getMemOp(); @@ -4814,7 +5036,7 @@ static Error X86Context_patchFuncMem(X86Context* self, X86FuncNode* func, Node* m->_vmem.displacement += self->_argBaseOffset + vd->getMemOffset(); } else { - MemCell* cell = vd->getMemCell(); + VarCell* cell = vd->getMemCell(); ASMJIT_ASSERT(cell != NULL); m->_vmem.base = self->_varBaseReg; @@ -4885,9 +5107,8 @@ static Error X86Context_translatePrologEpilog(X86Context* self, X86FuncNode* fun fpReg.setIndex(func->getStackFrameRegIndex()); fpOffset = x86::ptr(self->_zsp, self->_varBaseOffset + static_cast(self->_stackFrameCell->getOffset())); - earlyPushPop = func->hasFuncFlag(kX86FuncFlagPushPop); - if (earlyPushPop) - self->emitPushSequence(regsGp); + earlyPushPop = true; + self->emitPushSequence(regsGp); if (func->isStackFrameRegPreserved()) compiler->emit(kX86InstIdPush, fpReg); @@ -4900,7 +5121,7 @@ static Error X86Context_translatePrologEpilog(X86Context* self, X86FuncNode* fun compiler->emit(kX86InstIdMov, fpReg, self->_zsp); } - if (func->hasFuncFlag(kX86FuncFlagPushPop) && !earlyPushPop) { + if (!earlyPushPop) { self->emitPushSequence(regsGp); if (func->isStackMisaligned() && regsGp != 0) useLeaEpilog = true; @@ -4939,20 +5160,11 @@ static Error X86Context_translatePrologEpilog(X86Context* self, X86FuncNode* fun } } - if (!func->hasFuncFlag(kX86FuncFlagPushPop)) { - for (i = 0, mask = regsGp; mask != 0; i++, mask >>= 1) { - if (mask & 0x1) { - compiler->emit(kX86InstIdMov, x86::ptr(self->_zsp, stackPtr), gpReg.setIndex(i)); - stackPtr += 
regSize; - } - } - } - // -------------------------------------------------------------------------- // [Move-Args] // -------------------------------------------------------------------------- - if (func->hasFuncFlag(kX86FuncFlagMoveArgs)) { + if (func->hasFuncFlag(kFuncFlagX86MoveArgs)) { uint32_t argStackPos = 0; uint32_t argStackSize = decl->getArgStackSize(); @@ -4980,7 +5192,7 @@ static Error X86Context_translatePrologEpilog(X86Context* self, X86FuncNode* fun X86Mem mDst = x86::ptr(self->_zsp, dDst); while (moveIndex < moveCount) { - uint32_t numMovs = IntUtil::iMin(moveCount - moveIndex, numRegs); + uint32_t numMovs = Utils::iMin(moveCount - moveIndex, numRegs); for (i = 0; i < numMovs; i++) compiler->emit(kX86InstIdMov, r[i], mSrc.adjusted((moveIndex + i) * regSize)); @@ -5014,15 +5226,6 @@ static Error X86Context_translatePrologEpilog(X86Context* self, X86FuncNode* fun } } - if (!func->hasFuncFlag(kX86FuncFlagPushPop)) { - for (i = 0, mask = regsGp; mask != 0; i++, mask >>= 1) { - if (mask & 0x1) { - compiler->emit(kX86InstIdMov, gpReg.setIndex(i), x86::ptr(self->_zsp, stackPtr)); - stackPtr += regSize; - } - } - } - // Adjust stack. if (useLeaEpilog) { compiler->emit(kX86InstIdLea, self->_zsp, x86::ptr(fpReg, -static_cast(func->getPushPopStackSize()))); @@ -5033,19 +5236,19 @@ static Error X86Context_translatePrologEpilog(X86Context* self, X86FuncNode* fun } // Restore Gp (Push/Pop). - if (func->hasFuncFlag(kX86FuncFlagPushPop) && !earlyPushPop) + if (!earlyPushPop) self->emitPopSequence(regsGp); // Emms. - if (func->hasFuncFlag(kX86FuncFlagEmms)) + if (func->hasFuncFlag(kFuncFlagX86Emms)) compiler->emit(kX86InstIdEmms); // MFence/SFence/LFence. 
- if (func->hasFuncFlag(kX86FuncFlagSFence) & func->hasFuncFlag(kX86FuncFlagLFence)) + if (func->hasFuncFlag(kFuncFlagX86SFence) & func->hasFuncFlag(kFuncFlagX86LFence)) compiler->emit(kX86InstIdMfence); - else if (func->hasFuncFlag(kX86FuncFlagSFence)) + else if (func->hasFuncFlag(kFuncFlagX86SFence)) compiler->emit(kX86InstIdSfence); - else if (func->hasFuncFlag(kX86FuncFlagLFence)) + else if (func->hasFuncFlag(kFuncFlagX86LFence)) compiler->emit(kX86InstIdLfence); // Leave. @@ -5064,7 +5267,7 @@ static Error X86Context_translatePrologEpilog(X86Context* self, X86FuncNode* fun if (useLeaEpilog) { compiler->emit(kX86InstIdPop, fpReg); } - else if (func->hasFuncFlag(kX86FuncFlagLeave)) { + else if (func->hasFuncFlag(kFuncFlagX86Leave)) { compiler->emit(kX86InstIdLeave); } else { @@ -5087,9 +5290,9 @@ static Error X86Context_translatePrologEpilog(X86Context* self, X86FuncNode* fun // ============================================================================ //! \internal -static void X86Context_translateJump(X86Context* self, JumpNode* jNode, TargetNode* jTarget) { +static void X86Context_translateJump(X86Context* self, HLJump* jNode, HLLabel* jTarget) { X86Compiler* compiler = self->getCompiler(); - Node* extNode = self->getExtraBlock(); + HLNode* extNode = self->getExtraBlock(); compiler->_setCursor(extNode); self->switchState(jTarget->getState()); @@ -5097,7 +5300,8 @@ static void X86Context_translateJump(X86Context* self, JumpNode* jNode, TargetNo // If one or more instruction has been added during switchState() it will be // moved at the end of the function body. if (compiler->getCursor() != extNode) { - TargetNode* jTrampolineTarget = compiler->newTarget(); + // TODO: Can fail. + HLLabel* jTrampolineTarget = compiler->newLabelNode(); // Add the jump to the target. 
compiler->jmp(jTarget->getLabel()); @@ -5121,9 +5325,9 @@ static void X86Context_translateJump(X86Context* self, JumpNode* jNode, TargetNo // [asmjit::X86Context - Translate - Ret] // ============================================================================ -static Error X86Context_translateRet(X86Context* self, RetNode* rNode, TargetNode* exitTarget) { +static Error X86Context_translateRet(X86Context* self, HLRet* rNode, HLLabel* exitTarget) { X86Compiler* compiler = self->getCompiler(); - Node* node = rNode->getNext(); + HLNode* node = rNode->getNext(); // 32-bit mode requires to push floating point return value(s), handle it // here as it's a special case. @@ -5134,7 +5338,7 @@ static Error X86Context_translateRet(X86Context* self, RetNode* rNode, TargetNod for (uint32_t i = 0; i < vaCount; i++) { VarAttr& va = vaList[i]; - if (va.hasFlag(kX86VarAttrFld4 | kX86VarAttrFld8)) { + if (va.hasFlag(kVarAttrX86Fld4 | kVarAttrX86Fld8)) { VarData* vd = va.getVd(); X86Mem m(self->getVarMem(vd)); @@ -5142,7 +5346,7 @@ static Error X86Context_translateRet(X86Context* self, RetNode* rNode, TargetNod m.setSize( (flags & kVarFlagSp) ? 4 : (flags & kVarFlagDp) ? 8 : - va.hasFlag(kX86VarAttrFld4) ? 4 : 8); + va.hasFlag(kVarAttrX86Fld4) ? 4 : 8); compiler->fld(m); } @@ -5154,29 +5358,29 @@ static Error X86Context_translateRet(X86Context* self, RetNode* rNode, TargetNod switch (node->getType()) { // If we have found an exit label we just return, there is no need to // emit jump to that. - case kNodeTypeTarget: - if (static_cast(node) == exitTarget) + case kHLNodeTypeLabel: + if (static_cast(node) == exitTarget) return kErrorOk; goto _EmitRet; - case kNodeTypeEmbed: - case kNodeTypeInst: - case kNodeTypeCall: - case kNodeTypeRet: + case kHLNodeTypeData: + case kHLNodeTypeInst: + case kHLNodeTypeCall: + case kHLNodeTypeRet: goto _EmitRet; // Continue iterating. 
- case kNodeTypeComment: - case kNodeTypeAlign: - case kNodeTypeHint: + case kHLNodeTypeComment: + case kHLNodeTypeAlign: + case kHLNodeTypeHint: break; // Invalid node to be here. - case kNodeTypeFunc: - return self->getCompiler()->setError(kErrorInvalidState); + case kHLNodeTypeFunc: + return self->getCompiler()->setLastError(kErrorInvalidState); // We can't go forward from here. - case kNodeTypeEnd: + case kHLNodeTypeSentinel: return kErrorOk; } @@ -5196,7 +5400,7 @@ _EmitRet: // ============================================================================ Error X86Context::translate() { - ASMJIT_TLOG("[Translate] === Begin ===\n"); + ASMJIT_TLOG("[T] ======= Translate (Begin)\n"); X86Compiler* compiler = getCompiler(); X86FuncNode* func = getFunc(); @@ -5206,17 +5410,17 @@ Error X86Context::translate() { X86CallAlloc cAlloc(this); // Flow. - Node* node_ = func; - Node* next = NULL; - Node* stop = getStop(); + HLNode* node_ = func; + HLNode* next = NULL; + HLNode* stop = getStop(); - PodList::Link* jLink = _jccList.getFirst(); + PodList::Link* jLink = _jccList.getFirst(); for (;;) { while (node_->isTranslated()) { // Switch state if we went to the already translated node. 
- if (node_->getType() == kNodeTypeTarget) { - TargetNode* node = static_cast(node_); + if (node_->getType() == kHLNodeTypeLabel) { + HLLabel* node = static_cast(node_); compiler->_setCursor(node->getPrev()); switchState(node->getState()); } @@ -5229,13 +5433,13 @@ _NextGroup: node_ = jLink->getValue(); jLink = jLink->getNext(); - Node* jFlow = X86Context_getOppositeJccFlow(static_cast(node_)); + HLNode* jFlow = X86Context_getOppositeJccFlow(static_cast(node_)); loadState(node_->getState()); if (jFlow->getState()) { X86Context_translateJump(this, - static_cast(node_), - static_cast(jFlow)); + static_cast(node_), + static_cast(jFlow)); node_ = jFlow; if (node_->isTranslated()) @@ -5250,10 +5454,10 @@ _NextGroup: } next = node_->getNext(); - node_->orFlags(kNodeFlagIsTranslated); + node_->orFlags(kHLNodeFlagIsTranslated); ASMJIT_TSEC({ - X86Context_traceNode(this, node_); + X86Context_traceNode(this, node_, "[T] "); }); switch (node_->getType()) { @@ -5261,16 +5465,16 @@ _NextGroup: // [Align / Embed] // ---------------------------------------------------------------------- - case kNodeTypeAlign: - case kNodeTypeEmbed: + case kHLNodeTypeAlign: + case kHLNodeTypeData: break; // ---------------------------------------------------------------------- // [Target] // ---------------------------------------------------------------------- - case kNodeTypeTarget: { - TargetNode* node = static_cast(node_); + case kHLNodeTypeLabel: { + HLLabel* node = static_cast(node_); ASMJIT_ASSERT(!node->hasState()); node->setState(saveState()); break; @@ -5280,15 +5484,15 @@ _NextGroup: // [Inst/Call/SArg/Ret] // ---------------------------------------------------------------------- - case kNodeTypeInst: - case kNodeTypeCall: - case kNodeTypeSArg: + case kHLNodeTypeInst: + case kHLNodeTypeCall: + case kHLNodeTypeCallArg: // Update VarAttr's unuse flags based on liveness of the next node. 
if (!node_->isJcc()) { X86VarMap* map = static_cast(node_->getMap()); - VarBits* liveness = next->getLiveness(); + BitArray* liveness; - if (map != NULL && liveness != NULL) { + if (map != NULL && next != NULL && (liveness = next->getLiveness()) != NULL) { VarAttr* vaList = map->getVaList(); uint32_t vaCount = map->getVaCount(); @@ -5296,26 +5500,26 @@ _NextGroup: VarAttr* va = &vaList[i]; VarData* vd = va->getVd(); - if (!liveness->getBit(vd->getContextId())) + if (!liveness->getBit(vd->getLocalId())) va->orFlags(kVarAttrUnuse); } } } - if (node_->getType() == kNodeTypeCall) { + if (node_->getType() == kHLNodeTypeCall) { ASMJIT_PROPAGATE_ERROR(cAlloc.run(static_cast(node_))); break; } // ... Fall through ... - case kNodeTypeHint: - case kNodeTypeRet: { + case kHLNodeTypeHint: + case kHLNodeTypeRet: { ASMJIT_PROPAGATE_ERROR(vAlloc.run(node_)); // Handle conditional/unconditional jump. if (node_->isJmpOrJcc()) { - JumpNode* node = static_cast(node_); - TargetNode* jTarget = node->getTarget(); + HLJump* node = static_cast(node_); + HLLabel* jTarget = node->getTarget(); // Target not followed. 
if (jTarget == NULL) { @@ -5337,11 +5541,11 @@ _NextGroup: } } else { - Node* jNext = node->getNext(); + HLNode* jNext = node->getNext(); if (jTarget->isTranslated()) { if (jNext->isTranslated()) { - ASMJIT_ASSERT(jNext->getType() == kNodeTypeTarget); + ASMJIT_ASSERT(jNext->getType() == kHLNodeTypeLabel); compiler->_setCursor(node->getPrev()); intersectStates(jTarget->getState(), jNext->getState()); } @@ -5353,13 +5557,13 @@ _NextGroup: next = jNext; } else if (jNext->isTranslated()) { - ASMJIT_ASSERT(jNext->getType() == kNodeTypeTarget); + ASMJIT_ASSERT(jNext->getType() == kHLNodeTypeLabel); VarState* savedState = saveState(); node->setState(savedState); compiler->_setCursor(node); - switchState(static_cast(jNext)->getState()); + switchState(static_cast(jNext)->getState()); next = jTarget; } else { @@ -5370,7 +5574,7 @@ _NextGroup: } else if (node_->isRet()) { ASMJIT_PROPAGATE_ERROR( - X86Context_translateRet(this, static_cast(node_), func->getExitNode())); + X86Context_translateRet(this, static_cast(node_), func->getExitNode())); } break; } @@ -5379,7 +5583,7 @@ _NextGroup: // [Func] // ---------------------------------------------------------------------- - case kNodeTypeFunc: { + case kHLNodeTypeFunc: { ASMJIT_ASSERT(node_ == func); X86FuncDecl* decl = func->getDecl(); @@ -5387,12 +5591,15 @@ _NextGroup: if (map != NULL) { uint32_t i; - uint32_t argCount = func->_x86Decl.getArgCount(); + uint32_t argCount = func->_x86Decl.getNumArgs(); for (i = 0; i < argCount; i++) { const FuncInOut& arg = decl->getArg(i); VarData* vd = func->getArg(i); + if (vd == NULL) + continue; + VarAttr* va = map->findVa(vd); ASMJIT_ASSERT(va != NULL); @@ -5400,14 +5607,14 @@ _NextGroup: continue; uint32_t regIndex = va->getOutRegIndex(); - if (regIndex != kInvalidReg && (va->getFlags() & kVarAttrOutConv) == 0) { + if (regIndex != kInvalidReg && (va->getFlags() & kVarAttrWConv) == 0) { switch (vd->getClass()) { case kX86RegClassGp : attach(vd, regIndex, true); break; case kX86RegClassMm 
: attach(vd, regIndex, true); break; case kX86RegClassXyz: attach(vd, regIndex, true); break; } } - else if (va->hasFlag(kVarAttrOutConv)) { + else if (va->hasFlag(kVarAttrWConv)) { // TODO: [COMPILER] Function Argument Conversion. ASMJIT_ASSERT(!"Reached"); } @@ -5425,7 +5632,7 @@ _NextGroup: // [End] // ---------------------------------------------------------------------- - case kNodeTypeEnd: { + case kHLNodeTypeSentinel: { goto _NextGroup; } @@ -5443,7 +5650,7 @@ _Done: ASMJIT_PROPAGATE_ERROR(X86Context_patchFuncMem(this, func, stop)); ASMJIT_PROPAGATE_ERROR(X86Context_translatePrologEpilog(this, func)); - ASMJIT_TLOG("[Translate] === Done ===\n\n"); + ASMJIT_TLOG("[T] ======= Translate (End)\n"); return kErrorOk; } @@ -5456,10 +5663,10 @@ Error X86Context::schedule() { X86Scheduler scheduler(compiler, static_cast(compiler->getRuntime()->getCpuInfo())); - Node* node_ = getFunc(); - Node* stop = getStop(); + HLNode* node_ = getFunc(); + HLNode* stop = getStop(); - PodList::Link* jLink = _jccList.getFirst(); + PodList::Link* jLink = _jccList.getFirst(); // -------------------------------------------------------------------------- // [Loop] @@ -5473,29 +5680,29 @@ _NextGroup: // We always go to the next instruction in the main loop so we have to // jump to the `jcc` target here. - node_ = static_cast(jLink->getValue())->getTarget(); + node_ = static_cast(jLink->getValue())->getTarget(); jLink = jLink->getNext(); } // Find interval that can be passed to scheduler. for (;;) { - Node* schedStart = node_; + HLNode* schedStart = node_; for (;;) { - Node* next = node_->getNext(); - node_->orFlags(kNodeFlagIsScheduled); + HLNode* next = node_->getNext(); + node_->orFlags(kHLNodeFlagIsScheduled); // Shouldn't happen here, investigate if hit. ASMJIT_ASSERT(node_ != stop); uint32_t nodeType = node_->getType(); - if (nodeType != kNodeTypeInst) { + if (nodeType != kHLNodeTypeInst) { // If we didn't reach any instruction node we simply advance. 
In this // case no informative nodes will be removed and everything else just // skipped. if (schedStart == node_) { node_ = next; - if (nodeType == kNodeTypeEnd || nodeType == kNodeTypeRet) + if (nodeType == kHLNodeTypeSentinel || nodeType == kHLNodeTypeRet) goto _NextGroup; else goto _Advance; @@ -5533,7 +5740,7 @@ _NextGroup: // If node is `jmp` we follow it as well. if (node_->isJmp()) { - node_ = static_cast(node_)->getTarget(); + node_ = static_cast(node_)->getTarget(); if (node_ == NULL) goto _NextGroup; else @@ -5543,7 +5750,7 @@ _NextGroup: // Handle stop nodes. { uint32_t nodeType = node_->getType(); - if (nodeType == kNodeTypeEnd || nodeType == kNodeTypeRet) + if (nodeType == kHLNodeTypeSentinel || nodeType == kHLNodeTypeRet) goto _NextGroup; } @@ -5560,8 +5767,8 @@ _Done: // ============================================================================ template -static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler* assembler, Node* start, Node* stop) { - Node* node_ = start; +static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler* assembler, HLNode* start, HLNode* stop) { + HLNode* node_ = start; StringBuilder& sb = self->_stringBuilder; #if !defined(ASMJIT_DISABLE_LOGGER) @@ -5578,10 +5785,6 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler* } #endif // !ASMJIT_DISABLE_LOGGER - // Create labels on Assembler side. 
- ASMJIT_PROPAGATE_ERROR( - assembler->_registerIndexedLabels(self->getCompiler()->_targetList.getLength())); - do { #if !defined(ASMJIT_DISABLE_LOGGER) if (LoggingEnabled) { @@ -5598,7 +5801,7 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler* sb.appendChars(' ', vdCount); if (node_->hasLiveness()) { - VarBits* liveness = node_->getLiveness(); + BitArray* liveness = node_->getLiveness(); X86VarMap* map = static_cast(node_->getMap()); uint32_t i; @@ -5617,14 +5820,14 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler* uint32_t flags = va->getFlags(); char c = 'u'; - if ( (flags & kVarAttrInAll) && !(flags & kVarAttrOutAll)) c = 'r'; - if (!(flags & kVarAttrInAll) && (flags & kVarAttrOutAll)) c = 'w'; - if ( (flags & kVarAttrInAll) && (flags & kVarAttrOutAll)) c = 'x'; + if ( (flags & kVarAttrRAll) && !(flags & kVarAttrWAll)) c = 'r'; + if (!(flags & kVarAttrRAll) && (flags & kVarAttrWAll)) c = 'w'; + if ( (flags & kVarAttrRAll) && (flags & kVarAttrWAll)) c = 'x'; if ((flags & kVarAttrUnuse)) c -= 'a' - 'A'; - sb.getData()[offset + vd->getContextId()] = c; + sb.getData()[offset + vd->getLocalId()] = c; } } } @@ -5634,44 +5837,40 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler* #endif // !ASMJIT_DISABLE_LOGGER switch (node_->getType()) { - case kNodeTypeAlign: { - AlignNode* node = static_cast(node_); - assembler->align(node->getMode(), node->getOffset()); + case kHLNodeTypeAlign: { + HLAlign* node = static_cast(node_); + assembler->align(node->getAlignMode(), node->getOffset()); break; } - case kNodeTypeEmbed: { - EmbedNode* node = static_cast(node_); + case kHLNodeTypeData: { + HLData* node = static_cast(node_); assembler->embed(node->getData(), node->getSize()); break; } - case kNodeTypeComment: { - CommentNode* node = static_cast(node_); - + case kHLNodeTypeComment: { #if !defined(ASMJIT_DISABLE_LOGGER) + HLComment* node = static_cast(node_); if (LoggingEnabled) 
logger->logFormat(kLoggerStyleComment, "%s; %s\n", logger->getIndentation(), node->getComment()); #endif // !ASMJIT_DISABLE_LOGGER - break; } - case kNodeTypeHint: { + case kHLNodeTypeHint: { break; } - case kNodeTypeTarget: { - TargetNode* node = static_cast(node_); - - node->setOffset(assembler->getOffset()); + case kHLNodeTypeLabel: { + HLLabel* node = static_cast(node_); assembler->bind(node->getLabel()); break; } - case kNodeTypeInst: { - InstNode* node = static_cast(node_); + case kHLNodeTypeInst: { + HLInst* node = static_cast(node_); uint32_t instId = node->getInstId(); uint32_t opCount = node->getOpCount(); @@ -5829,15 +6028,15 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler* // Function scope and return is translated to another nodes, no special // handling is required at this point. - case kNodeTypeFunc: - case kNodeTypeEnd: - case kNodeTypeRet: { + case kHLNodeTypeFunc: + case kHLNodeTypeSentinel: + case kHLNodeTypeRet: { break; } // Function call adds nodes before and after, but it's required to emit // the call instruction by itself. 
- case kNodeTypeCall: { + case kHLNodeTypeCall: { X86CallNode* node = static_cast(node_); assembler->emit(kX86InstIdCall, node->_target, noOperand, noOperand); break; @@ -5853,7 +6052,7 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler* return kErrorOk; } -Error X86Context::serialize(Assembler* assembler, Node* start, Node* stop) { +Error X86Context::serialize(Assembler* assembler, HLNode* start, HLNode* stop) { #if !defined(ASMJIT_DISABLE_LOGGER) if (assembler->hasLogger()) return X86Context_serialize<1>(this, static_cast(assembler), start, stop); diff --git a/src/asmjit/x86/x86context_p.h b/src/asmjit/x86/x86compilercontext_p.h similarity index 64% rename from src/asmjit/x86/x86context_p.h rename to src/asmjit/x86/x86compilercontext_p.h index 1a818dc..1563ae9 100644 --- a/src/asmjit/x86/x86context_p.h +++ b/src/asmjit/x86/x86compilercontext_p.h @@ -5,16 +5,16 @@ // Zlib - See LICENSE.md file in the package. // [Guard] -#ifndef _ASMJIT_X86_X86CONTEXT_P_H -#define _ASMJIT_X86_X86CONTEXT_P_H +#ifndef _ASMJIT_X86_X86COMPILERCONTEXT_P_H +#define _ASMJIT_X86_X86COMPILERCONTEXT_P_H #include "../build.h" #if !defined(ASMJIT_DISABLE_COMPILER) // [Dependencies - AsmJit] #include "../base/compiler.h" -#include "../base/context_p.h" -#include "../base/intutil.h" +#include "../base/compilercontext_p.h" +#include "../base/utils.h" #include "../x86/x86assembler.h" #include "../x86/x86compiler.h" @@ -23,9 +23,228 @@ namespace asmjit { -//! \addtogroup asmjit_x86_compiler +//! \addtogroup asmjit_x86 //! \{ +// ============================================================================ +// [asmjit::X86VarMap] +// ============================================================================ + +struct X86VarMap : public VarMap { + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! 
Get variable-attributes list as VarAttr data. + ASMJIT_INLINE VarAttr* getVaList() const { + return const_cast(_list); + } + + //! Get variable-attributes list as VarAttr data (by class). + ASMJIT_INLINE VarAttr* getVaListByClass(uint32_t rc) const { + return const_cast(_list) + _start.get(rc); + } + + //! Get position of variables (by class). + ASMJIT_INLINE uint32_t getVaStart(uint32_t rc) const { + return _start.get(rc); + } + + //! Get count of variables (by class). + ASMJIT_INLINE uint32_t getVaCountByClass(uint32_t rc) const { + return _count.get(rc); + } + + //! Get VarAttr at `index`. + ASMJIT_INLINE VarAttr* getVa(uint32_t index) const { + ASMJIT_ASSERT(index < _vaCount); + return getVaList() + index; + } + + //! Get VarAttr of `c` class at `index`. + ASMJIT_INLINE VarAttr* getVaByClass(uint32_t rc, uint32_t index) const { + ASMJIT_ASSERT(index < _count._regs[rc]); + return getVaListByClass(rc) + index; + } + + // -------------------------------------------------------------------------- + // [Utils] + // -------------------------------------------------------------------------- + + //! Find VarAttr. + ASMJIT_INLINE VarAttr* findVa(VarData* vd) const { + VarAttr* list = getVaList(); + uint32_t count = getVaCount(); + + for (uint32_t i = 0; i < count; i++) + if (list[i].getVd() == vd) + return &list[i]; + + return NULL; + } + + //! Find VarAttr (by class). + ASMJIT_INLINE VarAttr* findVaByClass(uint32_t rc, VarData* vd) const { + VarAttr* list = getVaListByClass(rc); + uint32_t count = getVaCountByClass(rc); + + for (uint32_t i = 0; i < count; i++) + if (list[i].getVd() == vd) + return &list[i]; + + return NULL; + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Special registers on input. + //! + //! Special register(s) restricted to one or more physical register. If there + //! 
is more than one special register it means that we have to duplicate the + //! variable content to all of them (it means that the same varible was used + //! by two or more operands). We forget about duplicates after the register + //! allocation finishes and marks all duplicates as non-assigned. + X86RegMask _inRegs; + + //! Special registers on output. + //! + //! Special register(s) used on output. Each variable can have only one + //! special register on the output, 'X86VarMap' contains all registers from + //! all 'VarAttr's. + X86RegMask _outRegs; + + //! Clobbered registers (by a function call). + X86RegMask _clobberedRegs; + + //! Start indexes of variables per register class. + X86RegCount _start; + //! Count of variables per register class. + X86RegCount _count; + + //! VarAttr list. + VarAttr _list[1]; +}; + +// ============================================================================ +// [asmjit::X86StateCell] +// ============================================================================ + +//! X86/X64 state-cell. 
+union X86StateCell { + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE uint32_t getState() const { + return _state; + } + + ASMJIT_INLINE void setState(uint32_t state) { + _state = static_cast(state); + } + + // -------------------------------------------------------------------------- + // [Reset] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void reset() { _packed = 0; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + uint8_t _packed; + + struct { + uint8_t _state : 2; + uint8_t _unused : 6; + }; +}; + +// ============================================================================ +// [asmjit::X86VarState] +// ============================================================================ + +//! X86/X64 state. +struct X86VarState : VarState { + enum { + //! Base index of Gp registers. + kGpIndex = 0, + //! Count of Gp registers. + kGpCount = 16, + + //! Base index of Mm registers. + kMmIndex = kGpIndex + kGpCount, + //! Count of Mm registers. + kMmCount = 8, + + //! Base index of Xmm registers. + kXmmIndex = kMmIndex + kMmCount, + //! Count of Xmm registers. + kXmmCount = 16, + + //! Count of all registers in `X86VarState`. 
+ kAllCount = kXmmIndex + kXmmCount + }; + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE VarData** getList() { + return _list; + } + + ASMJIT_INLINE VarData** getListByClass(uint32_t rc) { + switch (rc) { + case kX86RegClassGp : return _listGp; + case kX86RegClassMm : return _listMm; + case kX86RegClassXyz: return _listXmm; + + default: + return NULL; + } + } + + // -------------------------------------------------------------------------- + // [Clear] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void reset(size_t numCells) { + ::memset(this, 0, kAllCount * sizeof(VarData*) + + 2 * sizeof(X86RegMask) + + numCells * sizeof(X86StateCell)); + } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + union { + //! List of all allocated variables in one array. + VarData* _list[kAllCount]; + + struct { + //! Allocated Gp registers. + VarData* _listGp[kGpCount]; + //! Allocated Mm registers. + VarData* _listMm[kMmCount]; + //! Allocated Xmm registers. + VarData* _listXmm[kXmmCount]; + }; + }; + + //! Occupied registers (mask). + X86RegMask _occupied; + //! Modified registers (mask). + X86RegMask _modified; + + //! Variables data, the length is stored in `X86Context`. + X86StateCell _cells[1]; +}; + // ============================================================================ // [asmjit::X86Context] // ============================================================================ @@ -38,11 +257,13 @@ namespace asmjit { //! \internal //! -//! Compiler context is used by `X86Compiler`. +//! Compiler context, used by `X86Compiler`. //! -//! Compiler context is used during compilation and normally developer doesn't -//! need access to it. 
The context is user per function (it's reset after each -//! function is generated). +//! Compiler context takes care of generating function prolog and epilog, and +//! also performs register allocation. It's used during the compilation phase +//! and considered an implementation detail and asmjit consumers don't have +//! access to it. The context is used once per function and it's reset after +//! the function is processed. struct X86Context : public Context { ASMJIT_NO_COPY(X86Context) @@ -65,28 +286,19 @@ struct X86Context : public Context { // [Arch] // -------------------------------------------------------------------------- - ASMJIT_INLINE bool isX64() const { - return _zsp.getSize() == 16; - } + ASMJIT_INLINE bool isX64() const { return _zsp.getSize() == 16; } + ASMJIT_INLINE uint32_t getRegSize() const { return _zsp.getSize(); } // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get compiler as `X86Compiler`. - ASMJIT_INLINE X86Compiler* getCompiler() const { - return static_cast(_compiler); - } - + ASMJIT_INLINE X86Compiler* getCompiler() const { return static_cast(_compiler); } //! Get function as `X86FuncNode`. - ASMJIT_INLINE X86FuncNode* getFunc() const { - return reinterpret_cast(_func); - } - + ASMJIT_INLINE X86FuncNode* getFunc() const { return reinterpret_cast(_func); } //! Get clobbered registers (global). 
- ASMJIT_INLINE uint32_t getClobberedRegs(uint32_t c) { - return _clobberedRegs.get(c); - } + ASMJIT_INLINE uint32_t getClobberedRegs(uint32_t rc) { return _clobberedRegs.get(rc); } // -------------------------------------------------------------------------- // [Helpers] @@ -94,7 +306,7 @@ struct X86Context : public Context { ASMJIT_INLINE X86VarMap* newVarMap(uint32_t vaCount) { return static_cast( - _baseZone.alloc(sizeof(X86VarMap) + vaCount * sizeof(VarAttr))); + _zoneAllocator.alloc(sizeof(X86VarMap) + vaCount * sizeof(VarAttr))); } // -------------------------------------------------------------------------- @@ -121,10 +333,6 @@ struct X86Context : public Context { void _checkState(); - ASMJIT_INLINE uint32_t getRegSize() const { - return _zsp.getSize(); - } - // -------------------------------------------------------------------------- // [Attach / Detach] // -------------------------------------------------------------------------- @@ -142,12 +350,12 @@ struct X86Context : public Context { // Prevent Esp allocation if C==Gp. ASMJIT_ASSERT(C != kX86RegClassGp || regIndex != kX86RegIndexSp); - uint32_t regMask = IntUtil::mask(regIndex); + uint32_t regMask = Utils::mask(regIndex); vd->setState(kVarStateReg); + vd->setModified(modified); vd->setRegIndex(regIndex); vd->addHomeIndex(regIndex); - vd->setModified(modified); _x86State.getListByClass(C)[regIndex] = vd; _x86State._occupied.or_(C, regMask); @@ -167,7 +375,7 @@ struct X86Context : public Context { ASMJIT_ASSERT(vd->getRegIndex() == regIndex); ASMJIT_ASSERT(vState != kVarStateReg); - uint32_t regMask = IntUtil::mask(regIndex); + uint32_t regMask = Utils::mask(regIndex); vd->setState(vState); vd->resetRegIndex(); @@ -188,13 +396,13 @@ struct X86Context : public Context { //! //! Change the register of the 'VarData' changing also the current 'X86VarState'. //! Rebase is nearly identical to 'Detach' and 'Attach' sequence, but doesn't - // change the 'VarData' modified flag. + //! 
change the `VarData`s modified flag. template ASMJIT_INLINE void rebase(VarData* vd, uint32_t newRegIndex, uint32_t oldRegIndex) { ASMJIT_ASSERT(vd->getClass() == C); - uint32_t newRegMask = IntUtil::mask(newRegIndex); - uint32_t oldRegMask = IntUtil::mask(oldRegIndex); + uint32_t newRegMask = Utils::mask(newRegIndex); + uint32_t oldRegMask = Utils::mask(oldRegIndex); uint32_t bothRegMask = newRegMask ^ oldRegMask; vd->setRegIndex(newRegIndex); @@ -239,7 +447,7 @@ struct X86Context : public Context { ASMJIT_ASSERT(vd->getRegIndex() != kInvalidReg); uint32_t regIndex = vd->getRegIndex(); - uint32_t regMask = IntUtil::mask(regIndex); + uint32_t regMask = Utils::mask(regIndex); emitSave(vd, regIndex, "Save"); @@ -315,7 +523,7 @@ struct X86Context : public Context { uint32_t oldRegIndex = vd->getRegIndex(); uint32_t oldState = vd->getState(); - uint32_t regMask = IntUtil::mask(regIndex); + uint32_t regMask = Utils::mask(regIndex); ASMJIT_ASSERT(_x86State.getListByClass(C)[regIndex] == NULL || regIndex == oldRegIndex); @@ -328,7 +536,7 @@ struct X86Context : public Context { emitMove(vd, regIndex, oldRegIndex, "Alloc"); _x86State.getListByClass(C)[oldRegIndex] = NULL; - regMask ^= IntUtil::mask(oldRegIndex); + regMask ^= Utils::mask(oldRegIndex); } else { ASMJIT_X86_CHECK_STATE @@ -337,6 +545,7 @@ struct X86Context : public Context { vd->setState(kVarStateReg); vd->setRegIndex(regIndex); + vd->addHomeIndex(regIndex); _x86State.getListByClass(C)[regIndex] = vd; _x86State._occupied.xor_(C, regMask); @@ -378,7 +587,7 @@ struct X86Context : public Context { ASMJIT_ASSERT(vd->getClass() == C); uint32_t regIndex = vd->getRegIndex(); - uint32_t regMask = IntUtil::mask(regIndex); + uint32_t regMask = Utils::mask(regIndex); vd->setModified(true); _x86State._modified.or_(C, regMask); @@ -463,7 +672,7 @@ struct X86Context : public Context { // [Serialize] // -------------------------------------------------------------------------- - virtual Error serialize(Assembler* assembler, 
Node* start, Node* stop); + virtual Error serialize(Assembler* assembler, HLNode* start, HLNode* stop); // -------------------------------------------------------------------------- // [Members] @@ -485,7 +694,7 @@ struct X86Context : public Context { //! Memory cell where is stored address used to restore manually //! aligned stack. - MemCell* _stackFrameCell; + VarCell* _stackFrameCell; //! Global allocable registers mask. uint32_t _gaRegs[kX86RegClassCount]; @@ -508,7 +717,7 @@ struct X86Context : public Context { int32_t _varActualDisp; //! Temporary string builder used for logging. - StringBuilderT<256> _stringBuilder; + StringBuilderTmp<256> _stringBuilder; }; //! \} @@ -520,4 +729,4 @@ struct X86Context : public Context { // [Guard] #endif // !ASMJIT_DISABLE_COMPILER -#endif // _ASMJIT_X86_X86CONTEXT_P_H +#endif // _ASMJIT_X86_X86COMPILERCONTEXT_P_H diff --git a/src/asmjit/x86/x86compilerfunc.cpp b/src/asmjit/x86/x86compilerfunc.cpp new file mode 100644 index 0000000..d47470a --- /dev/null +++ b/src/asmjit/x86/x86compilerfunc.cpp @@ -0,0 +1,551 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. 
+ +// [Export] +#define ASMJIT_EXPORTS + +// [Guard] +#include "../build.h" +#if !defined(ASMJIT_DISABLE_COMPILER) && (defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)) + +// [Dependencies - AsmJit] +#include "../x86/x86compiler.h" +#include "../x86/x86compilerfunc.h" + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { + +// ============================================================================ +// [asmjit::X86FuncDecl - Helpers] +// ============================================================================ + +static ASMJIT_INLINE bool x86ArgIsInt(uint32_t aType) { + ASMJIT_ASSERT(aType < kX86VarTypeCount); + return Utils::inInterval(aType, _kVarTypeIntStart, _kVarTypeIntEnd); +} + +static ASMJIT_INLINE bool x86ArgIsFp(uint32_t aType) { + ASMJIT_ASSERT(aType < kX86VarTypeCount); + return Utils::inInterval(aType, _kVarTypeFpStart, _kVarTypeFpEnd); +} + +static ASMJIT_INLINE uint32_t x86ArgTypeToXmmType(uint32_t aType) { + if (aType == kVarTypeFp32) return kX86VarTypeXmmSs; + if (aType == kVarTypeFp64) return kX86VarTypeXmmSd; + return aType; +} + +//! Get an architecture depending on the calling convention `callConv`. +//! +//! Returns `kArchNone`, `kArchX86`, or `kArchX64`. +static ASMJIT_INLINE uint32_t x86GetArchFromCConv(uint32_t callConv) { + if (Utils::inInterval(callConv, _kCallConvX86Start, _kCallConvX86End)) return kArchX86; + if (Utils::inInterval(callConv, _kCallConvX64Start, _kCallConvX64End)) return kArchX64; + + return kArchNone; +} + +// ============================================================================ +// [asmjit::X86FuncDecl - SetPrototype] +// ============================================================================ + +#define R(_Index_) kX86RegIndex##_Index_ +static uint32_t X86FuncDecl_initConv(X86FuncDecl* self, uint32_t arch, uint32_t callConv) { + // Setup defaults. 
+ self->_argStackSize = 0; + self->_redZoneSize = 0; + self->_spillZoneSize = 0; + + self->_callConv = static_cast(callConv); + self->_calleePopsStack = false; + self->_direction = kFuncDirRTL; + + self->_passed.reset(); + self->_preserved.reset(); + + ::memset(self->_passedOrderGp, kInvalidReg, ASMJIT_ARRAY_SIZE(self->_passedOrderGp)); + ::memset(self->_passedOrderXyz, kInvalidReg, ASMJIT_ARRAY_SIZE(self->_passedOrderXyz)); + + switch (arch) { + // ------------------------------------------------------------------------ + // [X86 Support] + // ------------------------------------------------------------------------ + +#if defined(ASMJIT_BUILD_X86) + case kArchX86: { + self->_preserved.set(kX86RegClassGp, Utils::mask(R(Bx), R(Sp), R(Bp), R(Si), R(Di))); + + switch (callConv) { + case kCallConvX86CDecl: + break; + + case kCallConvX86StdCall: + self->_calleePopsStack = true; + break; + + case kCallConvX86MsThisCall: + self->_calleePopsStack = true; + self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx))); + self->_passedOrderGp[0] = R(Cx); + break; + + case kCallConvX86MsFastCall: + self->_calleePopsStack = true; + self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx), R(Cx))); + self->_passedOrderGp[0] = R(Cx); + self->_passedOrderGp[1] = R(Dx); + break; + + case kCallConvX86BorlandFastCall: + self->_calleePopsStack = true; + self->_direction = kFuncDirLTR; + self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax), R(Dx), R(Cx))); + self->_passedOrderGp[0] = R(Ax); + self->_passedOrderGp[1] = R(Dx); + self->_passedOrderGp[2] = R(Cx); + break; + + case kCallConvX86GccFastCall: + self->_calleePopsStack = true; + self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx), R(Dx))); + self->_passedOrderGp[0] = R(Cx); + self->_passedOrderGp[1] = R(Dx); + break; + + case kCallConvX86GccRegParm1: + self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax))); + self->_passedOrderGp[0] = R(Ax); + break; + + case kCallConvX86GccRegParm2: + self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax), 
R(Dx))); + self->_passedOrderGp[0] = R(Ax); + self->_passedOrderGp[1] = R(Dx); + break; + + case kCallConvX86GccRegParm3: + self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax), R(Dx), R(Cx))); + self->_passedOrderGp[0] = R(Ax); + self->_passedOrderGp[1] = R(Dx); + self->_passedOrderGp[2] = R(Cx); + break; + + default: + return kErrorInvalidArgument; + } + + return kErrorOk; + } +#endif // ASMJIT_BUILD_X86 + + // ------------------------------------------------------------------------ + // [X64 Support] + // ------------------------------------------------------------------------ + +#if defined(ASMJIT_BUILD_X64) + case kArchX64: { + switch (callConv) { + case kCallConvX64Win: + self->_spillZoneSize = 32; + + self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx), R(Dx), 8, 9)); + self->_passedOrderGp[0] = R(Cx); + self->_passedOrderGp[1] = R(Dx); + self->_passedOrderGp[2] = 8; + self->_passedOrderGp[3] = 9; + + self->_passed.set(kX86RegClassXyz, Utils::mask(0, 1, 2, 3)); + self->_passedOrderXyz[0] = 0; + self->_passedOrderXyz[1] = 1; + self->_passedOrderXyz[2] = 2; + self->_passedOrderXyz[3] = 3; + + self->_preserved.set(kX86RegClassGp , Utils::mask(R(Bx), R(Sp), R(Bp), R(Si), R(Di), 12, 13, 14, 15)); + self->_preserved.set(kX86RegClassXyz, Utils::mask(6, 7, 8, 9, 10, 11, 12, 13, 14, 15)); + break; + + case kCallConvX64Unix: + self->_redZoneSize = 128; + + self->_passed.set(kX86RegClassGp, Utils::mask(R(Di), R(Si), R(Dx), R(Cx), 8, 9)); + self->_passedOrderGp[0] = R(Di); + self->_passedOrderGp[1] = R(Si); + self->_passedOrderGp[2] = R(Dx); + self->_passedOrderGp[3] = R(Cx); + self->_passedOrderGp[4] = 8; + self->_passedOrderGp[5] = 9; + + self->_passed.set(kX86RegClassXyz, Utils::mask(0, 1, 2, 3, 4, 5, 6, 7)); + self->_passedOrderXyz[0] = 0; + self->_passedOrderXyz[1] = 1; + self->_passedOrderXyz[2] = 2; + self->_passedOrderXyz[3] = 3; + self->_passedOrderXyz[4] = 4; + self->_passedOrderXyz[5] = 5; + self->_passedOrderXyz[6] = 6; + self->_passedOrderXyz[7] = 7; + + 
self->_preserved.set(kX86RegClassGp, Utils::mask(R(Bx), R(Sp), R(Bp), 12, 13, 14, 15)); + break; + + default: + return kErrorInvalidArgument; + } + + return kErrorOk; + } +#endif // ASMJIT_BUILD_X64 + + default: + return kErrorInvalidArgument; + } +} +#undef R + +static Error X86FuncDecl_initFunc(X86FuncDecl* self, uint32_t arch, + uint32_t ret, const uint32_t* args, uint32_t numArgs) { + + ASMJIT_ASSERT(numArgs <= kFuncArgCount); + + uint32_t callConv = self->_callConv; + uint32_t regSize = (arch == kArchX86) ? 4 : 8; + + int32_t i = 0; + int32_t gpPos = 0; + int32_t xmmPos = 0; + int32_t stackOffset = 0; + const uint8_t* varMapping = NULL; + +#if defined(ASMJIT_BUILD_X86) + if (arch == kArchX86) + varMapping = _x86VarMapping; +#endif // ASMJIT_BUILD_X86 + +#if defined(ASMJIT_BUILD_X64) + if (arch == kArchX64) + varMapping = _x64VarMapping; +#endif // ASMJIT_BUILD_X64 + + ASMJIT_ASSERT(varMapping != NULL); + self->_numArgs = static_cast(numArgs); + self->_retCount = 0; + + for (i = 0; i < static_cast(numArgs); i++) { + FuncInOut& arg = self->getArg(i); + arg._varType = static_cast(varMapping[args[i]]); + arg._regIndex = kInvalidReg; + arg._stackOffset = kFuncStackInvalid; + } + + for (; i < kFuncArgCount; i++) { + self->_args[i].reset(); + } + + self->_rets[0].reset(); + self->_rets[1].reset(); + self->_argStackSize = 0; + self->_used.reset(); + + if (ret != kInvalidVar) { + ret = varMapping[ret]; + switch (ret) { + case kVarTypeInt64: + case kVarTypeUInt64: + // 64-bit value is returned in EDX:EAX on x86. +#if defined(ASMJIT_BUILD_X86) + if (arch == kArchX86) { + self->_retCount = 2; + self->_rets[0]._varType = kVarTypeUInt32; + self->_rets[0]._regIndex = kX86RegIndexAx; + self->_rets[1]._varType = static_cast(ret - 2); + self->_rets[1]._regIndex = kX86RegIndexDx; + } +#endif // ASMJIT_BUILD_X86 + // ... Fall through ... 
+ + case kVarTypeInt8: + case kVarTypeUInt8: + case kVarTypeInt16: + case kVarTypeUInt16: + case kVarTypeInt32: + case kVarTypeUInt32: + self->_retCount = 1; + self->_rets[0]._varType = static_cast(ret); + self->_rets[0]._regIndex = kX86RegIndexAx; + break; + + case kX86VarTypeMm: + self->_retCount = 1; + self->_rets[0]._varType = static_cast(ret); + self->_rets[0]._regIndex = 0; + break; + + case kVarTypeFp32: + self->_retCount = 1; + if (arch == kArchX86) { + self->_rets[0]._varType = kVarTypeFp32; + self->_rets[0]._regIndex = 0; + } + else { + self->_rets[0]._varType = kX86VarTypeXmmSs; + self->_rets[0]._regIndex = 0; + } + break; + + case kVarTypeFp64: + self->_retCount = 1; + if (arch == kArchX86) { + self->_rets[0]._varType = kVarTypeFp64; + self->_rets[0]._regIndex = 0; + } + else { + self->_rets[0]._varType = kX86VarTypeXmmSd; + self->_rets[0]._regIndex = 0; + break; + } + break; + + case kX86VarTypeXmm: + case kX86VarTypeXmmSs: + case kX86VarTypeXmmSd: + case kX86VarTypeXmmPs: + case kX86VarTypeXmmPd: + self->_retCount = 1; + self->_rets[0]._varType = static_cast(ret); + self->_rets[0]._regIndex = 0; + break; + } + } + + if (self->_numArgs == 0) + return kErrorOk; + +#if defined(ASMJIT_BUILD_X86) + if (arch == kArchX86) { + // Register arguments (Integer), always left-to-right. + for (i = 0; i != static_cast(numArgs); i++) { + FuncInOut& arg = self->getArg(i); + uint32_t varType = varMapping[arg.getVarType()]; + + if (!x86ArgIsInt(varType) || gpPos >= ASMJIT_ARRAY_SIZE(self->_passedOrderGp)) + continue; + + if (self->_passedOrderGp[gpPos] == kInvalidReg) + continue; + + arg._regIndex = self->_passedOrderGp[gpPos++]; + self->_used.or_(kX86RegClassGp, Utils::mask(arg.getRegIndex())); + } + + // Stack arguments. 
+ int32_t iStart = static_cast(numArgs - 1); + int32_t iEnd = -1; + int32_t iStep = -1; + + if (self->_direction == kFuncDirLTR) { + iStart = 0; + iEnd = static_cast(numArgs); + iStep = 1; + } + + for (i = iStart; i != iEnd; i += iStep) { + FuncInOut& arg = self->getArg(i); + uint32_t varType = varMapping[arg.getVarType()]; + + if (arg.hasRegIndex()) + continue; + + if (x86ArgIsInt(varType)) { + stackOffset -= 4; + arg._stackOffset = static_cast(stackOffset); + } + else if (x86ArgIsFp(varType)) { + int32_t size = static_cast(_x86VarInfo[varType].getSize()); + stackOffset -= size; + arg._stackOffset = static_cast(stackOffset); + } + } + } +#endif // ASMJIT_BUILD_X86 + +#if defined(ASMJIT_BUILD_X64) + if (arch == kArchX64) { + if (callConv == kCallConvX64Win) { + int32_t argMax = Utils::iMin(numArgs, 4); + + // Register arguments (Gp/Xmm), always left-to-right. + for (i = 0; i != argMax; i++) { + FuncInOut& arg = self->getArg(i); + uint32_t varType = varMapping[arg.getVarType()]; + + if (x86ArgIsInt(varType) && i < ASMJIT_ARRAY_SIZE(self->_passedOrderGp)) { + arg._regIndex = self->_passedOrderGp[i]; + self->_used.or_(kX86RegClassGp, Utils::mask(arg.getRegIndex())); + continue; + } + + if (x86ArgIsFp(varType) && i < ASMJIT_ARRAY_SIZE(self->_passedOrderXyz)) { + arg._varType = static_cast(x86ArgTypeToXmmType(varType)); + arg._regIndex = self->_passedOrderXyz[i]; + self->_used.or_(kX86RegClassXyz, Utils::mask(arg.getRegIndex())); + } + } + + // Stack arguments (always right-to-left). + for (i = numArgs - 1; i != -1; i--) { + FuncInOut& arg = self->getArg(i); + uint32_t varType = varMapping[arg.getVarType()]; + + if (arg.hasRegIndex()) + continue; + + if (x86ArgIsInt(varType)) { + stackOffset -= 8; // Always 8 bytes. + arg._stackOffset = stackOffset; + } + else if (x86ArgIsFp(varType)) { + stackOffset -= 8; // Always 8 bytes (float/double). + arg._stackOffset = stackOffset; + } + } + + // 32 bytes shadow space (X64W calling convention specific). 
+ stackOffset -= 4 * 8; + } + else { + // Register arguments (Gp), always left-to-right. + for (i = 0; i != static_cast(numArgs); i++) { + FuncInOut& arg = self->getArg(i); + uint32_t varType = varMapping[arg.getVarType()]; + + if (!x86ArgIsInt(varType) || gpPos >= ASMJIT_ARRAY_SIZE(self->_passedOrderGp)) + continue; + + if (self->_passedOrderGp[gpPos] == kInvalidReg) + continue; + + arg._regIndex = self->_passedOrderGp[gpPos++]; + self->_used.or_(kX86RegClassGp, Utils::mask(arg.getRegIndex())); + } + + // Register arguments (Xmm), always left-to-right. + for (i = 0; i != static_cast(numArgs); i++) { + FuncInOut& arg = self->getArg(i); + uint32_t varType = varMapping[arg.getVarType()]; + + if (x86ArgIsFp(varType)) { + arg._varType = static_cast(x86ArgTypeToXmmType(varType)); + arg._regIndex = self->_passedOrderXyz[xmmPos++]; + self->_used.or_(kX86RegClassXyz, Utils::mask(arg.getRegIndex())); + } + } + + // Stack arguments. + for (i = numArgs - 1; i != -1; i--) { + FuncInOut& arg = self->getArg(i); + uint32_t varType = varMapping[arg.getVarType()]; + + if (arg.hasRegIndex()) + continue; + + if (x86ArgIsInt(varType)) { + stackOffset -= 8; + arg._stackOffset = static_cast(stackOffset); + } + else if (x86ArgIsFp(varType)) { + int32_t size = static_cast(_x86VarInfo[varType].getSize()); + + stackOffset -= size; + arg._stackOffset = static_cast(stackOffset); + } + } + } + } +#endif // ASMJIT_BUILD_X64 + + // Modify the stack offset, thus in result all parameters would have positive + // non-zero stack offset. 
+ for (i = 0; i < static_cast(numArgs); i++) { + FuncInOut& arg = self->getArg(i); + if (!arg.hasRegIndex()) { + arg._stackOffset += static_cast(static_cast(regSize) - stackOffset); + } + } + + self->_argStackSize = static_cast(-stackOffset); + return kErrorOk; +} + +Error X86FuncDecl::setPrototype(const FuncPrototype& p) { + uint32_t callConv = p.getCallConv(); + uint32_t arch = x86GetArchFromCConv(callConv); + + if (arch == kArchNone) + return kErrorInvalidArgument; + + if (p.getNumArgs() > kFuncArgCount) + return kErrorInvalidArgument; + + // Validate that the required convention is supported by the current asmjit + // configuration, if only one target is compiled. +#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_BUILD_X64) + if (arch == kArchX64) + return kErrorInvalidState; +#endif // ASMJIT_BUILD_X86 && !ASMJIT_BUILD_X64 + +#if !defined(ASMJIT_BUILD_X86) && defined(ASMJIT_BUILD_X64) + if (arch == kArchX86) + return kErrorInvalidState; +#endif // !ASMJIT_BUILD_X86 && ASMJIT_BUILD_X64 + + ASMJIT_PROPAGATE_ERROR(X86FuncDecl_initConv(this, arch, callConv)); + ASMJIT_PROPAGATE_ERROR(X86FuncDecl_initFunc(this, arch, p.getRet(), p.getArgs(), p.getNumArgs())); + + return kErrorOk; +} + +// ============================================================================ +// [asmjit::X86FuncDecl - Reset] +// ============================================================================ + +void X86FuncDecl::reset() { + uint32_t i; + + _callConv = kCallConvNone; + _calleePopsStack = false; + _direction = kFuncDirRTL; + _reserved0 = 0; + + _numArgs = 0; + _retCount = 0; + + _argStackSize = 0; + _redZoneSize = 0; + _spillZoneSize = 0; + + for (i = 0; i < ASMJIT_ARRAY_SIZE(_args); i++) + _args[i].reset(); + + _rets[0].reset(); + _rets[1].reset(); + + _used.reset(); + _passed.reset(); + _preserved.reset(); + + ::memset(_passedOrderGp, kInvalidReg, ASMJIT_ARRAY_SIZE(_passedOrderGp)); + ::memset(_passedOrderXyz, kInvalidReg, ASMJIT_ARRAY_SIZE(_passedOrderXyz)); +} + +} // asmjit 
namespace + +// [Api-End] +#include "../apiend.h" + +// [Guard] +#endif // !ASMJIT_DISABLE_COMPILER && (ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64) diff --git a/src/asmjit/x86/x86compilerfunc.h b/src/asmjit/x86/x86compilerfunc.h new file mode 100644 index 0000000..57147b8 --- /dev/null +++ b/src/asmjit/x86/x86compilerfunc.h @@ -0,0 +1,133 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +// [Guard] +#ifndef _ASMJIT_X86_X86COMPILERFUNC_P_H +#define _ASMJIT_X86_X86COMPILERFUNC_P_H + +#include "../build.h" +#if !defined(ASMJIT_DISABLE_COMPILER) + +// [Dependencies - AsmJit] +#include "../base/compilerfunc.h" +#include "../x86/x86operand.h" + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { + +//! \addtogroup asmjit_x86 +//! \{ + +// ============================================================================ +// [asmjit::TypeId] +// ============================================================================ + +#if !defined(ASMJIT_DOCGEN) +ASMJIT_TYPE_ID(X86MmReg, kX86VarTypeMm); +ASMJIT_TYPE_ID(X86MmVar, kX86VarTypeMm); +ASMJIT_TYPE_ID(X86XmmReg, kX86VarTypeXmm); +ASMJIT_TYPE_ID(X86XmmVar, kX86VarTypeXmm); +ASMJIT_TYPE_ID(X86YmmReg, kX86VarTypeYmm); +ASMJIT_TYPE_ID(X86YmmVar, kX86VarTypeYmm); +ASMJIT_TYPE_ID(X86ZmmReg, kX86VarTypeZmm); +ASMJIT_TYPE_ID(X86ZmmVar, kX86VarTypeZmm); +#endif // !ASMJIT_DOCGEN + +// ============================================================================ +// [asmjit::X86FuncDecl] +// ============================================================================ + +//! X86 function, including calling convention, arguments and their +//! register indices or stack positions. +struct X86FuncDecl : public FuncDecl { + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! 
Create a new `X86FuncDecl` instance. + ASMJIT_INLINE X86FuncDecl() { reset(); } + + // -------------------------------------------------------------------------- + // [Accessors - X86] + // -------------------------------------------------------------------------- + + //! Get used registers mask for the given register class `rc`. + //! + //! \note The result depends on the function calling convention AND the + //! function prototype. Returned mask contains only registers actually used + //! to pass function arguments. + ASMJIT_INLINE uint32_t getUsed(uint32_t rc) const { return _used.get(rc); } + + //! Get passed registers mask for the given register class `rc`. + //! + //! \note The result depends on the function calling convention used; the + //! prototype of the function doesn't affect the mask returned. + ASMJIT_INLINE uint32_t getPassed(uint32_t rc) const { return _passed.get(rc); } + + //! Get preserved registers mask for the given register class `rc`. + //! + //! \note The result depends on the function calling convention used; the + //! prototype of the function doesn't affect the mask returned. + ASMJIT_INLINE uint32_t getPreserved(uint32_t rc) const { return _preserved.get(rc); } + + //! Get the order of passed registers (Gp). + //! + //! \note The result depends on the function calling convention used; the + //! prototype of the function doesn't affect the order returned. + ASMJIT_INLINE const uint8_t* getPassedOrderGp() const { return _passedOrderGp; } + + //! Get the order of passed registers (Xmm/Ymm/Zmm). + //! + //! \note The result depends on the function calling convention used; the + //! prototype of the function doesn't affect the order returned. + ASMJIT_INLINE const uint8_t* getPassedOrderXyz() const { return _passedOrderXyz; } + + // -------------------------------------------------------------------------- + // [SetPrototype] + // -------------------------------------------------------------------------- + + //! Set function prototype. + //!
+ //! This will set function calling convention and setup arguments variables. + //! + //! \note This function will allocate variables, it can be called only once. + ASMJIT_API Error setPrototype(const FuncPrototype& p); + + // -------------------------------------------------------------------------- + // [Reset] + // -------------------------------------------------------------------------- + + ASMJIT_API void reset(); + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Used registers. + X86RegMask _used; + //! Passed registers (defined by the calling convention). + X86RegMask _passed; + //! Preserved registers (defined by the calling convention). + X86RegMask _preserved; + + //! Order of registers used to pass Gp function arguments. + uint8_t _passedOrderGp[8]; + //! Order of registers used to pass Xmm function arguments. + uint8_t _passedOrderXyz[8]; +}; + +//! \} + +} // asmjit namespace + +// [Api-End] +#include "../apiend.h" + +// [Guard] +#endif // !ASMJIT_DISABLE_COMPILER +#endif // _ASMJIT_X86_X86COMPILERFUNC_P_H diff --git a/src/asmjit/x86/x86cpuinfo.cpp b/src/asmjit/x86/x86cpuinfo.cpp index 218f584..607cb1c 100644 --- a/src/asmjit/x86/x86cpuinfo.cpp +++ b/src/asmjit/x86/x86cpuinfo.cpp @@ -12,7 +12,7 @@ #if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64) // [Dependencies - AsmJit] -#include "../base/intutil.h" +#include "../base/utils.h" #include "../x86/x86cpuinfo.h" // 2009-02-05: Thanks to Mike Tajmajer for VC7.1 compiler support. 
It shouldn't @@ -37,9 +37,9 @@ struct X86CpuVendor { static const X86CpuVendor x86CpuVendorList[] = { { kCpuVendorIntel , { 'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l' } }, - { kCpuVendorAmd , { 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' } }, - { kCpuVendorVia , { 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 } }, - { kCpuVendorVia , { 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' } } + { kCpuVendorAMD , { 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' } }, + { kCpuVendorVIA , { 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 } }, + { kCpuVendorVIA , { 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' } } }; static ASMJIT_INLINE bool x86CpuVendorEq(const X86CpuVendor& info, const char* vendorString) { @@ -98,14 +98,14 @@ union X86XCR { }; // callCpuId() and detectCpuInfo() for x86 and x64 platforms begins here. -#if defined(ASMJIT_ARCH_X86) || defined(ASMJIT_ARCH_X64) +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 void X86CpuUtil::_docpuid(uint32_t inEcx, uint32_t inEax, X86CpuId* result) { #if defined(_MSC_VER) // __cpuidex was introduced by VS2008-SP1. # if _MSC_FULL_VER >= 150030729 __cpuidex(reinterpret_cast(result->i), inEax, inEcx); -# elif defined(ASMJIT_ARCH_X64) +# elif ASMJIT_ARCH_X64 // VS2008 or less, 64-bit mode - `__cpuidex` doesn't exist! However, 64-bit // calling convention specifies parameter to be passed in ECX/RCX, so we may // be lucky if compiler doesn't move the register, otherwise the result is @@ -130,7 +130,7 @@ void X86CpuUtil::_docpuid(uint32_t inEcx, uint32_t inEax, X86CpuId* result) { #elif defined(__GNUC__) // Note, patched to preserve ebx/rbx register which is used by GCC. 
-# if defined(ASMJIT_ARCH_X86) +# if ASMJIT_ARCH_X86 # define __myCpuId(inEax, inEcx, outEax, outEbx, outEcx, outEdx) \ __asm__ __volatile__( \ "mov %%ebx, %%edi\n" \ @@ -150,22 +150,18 @@ void X86CpuUtil::_docpuid(uint32_t inEcx, uint32_t inEax, X86CpuId* result) { __myCpuId(inEax, inEcx, result->eax, result->ebx, result->ecx, result->edx); #else -# error "asmjit::X86CpuUtil::_docpuid() unimplemented!" +# error "[asmjit] X86CpuUtil::_docpuid() unimplemented!" #endif } static void callXGetBV(X86XCR* result, uint32_t inEcx) { - #if defined(_MSC_VER) - # if (_MSC_FULL_VER >= 160040219) // 2010SP1+ result->value = _xgetbv(inEcx); # else result->value = 0; # endif - #elif defined(__GNUC__) - unsigned int eax, edx; // Removed, because the world is not perfect: @@ -174,21 +170,16 @@ static void callXGetBV(X86XCR* result, uint32_t inEcx) { result->eax = eax; result->edx = edx; - #else - result->value = 0; - #endif // COMPILER } void X86CpuUtil::detect(X86CpuInfo* cpuInfo) { - X86CpuId regs; - uint32_t i; uint32_t maxBaseId; - bool maybeMPX = false; + X86CpuId regs; X86XCR xcr0; xcr0.value = 0; @@ -252,8 +243,8 @@ void X86CpuUtil::detect(X86CpuInfo* cpuInfo) { if (regs.ecx & 0x00400000U) cpuInfo->addFeature(kX86CpuFeatureMOVBE); if (regs.ecx & 0x00800000U) cpuInfo->addFeature(kX86CpuFeaturePOPCNT); if (regs.ecx & 0x02000000U) cpuInfo->addFeature(kX86CpuFeatureAESNI); - if (regs.ecx & 0x04000000U) cpuInfo->addFeature(kX86CpuFeatureXSave); - if (regs.ecx & 0x08000000U) cpuInfo->addFeature(kX86CpuFeatureXSaveOS); + if (regs.ecx & 0x04000000U) cpuInfo->addFeature(kX86CpuFeatureXSAVE); + if (regs.ecx & 0x08000000U) cpuInfo->addFeature(kX86CpuFeatureXSAVE_OS); if (regs.ecx & 0x40000000U) cpuInfo->addFeature(kX86CpuFeatureRDRAND); if (regs.edx & 0x00000010U) cpuInfo->addFeature(kX86CpuFeatureRDTSC); @@ -267,7 +258,7 @@ void X86CpuUtil::detect(X86CpuInfo* cpuInfo) { if (regs.edx & 0x10000000U) cpuInfo->addFeature(kX86CpuFeatureMT); // AMD sets Multithreading to ON if it has two or 
more cores. - if (cpuInfo->_hwThreadsCount == 1 && cpuInfo->_vendorId == kCpuVendorAmd && (regs.edx & 0x10000000U)) { + if (cpuInfo->_hwThreadsCount == 1 && cpuInfo->_vendorId == kCpuVendorAMD && (regs.edx & 0x10000000U)) { cpuInfo->_hwThreadsCount = 2; } @@ -296,19 +287,20 @@ void X86CpuUtil::detect(X86CpuInfo* cpuInfo) { // -------------------------------------------------------------------------- // Detect new features if the processor supports CPUID-07. + bool maybeMPX = false; if (maxBaseId >= 0x7) { callCpuId(®s, 0x7); - if (regs.ebx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeatureFSGSBase); + if (regs.ebx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeatureFSGSBASE); if (regs.ebx & 0x00000008U) cpuInfo->addFeature(kX86CpuFeatureBMI); if (regs.ebx & 0x00000010U) cpuInfo->addFeature(kX86CpuFeatureHLE); if (regs.ebx & 0x00000100U) cpuInfo->addFeature(kX86CpuFeatureBMI2); - if (regs.ebx & 0x00000200U) cpuInfo->addFeature(kX86CpuFeatureMOVSBSTOSBOpt); + if (regs.ebx & 0x00000200U) cpuInfo->addFeature(kX86CpuFeatureMOVSBSTOSB_OPT); if (regs.ebx & 0x00000800U) cpuInfo->addFeature(kX86CpuFeatureRTM); if (regs.ebx & 0x00004000U) maybeMPX = true; if (regs.ebx & 0x00040000U) cpuInfo->addFeature(kX86CpuFeatureRDSEED); if (regs.ebx & 0x00080000U) cpuInfo->addFeature(kX86CpuFeatureADX); - if (regs.ebx & 0x00800000U) cpuInfo->addFeature(kX86CpuFeatureCLFLUSHOpt); + if (regs.ebx & 0x00800000U) cpuInfo->addFeature(kX86CpuFeatureCLFLUSH_OPT); if (regs.ebx & 0x20000000U) cpuInfo->addFeature(kX86CpuFeatureSHA); if (regs.ecx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeaturePREFETCHWT1); @@ -358,7 +350,7 @@ void X86CpuUtil::detect(X86CpuInfo* cpuInfo) { // extended IDs. 
callCpuId(®s, 0x80000000); - uint32_t maxExtId = IntUtil::iMin(regs.eax, 0x80000004); + uint32_t maxExtId = Utils::iMin(regs.eax, 0x80000004); uint32_t* brand = reinterpret_cast(cpuInfo->_brandString); for (i = 0x80000001; i <= maxExtId; i++) { @@ -373,7 +365,7 @@ void X86CpuUtil::detect(X86CpuInfo* cpuInfo) { if (regs.ecx & 0x00000100U) cpuInfo->addFeature(kX86CpuFeaturePREFETCH); if (regs.edx & 0x00100000U) cpuInfo->addFeature(kX86CpuFeatureNX); - if (regs.edx & 0x00200000U) cpuInfo->addFeature(kX86CpuFeatureFXSROpt); + if (regs.edx & 0x00200000U) cpuInfo->addFeature(kX86CpuFeatureFXSR_OPT); if (regs.edx & 0x00400000U) cpuInfo->addFeature(kX86CpuFeatureMMX2); if (regs.edx & 0x08000000U) cpuInfo->addFeature(kX86CpuFeatureRDTSCP); if (regs.edx & 0x40000000U) cpuInfo->addFeature(kX86CpuFeature3DNOW2).addFeature(kX86CpuFeatureMMX2); @@ -395,7 +387,7 @@ void X86CpuUtil::detect(X86CpuInfo* cpuInfo) { } } - // Simplify the brand string (remove unnecessary spaces to make printing nicer). + // Simplify the brand string (remove unnecessary spaces to make printing prettier). x86SimplifyBrandString(cpuInfo->_brandString); } #endif diff --git a/src/asmjit/x86/x86cpuinfo.h b/src/asmjit/x86/x86cpuinfo.h index 8268ef2..a908027 100644 --- a/src/asmjit/x86/x86cpuinfo.h +++ b/src/asmjit/x86/x86cpuinfo.h @@ -22,7 +22,7 @@ namespace asmjit { struct X86CpuInfo; -//! \addtogroup asmjit_x86_general +//! \addtogroup asmjit_x86 //! \{ // ============================================================================ @@ -48,7 +48,7 @@ ASMJIT_ENUM(X86CpuFeature) { //! Cpu has CLFUSH. kX86CpuFeatureCLFLUSH, //! Cpu has CLFUSH (Optimized). - kX86CpuFeatureCLFLUSHOpt, + kX86CpuFeatureCLFLUSH_OPT, //! Cpu has PREFETCH. kX86CpuFeaturePREFETCH, //! Cpu has PREFETCHWT1. @@ -58,7 +58,7 @@ ASMJIT_ENUM(X86CpuFeature) { //! Cpu has FXSAVE/FXRSTOR. kX86CpuFeatureFXSR, //! Cpu has FXSAVE/FXRSTOR (Optimized). - kX86CpuFeatureFXSROpt, + kX86CpuFeatureFXSR_OPT, //! Cpu has MMX. kX86CpuFeatureMMX, //! 
Cpu has extended MMX. @@ -102,9 +102,9 @@ ASMJIT_ENUM(X86CpuFeature) { //! Cpu has SHA-1 and SHA-256. kX86CpuFeatureSHA, //! Cpu has XSAVE support - XSAVE/XRSTOR, XSETBV/XGETBV, and XCR0. - kX86CpuFeatureXSave, + kX86CpuFeatureXSAVE, //! OS has enabled XSAVE, you can call XGETBV to get value of XCR0. - kX86CpuFeatureXSaveOS, + kX86CpuFeatureXSAVE_OS, //! Cpu has AVX. kX86CpuFeatureAVX, //! Cpu has AVX2. @@ -130,9 +130,9 @@ ASMJIT_ENUM(X86CpuFeature) { //! Cpu has MPX (Memory Protection Extensions). kX86CpuFeatureMPX, //! Cpu has FSGSBASE. - kX86CpuFeatureFSGSBase, + kX86CpuFeatureFSGSBASE, //! Cpu has optimized REP MOVSB/STOSB. - kX86CpuFeatureMOVSBSTOSBOpt, + kX86CpuFeatureMOVSBSTOSB_OPT, //! Cpu has AVX-512F (Foundation). kX86CpuFeatureAVX512F, @@ -178,7 +178,7 @@ union X86CpuId { // [asmjit::X86CpuUtil] // ============================================================================ -#if defined(ASMJIT_ARCH_X86) || defined(ASMJIT_ARCH_X64) +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 //! CPU utilities available only if the host processor is X86/X64. struct X86CpuUtil { //! \internal @@ -238,7 +238,7 @@ struct X86CpuInfo : public CpuInfo { // [Statics] // -------------------------------------------------------------------------- -#if defined(ASMJIT_ARCH_X86) || defined(ASMJIT_ARCH_X64) +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 //! Get global instance of `X86CpuInfo`. static ASMJIT_INLINE const X86CpuInfo* getHost() { return static_cast(CpuInfo::getHost()); diff --git a/src/asmjit/x86/x86inst.cpp b/src/asmjit/x86/x86inst.cpp index c1bcfc4..d23c641 100644 --- a/src/asmjit/x86/x86inst.cpp +++ b/src/asmjit/x86/x86inst.cpp @@ -25,21 +25,22 @@ namespace asmjit { //! \internal enum { - kX86InstTable_L__ = (0) << kX86InstOpCode_L_Shift, - kX86InstTable_L_I = (0) << kX86InstOpCode_L_Shift, - kX86InstTable_L_0 = (0) << kX86InstOpCode_L_Shift, - kX86InstTable_L_L = (1) << kX86InstOpCode_L_Shift, + // REX/VEX. 
+ kX86InstTable_L__ = (0) << kX86InstOpCode_L_Shift, // L is operand-based or unspecified. + kX86InstTable_L_I = (0) << kX86InstOpCode_L_Shift, // L is ignored (LIG). + kX86InstTable_L_0 = (0) << kX86InstOpCode_L_Shift, // L has to be zero. + kX86InstTable_L_L = (1) << kX86InstOpCode_L_Shift, // L has to be set. - kX86InstTable_W__ = (0) << kX86InstOpCode_W_Shift, - kX86InstTable_W_I = (0) << kX86InstOpCode_W_Shift, - kX86InstTable_W_0 = (0) << kX86InstOpCode_W_Shift, - kX86InstTable_W_1 = (1) << kX86InstOpCode_W_Shift, - kX86InstTable_W_W = (1) << kX86InstOpCode_W_Shift, + kX86InstTable_W__ = (0) << kX86InstOpCode_W_Shift, // W is operand-based or unspecified. + kX86InstTable_W_I = (0) << kX86InstOpCode_W_Shift, // W is ignored (WIG). + kX86InstTable_W_0 = (0) << kX86InstOpCode_W_Shift, // W has to be zero. + kX86InstTable_W_W = (1) << kX86InstOpCode_W_Shift, // W has to be set. - kX86InstTable_E__ = (0) << kX86InstOpCode_EW_Shift, - kX86InstTable_E_I = (0) << kX86InstOpCode_EW_Shift, - kX86InstTable_E_0 = (0) << kX86InstOpCode_EW_Shift, - kX86InstTable_E_1 = (1) << kX86InstOpCode_EW_Shift + // EVEX. + kX86InstTable_E__ = (0) << kX86InstOpCode_EW_Shift, // EVEX.W is operand-based or unspecified. + kX86InstTable_E_I = (0) << kX86InstOpCode_EW_Shift, // EVEX.W is ignored (WIG). + kX86InstTable_E_0 = (0) << kX86InstOpCode_EW_Shift, // EVEX.W has to be zero. + kX86InstTable_E_1 = (1) << kX86InstOpCode_EW_Shift // EVEX.W has to be set. }; //! \internal @@ -47,8 +48,9 @@ enum { //! Combined flags. 
enum X86InstOpInternal { kX86InstOpI = kX86InstOpImm, + kX86InstOpL = kX86InstOpLabel, - kX86InstOpLbImm = kX86InstOpLabel | kX86InstOpImm, + kX86InstOpLImm = kX86InstOpLabel | kX86InstOpImm, kX86InstOpGwb = kX86InstOpGw | kX86InstOpGb, kX86InstOpGqd = kX86InstOpGq | kX86InstOpGd, @@ -2044,7 +2046,7 @@ const X86InstExtendedInfo _x86InstExtendedInfo[] = { { Enc(X86BTest) , 0 , 0 , 0x00, 0x3B, 0, { O(GqdwMem) , O(Gqdw)|O(Imm) , U , U , U }, F(Lock) , O_000F00(BA,7,_,_,_) }, { Enc(X86BTest) , 0 , 0 , 0x00, 0x3B, 0, { O(GqdwMem) , O(Gqdw)|O(Imm) , U , U , U }, F(Lock) , O_000F00(BA,6,_,_,_) }, { Enc(X86BTest) , 0 , 0 , 0x00, 0x3B, 0, { O(GqdwMem) , O(Gqdw)|O(Imm) , U , U , U }, F(Lock) , O_000F00(BA,5,_,_,_) }, - { Enc(X86Call) , 0 , 0 , 0x00, 0x00, 0, { O(GqdMem)|O(LbImm), U , U , U , U }, F(Flow) , O_000000(E8,U,_,_,_) }, + { Enc(X86Call) , 0 , 0 , 0x00, 0x00, 0, { O(GqdMem)|O(LImm) , U , U , U , U }, F(Flow) , O_000000(E8,U,_,_,_) }, { Enc(X86Op) , 0 , 0 , 0x00, 0x00, 0, { U , U , U , U , U }, F(None)|F(Special) , U }, { Enc(X86Op) , 0 , 0 , 0x00, 0x20, 0, { U , U , U , U , U }, F(None) , U }, { Enc(X86Op) , 0 , 0 , 0x00, 0x40, 0, { U , U , U , U , U }, F(None) , U }, @@ -3377,12 +3379,12 @@ enum X86InstData_ExtendedIndex { }; // ${X86InstData:End} -// Please run tools/src-gendefs.js (by using just node.js, without any dependencies) to regenerate the code above. +// Please run tools/src-gendefs.js (by using just node.js, without any dependencies) to regenerate the code enclosed with ${X86InstData...}. 
const X86InstInfo _x86InstInfo[] = { // <----------------------------+--------------------+-------------------------------------------+-------------------+------------------------------------+-------------+-------+---------------------------------------------------------------------------------------------------+ // | | Instruction Opcodes | | Instruction Flags | E-FLAGS | Write | Operands (Gp/Fp/Mm/K/Xmm/Ymm/Zmm Regs, Mem, Imm, Label, None/Undefined) | // Instruction Id | Instruction Name +---------------------+---------------------+ Instruction Enc. +---------------+--------------------+-------------+---+---+-------------------+-------------------+-------------------+-------------------+-------------------+ - // | | 0:PP-MMM OP/O L/W/EW| 1:PP-MMM OP/O L/W/EW| | Global Flags |A512(ID|VL|kz|rnd|b)| EF:OSZAPCDX |Idx| Sz| [0] 1st Operand | [1] 2nd Operand | [2] 3rd Operand | [3] 4th Operand | [4] 5th Operand | + // | | O-PP-MMM OP/O L/W/EW| 1:PP-MMM OP/O L/W/EW| | Global Flags |A512(ID|VL|kz|rnd|b)| EF:OSZAPCDX |Idx| Sz| [0] 1st Operand | [1] 2nd Operand | [2] 3rd Operand | [3] 4th Operand | [4] 5th Operand | // <----------------------------+--------------------+---------------------+---------------------+-------------------+---------------+--------------------+-------------+---+---+-------------------+-------------------+-------------------+-------------------+-------------------+ INST(kInstIdNone , "" , U , U , Enc(None) , F(None) , EF(________), 0 , 0 , U , U , U , U , U ), INST(kX86InstIdAdc , "adc" , O_000000(10,2,_,_,_), U , Enc(X86Arith) , F(Lock) , EF(WWWWWX__), 0 , 0 , O(GqdwbMem) , O(GqdwbMem)|O(Imm), U , U , U ), @@ -3421,7 +3423,7 @@ const X86InstInfo _x86InstInfo[] = { INST(kX86InstIdBtr , "btr" , O_000F00(B3,U,_,_,_), O_000F00(BA,6,_,_,_), Enc(X86BTest) , F(Lock) , EF(UU_UUW__), 0 , 0 , O(GqdwMem) , O(Gqdw)|O(Imm) , U , U , U ), INST(kX86InstIdBts , "bts" , O_000F00(AB,U,_,_,_), O_000F00(BA,5,_,_,_), Enc(X86BTest) , F(Lock) , EF(UU_UUW__), 0 , 0 , 
O(GqdwMem) , O(Gqdw)|O(Imm) , U , U , U ), INST(kX86InstIdBzhi , "bzhi" , O_000F38(F5,U,_,_,_), U , Enc(AvxRmv) , F(None) , EF(WWWUUW__), 0 , 0 , O(Gqd) , O(GqdMem) , O(Gqd) , U , U ), - INST(kX86InstIdCall , "call" , O_000000(FF,2,_,_,_), O_000000(E8,U,_,_,_), Enc(X86Call) , F(Flow) , EF(________), 0 , 0 , O(GqdMem)|O(LbImm), U , U , U , U ), + INST(kX86InstIdCall , "call" , O_000000(FF,2,_,_,_), O_000000(E8,U,_,_,_), Enc(X86Call) , F(Flow) , EF(________), 0 , 0 , O(GqdMem)|O(LImm) , U , U , U , U ), INST(kX86InstIdCbw , "cbw" , O_660000(98,U,_,_,_), U , Enc(X86Op) , F(None)|F(Special) , EF(________), 0 , 0 , U , U , U , U , U ), INST(kX86InstIdCdq , "cdq" , O_000000(99,U,_,_,_), U , Enc(X86Op) , F(None)|F(Special) , EF(________), 0 , 0 , U , U , U , U , U ), INST(kX86InstIdCdqe , "cdqe" , O_000000(98,U,_,W,_), U , Enc(X86Op) , F(None)|F(Special) , EF(________), 0 , 0 , U , U , U , U , U ), @@ -4490,52 +4492,19 @@ const X86InstInfo _x86InstInfo[] = { // ============================================================================ #define CC_TO_INST(_Inst_) { \ - _Inst_##o, \ - _Inst_##no, \ - _Inst_##b, \ - _Inst_##ae, \ - _Inst_##e, \ - _Inst_##ne, \ - _Inst_##be, \ - _Inst_##a, \ - _Inst_##s, \ - _Inst_##ns, \ - _Inst_##pe, \ - _Inst_##po, \ - _Inst_##l, \ - _Inst_##ge, \ - _Inst_##le, \ - _Inst_##g, \ - \ - kInstIdNone, \ - kInstIdNone, \ - kInstIdNone, \ - kInstIdNone \ + _Inst_##o , _Inst_##no , _Inst_##b , _Inst_##ae , \ + _Inst_##e , _Inst_##ne , _Inst_##be , _Inst_##a , \ + _Inst_##s , _Inst_##ns , _Inst_##pe , _Inst_##po , \ + _Inst_##l , _Inst_##ge , _Inst_##le , _Inst_##g , \ + kInstIdNone, kInstIdNone, kInstIdNone, kInstIdNone \ } const uint32_t _x86ReverseCond[20] = { - /* kX86CondO -> */ kX86CondO, - /* kX86CondNO -> */ kX86CondNO, - /* kX86CondB -> */ kX86CondA, - /* kX86CondAE -> */ kX86CondBE, - /* kX86CondE -> */ kX86CondE, - /* kX86CondNE -> */ kX86CondNE, - /* kX86CondBE -> */ kX86CondAE, - /* kX86CondA -> */ kX86CondB, - /* kX86CondS -> */ 
kX86CondS, - /* kX86CondNS -> */ kX86CondNS, - /* kX86CondPE -> */ kX86CondPE, - /* kX86CondPO -> */ kX86CondPO, - /* kX86CondL -> */ kX86CondG, - /* kX86CondGE -> */ kX86CondLE, - /* kX86CondLE -> */ kX86CondGE, - /* kX86CondG -> */ kX86CondL, - - /* kX86CondFpuUnordered -> */ kX86CondFpuUnordered, - /* kX86CondFpuNotUnordered -> */ kX86CondFpuNotUnordered, - - 0x12, - 0x13 + /* O|NO|B|AE -> */ kX86CondO, kX86CondNO, kX86CondA , kX86CondBE, + /* E|NE|BE|A -> */ kX86CondE, kX86CondNE, kX86CondAE, kX86CondB , + /* S|NS|PE|PO -> */ kX86CondS, kX86CondNS, kX86CondPE, kX86CondPO, + /* L|GE|LE|G -> */ kX86CondG, kX86CondLE, kX86CondGE, kX86CondL , + /* Unord|!Unord -> */ kX86CondFpuUnordered , kX86CondFpuNotUnordered, 0x12, 0x13 }; const uint32_t _x86CondToCmovcc[20] = CC_TO_INST(kX86InstIdCmov); @@ -4549,10 +4518,12 @@ const uint32_t _x86CondToSetcc [20] = CC_TO_INST(kX86InstIdSet ); // ============================================================================ #if !defined(ASMJIT_DISABLE_NAMES) -// Compare two instruction names. -// -// `a` is null terminated instruction name from `_x86InstName[]` table. -// `b` is non-null terminated instruction name passed to `getInstIdByName()`. +//! \internal +//! +//! Compare two instruction names. +//! +//! `a` is null terminated instruction name from `_x86InstName[]` table. +//! `b` is non-null terminated instruction name passed to `getInstIdByName()`. 
static ASMJIT_INLINE int X86Util_cmpInstName(const char* a, const char* b, size_t len) { for (size_t i = 0; i < len; i++) { int c = static_cast(static_cast(a[i])) - diff --git a/src/asmjit/x86/x86inst.h b/src/asmjit/x86/x86inst.h index fe8f7d0..f5683e9 100644 --- a/src/asmjit/x86/x86inst.h +++ b/src/asmjit/x86/x86inst.h @@ -10,10 +10,9 @@ // [Dependencies - AsmJit] #include "../base/assembler.h" -#include "../base/compiler.h" #include "../base/globals.h" -#include "../base/intutil.h" #include "../base/operand.h" +#include "../base/utils.h" #include "../base/vectypes.h" // [Api-Begin] @@ -28,7 +27,7 @@ namespace asmjit { struct X86InstInfo; struct X86InstExtendedInfo; -//! \addtogroup asmjit_x86_inst +//! \addtogroup asmjit_x86 //! \{ // ============================================================================ @@ -39,38 +38,38 @@ struct X86InstExtendedInfo; //! \internal //! //! X86/X64 instructions' names, accessible through `X86InstInfo`. -ASMJIT_VAR const char _x86InstName[]; +ASMJIT_VARAPI const char _x86InstName[]; #endif // !ASMJIT_DISABLE_NAMES //! \internal //! //! X86/X64 instructions' extended information, accessible through `X86InstInfo`. -ASMJIT_VAR const X86InstExtendedInfo _x86InstExtendedInfo[]; +ASMJIT_VARAPI const X86InstExtendedInfo _x86InstExtendedInfo[]; //! \internal //! //! X86/X64 instructions' information. -ASMJIT_VAR const X86InstInfo _x86InstInfo[]; +ASMJIT_VARAPI const X86InstInfo _x86InstInfo[]; //! \internal //! //! X86/X64 condition codes to reversed condition codes map. -ASMJIT_VAR const uint32_t _x86ReverseCond[20]; +ASMJIT_VARAPI const uint32_t _x86ReverseCond[20]; //! \internal //! //! X86/X64 condition codes to "cmovcc" group map. -ASMJIT_VAR const uint32_t _x86CondToCmovcc[20]; +ASMJIT_VARAPI const uint32_t _x86CondToCmovcc[20]; //! \internal //! //! X86/X64 condition codes to "jcc" group map. -ASMJIT_VAR const uint32_t _x86CondToJcc[20]; +ASMJIT_VARAPI const uint32_t _x86CondToJcc[20]; //! \internal //! //! 
X86/X64 condition codes to "setcc" group map. -ASMJIT_VAR const uint32_t _x86CondToSetcc[20]; +ASMJIT_VARAPI const uint32_t _x86CondToSetcc[20]; // ============================================================================ // [asmjit::X86InstId] @@ -1624,8 +1623,8 @@ ASMJIT_ENUM(X86InstFlags) { //! Instruction always performs memory access. //! //! This flag is always combined with `kX86InstFlagSpecial` and describes - //! that there is an implicit address which is accessed (usually EDI/RDI or - //! ESI/EDI). + //! that there is an implicit address which is accessed (usually EDI/RDI + //! and/or ESI/RSI). kX86InstFlagSpecialMem = 0x00000080, //! Instruction memory operand can refer to 16-bit address (used by FPU). @@ -1667,7 +1666,7 @@ ASMJIT_ENUM(X86InstFlags) { //! Instruction supports zeroing of elements {k0z..k7z}. kX86InstFlagAvx512KZero = 0x10000000, //! Instruction supports broadcast {1toN}. - kX86InstFlagAvx512Broadcast = 0x20000000, + kX86InstFlagAvx512BCast = 0x20000000, //! Instruction supports suppressing all exceptions {sae}. kX86InstFlagAvx512Sae = 0x40000000, //! Instruction supports static rounding control with SAE {rnd-sae}, @@ -1885,9 +1884,9 @@ ASMJIT_ENUM(X86FpSw) { kX86FpSw_Busy = 0x8000 }; -// ============================================================================ +// ============================================================================ // [asmjit::X86FpCw] -// ============================================================================ +// ============================================================================ //! X86/X64 FPU control word. 
ASMJIT_ENUM(X86FpCw) { @@ -1916,63 +1915,84 @@ ASMJIT_ENUM(X86FpCw) { kX86FpCw_IC_Affine = 0x1000 }; -// ============================================================================ +// ============================================================================ // [asmjit::X86Cmp] -// ============================================================================ +// ============================================================================ //! X86/X64 Comparison predicate used by CMP[PD/PS/SD/SS] family instructions. ASMJIT_ENUM(X86Cmp) { - kX86CmpEQ = 0x00, //!< Equal (Quite). - kX86CmpLT = 0x01, //!< Less (Signaling). - kX86CmpLE = 0x02, //!< Less/Equal (Signaling). - kX86CmpUNORD = 0x03, //!< Unordered (Quite). - kX86CmpNEQ = 0x04, //!< Not Equal (Quite). - kX86CmpNLT = 0x05, //!< Not Less (Signaling). - kX86CmpNLE = 0x06, //!< Not Less/Equal (Signaling). - kX86CmpORD = 0x07 //!< Ordered (Quite). + kX86CmpEQ = 0x00, //!< Equal (Quite). + kX86CmpLT = 0x01, //!< Less (Signaling). + kX86CmpLE = 0x02, //!< Less/Equal (Signaling). + kX86CmpUNORD = 0x03, //!< Unordered (Quite). + kX86CmpNEQ = 0x04, //!< Not Equal (Quite). + kX86CmpNLT = 0x05, //!< Not Less (Signaling). + kX86CmpNLE = 0x06, //!< Not Less/Equal (Signaling). + kX86CmpORD = 0x07 //!< Ordered (Quite). }; -// ============================================================================ +// ============================================================================ // [asmjit::X86VCmp] -// ============================================================================ +// ============================================================================ //! X86/X64 Comparison predicate used by VCMP[PD/PS/SD/SS] family instructions. //! //! The first 8 are compatible with \ref X86Cmp. ASMJIT_ENUM(X86VCmp) { - kX86VCmpEQ_OQ = 0x00, //!< Equal (Quite, Ordered). - kX86VCmpLT_OS = 0x01, //!< Less (Signaling, Ordered). - kX86VCmpLE_OS = 0x02, //!< Less/Equal (Signaling, Ordered). 
-  kX86VCmpUNORD_Q  = 0x03, //!< Unordered (Quite).
-  kX86VCmpNEQ_UQ   = 0x04, //!< Not Equal (Quite, Unordered).
-  kX86VCmpNLT_US   = 0x05, //!< Not Less (Signaling, Unordered).
-  kX86VCmpNLE_US   = 0x06, //!< Not Less/Equal (Signaling, Unordered).
-  kX86VCmpORD_Q    = 0x07, //!< Ordered (Quite).
+  kX86VCmpEQ_OQ    = 0x00, //!< Equal (Quiet, Ordered).
+  kX86VCmpLT_OS    = 0x01, //!< Less (Signaling, Ordered).
+  kX86VCmpLE_OS    = 0x02, //!< Less/Equal (Signaling, Ordered).
+  kX86VCmpUNORD_Q  = 0x03, //!< Unordered (Quiet).
+  kX86VCmpNEQ_UQ   = 0x04, //!< Not Equal (Quiet, Unordered).
+  kX86VCmpNLT_US   = 0x05, //!< Not Less (Signaling, Unordered).
+  kX86VCmpNLE_US   = 0x06, //!< Not Less/Equal (Signaling, Unordered).
+  kX86VCmpORD_Q    = 0x07, //!< Ordered (Quiet).

-  kX86VCmpEQ_UQ    = 0x08, //!< Equal (Quite, Unordered).
-  kX86VCmpNGE_US   = 0x09, //!< Not Greater/Equal (Signaling, Unordered).
-  kX86VCmpNGT_US   = 0x0A, //!< Not Greater (Signaling, Unordered).
-  kX86VCmpFALSE_OQ = 0x0B, //!< False (Quite, Ordered).
-  kX86VCmpNEQ_OQ   = 0x0C, //!< Not Equal (Quite, Ordered).
-  kX86VCmpGE_OS    = 0x0D, //!< Greater/Equal (Signaling, Ordered).
-  kX86VCmpGT_OS    = 0x0E, //!< Greater (Signaling, Ordered).
-  kX86VCmpTRUE_UQ  = 0x0F, //!< True (Quite, Unordered).
-  kX86VCmpEQ_OS    = 0x10, //!< Equal (Signaling, Ordered).
-  kX86VCmpLT_OQ    = 0x11, //!< Less (Quite, Ordered).
-  kX86VCmpLE_OQ    = 0x12, //!< Less/Equal (Quite, Ordered).
-  kX86VCmpUNORD_S  = 0x13, //!< Unordered (Signaling).
-  kX86VCmpNEQ_US   = 0x14, //!< Not Equal (Signaling, Unordered).
-  kX86VCmpNLT_UQ   = 0x15, //!< Not Less (Quite, Unordered).
-  kX86VCmpNLE_UQ   = 0x16, //!< Not Less/Equal (Quite, Unordered).
-  kX86VCmpORD_S    = 0x17, //!< Ordered (Signaling).
-  kX86VCmpEQ_US    = 0x18, //!< Equal (Signaling, Unordered).
-  kX86VCmpNGE_UQ   = 0x19, //!< Not Greater/Equal (Quite, Unordered).
-  kX86VCmpNGT_UQ   = 0x1A, //!< Not Greater (Quite, Unordered).
-  kX86VCmpFALSE_OS = 0x1B, //!< False (Signaling, Ordered).
-  kX86VCmpNEQ_OS   = 0x1C, //!< Not Equal (Signaling, Ordered).
-  kX86VCmpGE_OQ    = 0x1D, //!< Greater/Equal (Quite, Ordered).
-  kX86VCmpGT_OQ    = 0x1E, //!< Greater (Quite, Ordered).
-  kX86VCmpTRUE_US  = 0x1F  //!< True (Signaling, Unordered).
+  kX86VCmpEQ_UQ    = 0x08, //!< Equal (Quiet, Unordered).
+  kX86VCmpNGE_US   = 0x09, //!< Not Greater/Equal (Signaling, Unordered).
+  kX86VCmpNGT_US   = 0x0A, //!< Not Greater (Signaling, Unordered).
+  kX86VCmpFALSE_OQ = 0x0B, //!< False (Quiet, Ordered).
+  kX86VCmpNEQ_OQ   = 0x0C, //!< Not Equal (Quiet, Ordered).
+  kX86VCmpGE_OS    = 0x0D, //!< Greater/Equal (Signaling, Ordered).
+  kX86VCmpGT_OS    = 0x0E, //!< Greater (Signaling, Ordered).
+  kX86VCmpTRUE_UQ  = 0x0F, //!< True (Quiet, Unordered).
+  kX86VCmpEQ_OS    = 0x10, //!< Equal (Signaling, Ordered).
+  kX86VCmpLT_OQ    = 0x11, //!< Less (Quiet, Ordered).
+  kX86VCmpLE_OQ    = 0x12, //!< Less/Equal (Quiet, Ordered).
+  kX86VCmpUNORD_S  = 0x13, //!< Unordered (Signaling).
+  kX86VCmpNEQ_US   = 0x14, //!< Not Equal (Signaling, Unordered).
+  kX86VCmpNLT_UQ   = 0x15, //!< Not Less (Quiet, Unordered).
+  kX86VCmpNLE_UQ   = 0x16, //!< Not Less/Equal (Quiet, Unordered).
+  kX86VCmpORD_S    = 0x17, //!< Ordered (Signaling).
+  kX86VCmpEQ_US    = 0x18, //!< Equal (Signaling, Unordered).
+  kX86VCmpNGE_UQ   = 0x19, //!< Not Greater/Equal (Quiet, Unordered).
+  kX86VCmpNGT_UQ   = 0x1A, //!< Not Greater (Quiet, Unordered).
+  kX86VCmpFALSE_OS = 0x1B, //!< False (Signaling, Ordered).
+  kX86VCmpNEQ_OS   = 0x1C, //!< Not Equal (Signaling, Ordered).
+  kX86VCmpGE_OQ    = 0x1D, //!< Greater/Equal (Quiet, Ordered).
+  kX86VCmpGT_OQ    = 0x1E, //!< Greater (Quiet, Ordered).
+  kX86VCmpTRUE_US  = 0x1F  //!< True (Signaling, Unordered).
+};
+
+// ============================================================================
+// [asmjit::X86Round]
+// ============================================================================
+
+//! X86/X64 round encoding used by ROUND[PD/PS/SD/SS] family instructions.
+ASMJIT_ENUM(X86Round) {
+  //! Round control - round to nearest (even).
+  kX86RoundNearest = 0x0,
+  //! Round control - round down toward -INF (floor).
+  kX86RoundDown = 0x1,
+  //! Round control - round up toward +INF (ceil).
+  kX86RoundUp = 0x2,
+  //! Round control - round toward zero (truncate).
+  kX86RoundTrunc = 0x3,
+  //! Rounding select - if set it will use the current rounding mode
+  //! according to MXCSR and ignore the round control (RC) bits.
+  kX86RoundCurrent = 0x4,
+  //! Precision mask - if set it avoids an inexact exception.
+  kX86RoundInexact = 0x8
 };

 // ============================================================================
@@ -2327,8 +2347,7 @@ struct X86Util {
   //! `kInstIdNone` (zero) is returned.
   //!
   //! The given `name` doesn't have to be null-terminated if `len` is provided.
-  ASMJIT_API static uint32_t getInstIdByName(
-    const char* name, size_t len = kInvalidIndex);
+  ASMJIT_API static uint32_t getInstIdByName(const char* name, size_t len = kInvalidIndex);
 #endif // !ASMJIT_DISABLE_NAMES

   // --------------------------------------------------------------------------
@@ -2369,35 +2388,39 @@ struct X86Util {
   }

   // --------------------------------------------------------------------------
-  // [MmShuffle]
+  // [Shuffle (SIMD)]
   // --------------------------------------------------------------------------

   //! Pack a shuffle constant to be used with multimedia instrutions (2 values).
   //!
-  //! \param x First component position, number at interval [0, 1] inclusive.
-  //! \param y Second component position, number at interval [0, 1] inclusive.
+  //! \param a Position of the first component [0, 1], inclusive.
+  //! \param b Position of the second component [0, 1], inclusive.
   //!
-  //! Shuffle constants can be used to make immediate value for these intrinsics:
+  //! Shuffle constants can be used to encode an immediate for these instructions:
   //!   - `X86Assembler::shufpd()` and `X86Compiler::shufpd()`
-  static ASMJIT_INLINE int mmShuffle(uint32_t x, uint32_t y) {
-    return static_cast<int>((x << 1) | y);
+  static ASMJIT_INLINE int shuffle(uint32_t a, uint32_t b) {
+    uint32_t result = (a << 1) | b;
+    ASMJIT_ASSERT(result <= 0xFF);
+    return static_cast<int>(result);
   }

   //! Pack a shuffle constant to be used with multimedia instrutions (4 values).
   //!
-  //! \param z First component position, number at interval [0, 3] inclusive.
-  //! \param x Second component position, number at interval [0, 3] inclusive.
-  //! \param y Third component position, number at interval [0, 3] inclusive.
-  //! \param w Fourth component position, number at interval [0, 3] inclusive.
+  //! \param a Position of the first component [0, 3], inclusive.
+  //! \param b Position of the second component [0, 3], inclusive.
+  //! \param c Position of the third component [0, 3], inclusive.
+  //! \param d Position of the fourth component [0, 3], inclusive.
   //!
-  //! Shuffle constants can be used to make immediate value for these intrinsics:
-  //!   - `X86Assembler::pshufw()` and `X86Compiler::pshufw()`
-  //!   - `X86Assembler::pshufd()` and `X86Compiler::pshufd()`
-  //!   - `X86Assembler::pshufhw()` and `X86Compiler::pshufhw()`
-  //!   - `X86Assembler::pshuflw()` and `X86Compiler::pshuflw()`
-  //!   - `X86Assembler::shufps()` and `X86Compiler::shufps()`
-  static ASMJIT_INLINE int mmShuffle(uint32_t z, uint32_t y, uint32_t x, uint32_t w) {
-    return static_cast<int>((z << 6) | (y << 4) | (x << 2) | w);
+  //! Shuffle constants can be used to encode an immediate for these instructions:
+  //!   - `X86Assembler::pshufw()` and `X86Compiler::pshufw()`.
+  //!   - `X86Assembler::pshufd()` and `X86Compiler::pshufd()`.
+  //!   - `X86Assembler::pshufhw()` and `X86Compiler::pshufhw()`.
+  //!   - `X86Assembler::pshuflw()` and `X86Compiler::pshuflw()`.
+  //!   - `X86Assembler::shufps()` and `X86Compiler::shufps()`.
+  static ASMJIT_INLINE int shuffle(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+    uint32_t result = (a << 6) | (b << 4) | (c << 2) | d;
+    ASMJIT_ASSERT(result <= 0xFF);
+    return static_cast<int>(result);
   }
 };

diff --git a/src/asmjit/x86/x86operand.h b/src/asmjit/x86/x86operand.h
index f64e1e0..807bb69 100644
--- a/src/asmjit/x86/x86operand.h
+++ b/src/asmjit/x86/x86operand.h
@@ -11,9 +11,8 @@
 // [Dependencies - AsmJit]
 #include "../base/assembler.h"
 #include "../base/compiler.h"
-#include "../base/globals.h"
-#include "../base/intutil.h"
 #include "../base/operand.h"
+#include "../base/utils.h"
 #include "../base/vectypes.h"

 // [Api-Begin]
@@ -52,7 +51,7 @@ struct X86YmmVar;
 struct X86ZmmVar;
 #endif // !ASMJIT_DISABLE_COMPILER

-//! \addtogroup asmjit_x86_general
+//! \addtogroup asmjit_x86
 //! \{

 // ============================================================================
@@ -250,22 +249,66 @@ ASMJIT_ENUM(X86MemFlags) {
   kX86MemShiftMask = kX86MemShiftBits << kX86MemShiftIndex
 };

-// This is only defined by `x86operand_regs.cpp` when exporting registers.
-#if defined(ASMJIT_EXPORTS_X86OPERAND_REGS)
+// ============================================================================
+// [asmjit::X86VarType]
+// ============================================================================

-// Remap all classes to POD structs so they can be statically initialized
-// without calling a constructor. Compiler will store these in .DATA section.
-struct X86RipReg { Operand::VRegOp data; };
-struct X86SegReg { Operand::VRegOp data; };
-struct X86GpReg  { Operand::VRegOp data; };
-struct X86FpReg  { Operand::VRegOp data; };
-struct X86KReg   { Operand::VRegOp data; };
-struct X86MmReg  { Operand::VRegOp data; };
-struct X86XmmReg { Operand::VRegOp data; };
-struct X86YmmReg { Operand::VRegOp data; };
-struct X86ZmmReg { Operand::VRegOp data; };
+//! X86/X64 variable type.
+ASMJIT_ENUM(X86VarType) {
+  //! Variable is SP-FP (x87).
+  kX86VarTypeFp32 = kVarTypeFp32,
+  //! 
Variable is DP-FP (x87). + kX86VarTypeFp64 = kVarTypeFp64, -#else + //! Variable is Mm (MMX). + kX86VarTypeMm = 12, + + //! Variable is K (AVX512+) + kX86VarTypeK, + + //! Variable is Xmm (SSE+). + kX86VarTypeXmm, + //! Variable is a scalar Xmm SP-FP number. + kX86VarTypeXmmSs, + //! Variable is a packed Xmm SP-FP number (4 floats). + kX86VarTypeXmmPs, + //! Variable is a scalar Xmm DP-FP number. + kX86VarTypeXmmSd, + //! Variable is a packed Xmm DP-FP number (2 doubles). + kX86VarTypeXmmPd, + + //! Variable is Ymm (AVX+). + kX86VarTypeYmm, + //! Variable is a packed Ymm SP-FP number (8 floats). + kX86VarTypeYmmPs, + //! Variable is a packed Ymm DP-FP number (4 doubles). + kX86VarTypeYmmPd, + + //! Variable is Zmm (AVX512+). + kX86VarTypeZmm, + //! Variable is a packed Zmm SP-FP number (16 floats). + kX86VarTypeZmmPs, + //! Variable is a packed Zmm DP-FP number (8 doubles). + kX86VarTypeZmmPd, + + //! Count of variable types. + kX86VarTypeCount, + + //! \internal + //! \{ + _kX86VarTypeMmStart = kX86VarTypeMm, + _kX86VarTypeMmEnd = kX86VarTypeMm, + + _kX86VarTypeXmmStart = kX86VarTypeXmm, + _kX86VarTypeXmmEnd = kX86VarTypeXmmPd, + + _kX86VarTypeYmmStart = kX86VarTypeYmm, + _kX86VarTypeYmmEnd = kX86VarTypeYmmPd, + + _kX86VarTypeZmmStart = kX86VarTypeZmm, + _kX86VarTypeZmmEnd = kX86VarTypeZmmPd + //! \} +}; // ============================================================================ // [asmjit::X86RegCount] @@ -288,17 +331,15 @@ struct X86ZmmReg { Operand::VRegOp data; }; //! variables, thus, not needed to be managed. //! //! \note At the moment `X86RegCount` can fit into 32-bits, having 8-bits for -//! all register classes (except Fp). This can change in the future after a -//! new instruction set is announced. +//! each register class except `fp`. This can change in the future after a +//! new instruction set, which adds more registers, is introduced. 
struct X86RegCount { // -------------------------------------------------------------------------- // [Zero] // -------------------------------------------------------------------------- //! Reset all counters to zero. - ASMJIT_INLINE void reset() { - _packed = 0; - } + ASMJIT_INLINE void reset() { _packed = 0; } // -------------------------------------------------------------------------- // [Get] @@ -636,6 +677,29 @@ struct X86RegMask { }; }; +// ============================================================================ +// [asmjit::Reg] +// ============================================================================ + +// This is only defined by `x86operand_regs.cpp` when exporting registers. +#if defined(ASMJIT_EXPORTS_X86OPERAND_REGS) + +// Remap all classes to POD structs so they can be statically initialized +// without calling a constructor. Compiler will store these in .DATA section. +// +// Kept in union to prevent LTO warnings. +struct X86RipReg { union { Operand::VRegOp _vreg; }; }; +struct X86SegReg { union { Operand::VRegOp _vreg; }; }; +struct X86GpReg { union { Operand::VRegOp _vreg; }; }; +struct X86FpReg { union { Operand::VRegOp _vreg; }; }; +struct X86KReg { union { Operand::VRegOp _vreg; }; }; +struct X86MmReg { union { Operand::VRegOp _vreg; }; }; +struct X86XmmReg { union { Operand::VRegOp _vreg; }; }; +struct X86YmmReg { union { Operand::VRegOp _vreg; }; }; +struct X86ZmmReg { union { Operand::VRegOp _vreg; }; }; + +#else + // ============================================================================ // [asmjit::X86Reg] // ============================================================================ @@ -704,10 +768,10 @@ struct X86Reg : public Reg { //! Get whether the `op` operand is Gpb-Lo or Gpb-Hi register. 
static ASMJIT_INLINE bool isGpbReg(const Operand& op) { - const uint32_t mask = IntUtil::pack32_2x8_1x16( + const uint32_t mask = Utils::pack32_2x8_1x16( 0xFF, 0xFF, ~(_kX86RegTypePatchedGpbHi << 8) & 0xFF00); - return (op._packed[0].u32[0] & mask) == IntUtil::pack32_2x8_1x16(kOperandTypeReg, 1, 0x0000); + return (op._packed[0].u32[0] & mask) == Utils::pack32_2x8_1x16(kOperandTypeReg, 1, 0x0000); } }; @@ -794,6 +858,15 @@ struct X86GpReg : public X86Reg { // [X86GpReg Cast] // -------------------------------------------------------------------------- + //! Cast this register to the same register type/size as `other`. + //! + //! This function has been designed to help with maintaining code that runs + //! in both 32-bit and 64-bit modes. If you have registers that have mixed + //! types, use `X86GpReg::as()` to cast one type to another. + ASMJIT_INLINE X86GpReg as(const X86GpReg& other) const { + return X86GpReg(other.getRegType(), getRegIndex(), other.getSize()); + } + //! Cast this register to 8-bit (LO) part. ASMJIT_INLINE X86GpReg r8() const { return X86GpReg(kX86RegTypeGpbLo, getRegIndex(), 1); } //! Cast this register to 8-bit (LO) part. @@ -1319,19 +1392,19 @@ struct X86Mem : public BaseMem { //! Get whether the memory operand has 32-bit GP base. ASMJIT_INLINE bool hasGpdBase() const { - return (_packed[0].u32[0] & IntUtil::pack32_4x8(0x00, 0x00, 0x00, kX86MemGpdMask)) != 0; + return (_packed[0].u32[0] & Utils::pack32_4x8(0x00, 0x00, 0x00, kX86MemGpdMask)) != 0; } //! Set whether the memory operand has 32-bit GP base. ASMJIT_INLINE X86Mem& setGpdBase() { - _packed[0].u32[0] |= IntUtil::pack32_4x8(0x00, 0x00, 0x00, kX86MemGpdMask); + _packed[0].u32[0] |= Utils::pack32_4x8(0x00, 0x00, 0x00, kX86MemGpdMask); return *this; } //! Set whether the memory operand has 32-bit GP base to `b`. 
ASMJIT_INLINE X86Mem& setGpdBase(uint32_t b) { - _packed[0].u32[0] &=~IntUtil::pack32_4x8(0x00, 0x00, 0x00, kX86MemGpdMask); - _packed[0].u32[0] |= IntUtil::pack32_4x8(0x00, 0x00, 0x00, b << kX86MemGpdIndex); + _packed[0].u32[0] &=~Utils::pack32_4x8(0x00, 0x00, 0x00, kX86MemGpdMask); + _packed[0].u32[0] |= Utils::pack32_4x8(0x00, 0x00, 0x00, b << kX86MemGpdIndex); return *this; } @@ -1346,8 +1419,8 @@ struct X86Mem : public BaseMem { //! Set V-SIB type. ASMJIT_INLINE X86Mem& _setVSib(uint32_t vsib) { - _packed[0].u32[0] &=~IntUtil::pack32_4x8(0x00, 0x00, 0x00, kX86MemVSibMask); - _packed[0].u32[0] |= IntUtil::pack32_4x8(0x00, 0x00, 0x00, vsib << kX86MemVSibIndex); + _packed[0].u32[0] &=~Utils::pack32_4x8(0x00, 0x00, 0x00, kX86MemVSibMask); + _packed[0].u32[0] |= Utils::pack32_4x8(0x00, 0x00, 0x00, vsib << kX86MemVSibIndex); return *this; } @@ -1512,8 +1585,8 @@ struct X86Mem : public BaseMem { //! Set memory operand index scale (0, 1, 2 or 3). ASMJIT_INLINE X86Mem& setShift(uint32_t shift) { - _packed[0].u32[0] &=~IntUtil::pack32_4x8(0x00, 0x00, 0x00, kX86MemShiftMask); - _packed[0].u32[0] |= IntUtil::pack32_4x8(0x00, 0x00, 0x00, shift << kX86MemShiftIndex); + _packed[0].u32[0] &=~Utils::pack32_4x8(0x00, 0x00, 0x00, kX86MemShiftMask); + _packed[0].u32[0] |= Utils::pack32_4x8(0x00, 0x00, 0x00, shift << kX86MemShiftIndex); return *this; } @@ -1576,6 +1649,469 @@ struct X86Mem : public BaseMem { return (base._vreg.size & 0x4) << (kX86MemGpdIndex - 2); } }; + +// ============================================================================ +// [asmjit::X86Var] +// ============================================================================ + +#if !defined(ASMJIT_DISABLE_COMPILER) +//! Base class for all X86 variables. +struct X86Var : public Var { + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! 
Create a new uninitialized `X86Var` instance. + ASMJIT_INLINE X86Var() : Var(NoInit) { reset(); } + //! Create a clone of `other`. + ASMJIT_INLINE X86Var(const X86Var& other) : Var(other) {} + //! Create a new uninitialized `X86Var` instance (internal). + explicit ASMJIT_INLINE X86Var(const _NoInit&) : Var(NoInit) {} + + // -------------------------------------------------------------------------- + // [X86Var Specific] + // -------------------------------------------------------------------------- + + //! Clone X86Var operand. + ASMJIT_INLINE X86Var clone() const { return X86Var(*this); } + + // -------------------------------------------------------------------------- + // [Type] + // -------------------------------------------------------------------------- + + //! Get register type. + ASMJIT_INLINE uint32_t getRegType() const { return _vreg.type; } + //! Get variable type. + ASMJIT_INLINE uint32_t getVarType() const { return _vreg.vType; } + + //! Get whether the variable is Gp register. + ASMJIT_INLINE bool isGp() const { return _vreg.type <= kX86RegTypeGpq; } + //! Get whether the variable is Gpb (8-bit) register. + ASMJIT_INLINE bool isGpb() const { return _vreg.type <= kX86RegTypeGpbHi; } + //! Get whether the variable is Gpb-lo (8-bit) register. + ASMJIT_INLINE bool isGpbLo() const { return _vreg.type == kX86RegTypeGpbLo; } + //! Get whether the variable is Gpb-hi (8-bit) register. + ASMJIT_INLINE bool isGpbHi() const { return _vreg.type == kX86RegTypeGpbHi; } + //! Get whether the variable is Gpw (16-bit) register. + ASMJIT_INLINE bool isGpw() const { return _vreg.type == kX86RegTypeGpw; } + //! Get whether the variable is Gpd (32-bit) register. + ASMJIT_INLINE bool isGpd() const { return _vreg.type == kX86RegTypeGpd; } + //! Get whether the variable is Gpq (64-bit) register. + ASMJIT_INLINE bool isGpq() const { return _vreg.type == kX86RegTypeGpq; } + + //! Get whether the variable is Mm (64-bit) register. 
+ ASMJIT_INLINE bool isMm() const { return _vreg.type == kX86RegTypeMm; } + //! Get whether the variable is K (64-bit) register. + ASMJIT_INLINE bool isK() const { return _vreg.type == kX86RegTypeK; } + + //! Get whether the variable is Xmm (128-bit) register. + ASMJIT_INLINE bool isXmm() const { return _vreg.type == kX86RegTypeXmm; } + //! Get whether the variable is Ymm (256-bit) register. + ASMJIT_INLINE bool isYmm() const { return _vreg.type == kX86RegTypeYmm; } + //! Get whether the variable is Zmm (512-bit) register. + ASMJIT_INLINE bool isZmm() const { return _vreg.type == kX86RegTypeZmm; } + + // -------------------------------------------------------------------------- + // [Memory Cast] + // -------------------------------------------------------------------------- + + //! Cast this variable to a memory operand. + //! + //! \note Size of operand depends on native variable type, you can use other + //! variants if you want specific one. + ASMJIT_INLINE X86Mem m(int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, disp, getSize()); + } + + //! \overload + ASMJIT_INLINE X86Mem m(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, getSize()); + } + + //! Cast this variable to 8-bit memory operand. + ASMJIT_INLINE X86Mem m8(int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, disp, 1); + } + + //! \overload + ASMJIT_INLINE X86Mem m8(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 1); + } + + //! Cast this variable to 16-bit memory operand. + ASMJIT_INLINE X86Mem m16(int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, disp, 2); + } + + //! 
\overload + ASMJIT_INLINE X86Mem m16(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 2); + } + + //! Cast this variable to 32-bit memory operand. + ASMJIT_INLINE X86Mem m32(int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, disp, 4); + } + + //! \overload + ASMJIT_INLINE X86Mem m32(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 4); + } + + //! Cast this variable to 64-bit memory operand. + ASMJIT_INLINE X86Mem m64(int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, disp, 8); + } + + //! \overload + ASMJIT_INLINE X86Mem m64(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 8); + } + + //! Cast this variable to 80-bit memory operand (long double). + ASMJIT_INLINE X86Mem m80(int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, disp, 10); + } + + //! \overload + ASMJIT_INLINE X86Mem m80(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 10); + } + + //! Cast this variable to 128-bit memory operand. + ASMJIT_INLINE X86Mem m128(int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, disp, 16); + } + + //! \overload + ASMJIT_INLINE X86Mem m128(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 16); + } + + //! Cast this variable to 256-bit memory operand. + ASMJIT_INLINE X86Mem m256(int32_t disp = 0) const { + return X86Mem(Init, kMemTypeStackIndex, *this, disp, 32); + } + + //! 
\overload
+  ASMJIT_INLINE X86Mem m256(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const {
+    return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 32);
+  }
+
+  //! Cast this variable to 512-bit memory operand.
+  ASMJIT_INLINE X86Mem m512(int32_t disp = 0) const {
+    return X86Mem(Init, kMemTypeStackIndex, *this, disp, 64);
+  }
+
+  //! \overload
+  ASMJIT_INLINE X86Mem m512(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const {
+    return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 64);
+  }
+
+  // --------------------------------------------------------------------------
+  // [Operator Overload]
+  // --------------------------------------------------------------------------
+
+  ASMJIT_INLINE X86Var& operator=(const X86Var& other) {
+    _copy(other);
+    return *this;
+  }
+
+  ASMJIT_INLINE bool operator==(const X86Var& other) const {
+    return _packed[0] == other._packed[0];
+  }
+
+  ASMJIT_INLINE bool operator!=(const X86Var& other) const {
+    return _packed[0] != other._packed[0];
+  }
+
+  // --------------------------------------------------------------------------
+  // [Private]
+  // --------------------------------------------------------------------------
+
+protected:
+  ASMJIT_INLINE X86Var(const X86Var& other, uint32_t reg, uint32_t size) : Var(NoInit) {
+    _init_packed_op_sz_w0_id(kOperandTypeVar, size, (reg << 8) + other._vreg.index, other._base.id);
+    _vreg.vType = other._vreg.vType;
+  }
+};
+#endif // !ASMJIT_DISABLE_COMPILER
+
+// ============================================================================
+// [asmjit::X86GpVar]
+// ============================================================================
+
+#if !defined(ASMJIT_DISABLE_COMPILER)
+//! Gp variable.
+struct X86GpVar : public X86Var { + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + +protected: + ASMJIT_INLINE X86GpVar(const X86GpVar& other, uint32_t reg, uint32_t size) + : X86Var(other, reg, size) {} + +public: + //! Create a new uninitialized `X86GpVar` instance. + ASMJIT_INLINE X86GpVar() : X86Var() {} + //! Create a clone of `other`. + ASMJIT_INLINE X86GpVar(const X86GpVar& other) : X86Var(other) {} + //! Create a new uninitialized `X86GpVar` instance (internal). + explicit ASMJIT_INLINE X86GpVar(const _NoInit&) : X86Var(NoInit) {} + + // -------------------------------------------------------------------------- + // [X86GpVar Specific] + // -------------------------------------------------------------------------- + + //! Clone X86GpVar operand. + ASMJIT_INLINE X86GpVar clone() const { return X86GpVar(*this); } + //! Reset X86GpVar operand. + ASMJIT_INLINE void reset() { X86Var::reset(); } + + // -------------------------------------------------------------------------- + // [X86GpVar Cast] + // -------------------------------------------------------------------------- + + //! Cast this variable to the same register type/size as `other`. + //! + //! This function has been designed to help with maintaining code that runs + //! in both 32-bit and 64-bit modes. If you have variables that have mixed + //! types, use `X86GpVar::as()` to cast one type to another. + ASMJIT_INLINE X86GpVar as(const X86GpVar& other) const { + return X86GpVar(*this, other.getRegType(), other.getSize()); + } + + //! Cast this variable to 8-bit (LO) part of variable. + ASMJIT_INLINE X86GpVar r8() const { return X86GpVar(*this, kX86RegTypeGpbLo, 1); } + //! Cast this variable to 8-bit (LO) part of variable. + ASMJIT_INLINE X86GpVar r8Lo() const { return X86GpVar(*this, kX86RegTypeGpbLo, 1); } + //! 
Cast this variable to 8-bit (HI) part of variable. + ASMJIT_INLINE X86GpVar r8Hi() const { return X86GpVar(*this, kX86RegTypeGpbHi, 1); } + + //! Cast this variable to 16-bit part of variable. + ASMJIT_INLINE X86GpVar r16() const { return X86GpVar(*this, kX86RegTypeGpw, 2); } + //! Cast this variable to 32-bit part of variable. + ASMJIT_INLINE X86GpVar r32() const { return X86GpVar(*this, kX86RegTypeGpd, 4); } + //! Cast this variable to 64-bit part of variable. + ASMJIT_INLINE X86GpVar r64() const { return X86GpVar(*this, kX86RegTypeGpq, 8); } + + // -------------------------------------------------------------------------- + // [Operator Overload] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE X86GpVar& operator=(const X86GpVar& other) { _copy(other); return *this; } + + ASMJIT_INLINE bool operator==(const X86GpVar& other) const { return X86Var::operator==(other); } + ASMJIT_INLINE bool operator!=(const X86GpVar& other) const { return X86Var::operator!=(other); } +}; +#endif // !ASMJIT_DISABLE_COMPILER + +// ============================================================================ +// [asmjit::X86MmVar] +// ============================================================================ + +#if !defined(ASMJIT_DISABLE_COMPILER) +//! Mm variable. +struct X86MmVar : public X86Var { + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new uninitialized `X86MmVar` instance. + ASMJIT_INLINE X86MmVar() : X86Var() {} + //! Create a clone of `other`. + ASMJIT_INLINE X86MmVar(const X86MmVar& other) : X86Var(other) {} + + //! Create a new uninitialized `X86MmVar` instance (internal). 
+ explicit ASMJIT_INLINE X86MmVar(const _NoInit&) : X86Var(NoInit) {} + + // -------------------------------------------------------------------------- + // [X86MmVar Specific] + // -------------------------------------------------------------------------- + + //! Clone X86MmVar operand. + ASMJIT_INLINE X86MmVar clone() const { return X86MmVar(*this); } + //! Reset X86MmVar operand. + ASMJIT_INLINE void reset() { X86Var::reset(); } + + // -------------------------------------------------------------------------- + // [Operator Overload] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE X86MmVar& operator=(const X86MmVar& other) { _copy(other); return *this; } + + ASMJIT_INLINE bool operator==(const X86MmVar& other) const { return X86Var::operator==(other); } + ASMJIT_INLINE bool operator!=(const X86MmVar& other) const { return X86Var::operator!=(other); } +}; +#endif // !ASMJIT_DISABLE_COMPILER + +// ============================================================================ +// [asmjit::X86XmmVar] +// ============================================================================ + +#if !defined(ASMJIT_DISABLE_COMPILER) +//! Xmm variable. +struct X86XmmVar : public X86Var { + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + +protected: + ASMJIT_INLINE X86XmmVar(const X86Var& other, uint32_t reg, uint32_t size) + : X86Var(other, reg, size) {} + + friend struct X86YmmVar; + friend struct X86ZmmVar; + +public: + //! Create a new uninitialized `X86XmmVar` instance. + ASMJIT_INLINE X86XmmVar() : X86Var() {} + //! Create a clone of `other`. + ASMJIT_INLINE X86XmmVar(const X86XmmVar& other) : X86Var(other) {} + //! Create a new uninitialized `X86XmmVar` instance (internal). 
+ explicit ASMJIT_INLINE X86XmmVar(const _NoInit&) : X86Var(NoInit) {} + + // -------------------------------------------------------------------------- + // [X86XmmVar Specific] + // -------------------------------------------------------------------------- + + //! Clone X86XmmVar operand. + ASMJIT_INLINE X86XmmVar clone() const { return X86XmmVar(*this); } + //! Reset X86XmmVar operand. + ASMJIT_INLINE void reset() { X86Var::reset(); } + + // -------------------------------------------------------------------------- + // [X86XmmVar Cast] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE X86XmmVar xmm() const { return X86XmmVar(*this); } + ASMJIT_INLINE X86YmmVar ymm() const; + ASMJIT_INLINE X86ZmmVar zmm() const; + + // -------------------------------------------------------------------------- + // [Operator Overload] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE X86XmmVar& operator=(const X86XmmVar& other) { _copy(other); return *this; } + + ASMJIT_INLINE bool operator==(const X86XmmVar& other) const { return X86Var::operator==(other); } + ASMJIT_INLINE bool operator!=(const X86XmmVar& other) const { return X86Var::operator!=(other); } +}; +#endif // !ASMJIT_DISABLE_COMPILER + +// ============================================================================ +// [asmjit::X86YmmVar] +// ============================================================================ + +#if !defined(ASMJIT_DISABLE_COMPILER) +//! Ymm variable. +struct X86YmmVar : public X86Var { + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + +protected: + ASMJIT_INLINE X86YmmVar(const X86Var& other, uint32_t reg, uint32_t size) + : X86Var(other, reg, size) {} + + friend struct X86XmmVar; + friend struct X86ZmmVar; + +public: + //! 
Create a new uninitialized `X86YmmVar` instance. + ASMJIT_INLINE X86YmmVar() : X86Var() {} + //! Create a clone of `other`. + ASMJIT_INLINE X86YmmVar(const X86YmmVar& other) : X86Var(other) {} + //! Create a new uninitialized `X86YmmVar` instance (internal). + explicit ASMJIT_INLINE X86YmmVar(const _NoInit&) : X86Var(NoInit) {} + + // -------------------------------------------------------------------------- + // [X86YmmVar Specific] + // -------------------------------------------------------------------------- + + //! Clone X86YmmVar operand. + ASMJIT_INLINE X86YmmVar clone() const { return X86YmmVar(*this); } + //! Reset X86YmmVar operand. + ASMJIT_INLINE void reset() { X86Var::reset(); } + + // -------------------------------------------------------------------------- + // [X86YmmVar Cast] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE X86XmmVar xmm() const { return X86XmmVar(*this, kX86RegTypeXmm, 8); } + ASMJIT_INLINE X86YmmVar ymm() const { return X86YmmVar(*this); } + ASMJIT_INLINE X86ZmmVar zmm() const; + + // -------------------------------------------------------------------------- + // [Operator Overload] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE X86YmmVar& operator=(const X86YmmVar& other) { _copy(other); return *this; } + + ASMJIT_INLINE bool operator==(const X86YmmVar& other) const { return X86Var::operator==(other); } + ASMJIT_INLINE bool operator!=(const X86YmmVar& other) const { return X86Var::operator!=(other); } +}; + +ASMJIT_INLINE X86YmmVar X86XmmVar::ymm() const { return X86YmmVar(*this, kX86RegTypeYmm, 16); } +#endif // !ASMJIT_DISABLE_COMPILER + +// ============================================================================ +// [asmjit::X86ZmmVar] +// ============================================================================ + +#if !defined(ASMJIT_DISABLE_COMPILER) +//! Zmm variable. 
+struct X86ZmmVar : public X86Var { + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + +protected: + ASMJIT_INLINE X86ZmmVar(const X86Var& other, uint32_t reg, uint32_t size) + : X86Var(other, reg, size) {} + + friend struct X86XmmVar; + friend struct X86YmmVar; + +public: + //! Create a new uninitialized `X86ZmmVar` instance. + ASMJIT_INLINE X86ZmmVar() : X86Var() {} + //! Create a clone of `other`. + ASMJIT_INLINE X86ZmmVar(const X86ZmmVar& other) : X86Var(other) {} + //! Create a new uninitialized `X86ZmmVar` instance (internal). + explicit ASMJIT_INLINE X86ZmmVar(const _NoInit&) : X86Var(NoInit) {} + + // -------------------------------------------------------------------------- + // [X86ZmmVar Specific] + // -------------------------------------------------------------------------- + + //! Clone X86ZmmVar operand. + ASMJIT_INLINE X86ZmmVar clone() const { return X86ZmmVar(*this); } + //! Reset X86ZmmVar operand. 
+  ASMJIT_INLINE void reset() { X86Var::reset(); }
+
+  // --------------------------------------------------------------------------
+  // [X86ZmmVar Cast]
+  // --------------------------------------------------------------------------
+
+  ASMJIT_INLINE X86XmmVar xmm() const { return X86XmmVar(*this, kX86RegTypeXmm, 8); }
+  ASMJIT_INLINE X86YmmVar ymm() const { return X86YmmVar(*this, kX86RegTypeYmm, 16); }
+  ASMJIT_INLINE X86ZmmVar zmm() const { return X86ZmmVar(*this); }
+
+  // --------------------------------------------------------------------------
+  // [Operator Overload]
+  // --------------------------------------------------------------------------
+
+  ASMJIT_INLINE X86ZmmVar& operator=(const X86ZmmVar& other) { _copy(other); return *this; }
+
+  ASMJIT_INLINE bool operator==(const X86ZmmVar& other) const { return X86Var::operator==(other); }
+  ASMJIT_INLINE bool operator!=(const X86ZmmVar& other) const { return X86Var::operator!=(other); }
+};
+
+ASMJIT_INLINE X86ZmmVar X86XmmVar::zmm() const { return X86ZmmVar(*this, kX86RegTypeZmm, 32); }
+ASMJIT_INLINE X86ZmmVar X86YmmVar::zmm() const { return X86ZmmVar(*this, kX86RegTypeZmm, 32); }
+#endif // !ASMJIT_DISABLE_COMPILER
 #endif // ASMJIT_EXPORTS_X86OPERAND_REGS

 // ============================================================================
@@ -1603,13 +2139,15 @@ struct X86RegData {
   X86YmmReg ymm[32];
   X86ZmmReg zmm[32];
 };
-
-ASMJIT_VAR const X86RegData x86RegData;
+ASMJIT_VARAPI const X86RegData x86RegData;

 // ============================================================================
 // [asmjit::x86]
 // ============================================================================

+// This is only defined by `x86operand_regs.cpp` when exporting registers.
+#if !defined(ASMJIT_EXPORTS_X86OPERAND_REGS) + namespace x86 { // ============================================================================ @@ -1617,221 +2155,218 @@ namespace x86 { // ============================================================================ #define ASMJIT_DEF_REG(_Type_, _Name_, _Field_) \ - static const _Type_& _Name_ = x86RegData._Field_ + static const _Type_& _Name_ = x86RegData._Field_; -ASMJIT_DEF_REG(X86RipReg, rip, rip); //!< RIP register. -ASMJIT_DEF_REG(X86GpReg , noGpReg, noGp); //!< No GP register (for `X86Mem` operand). +ASMJIT_DEF_REG(X86RipReg, rip, rip) //!< RIP register. +ASMJIT_DEF_REG(X86GpReg , noGpReg, noGp) //!< No GP register (for `X86Mem` operand). -ASMJIT_DEF_REG(X86SegReg, es , seg[1]); //!< Cs segment register. -ASMJIT_DEF_REG(X86SegReg, cs , seg[2]); //!< Ss segment register. -ASMJIT_DEF_REG(X86SegReg, ss , seg[3]); //!< Ds segment register. -ASMJIT_DEF_REG(X86SegReg, ds , seg[4]); //!< Es segment register. -ASMJIT_DEF_REG(X86SegReg, fs , seg[5]); //!< Fs segment register. -ASMJIT_DEF_REG(X86SegReg, gs , seg[6]); //!< Gs segment register. +ASMJIT_DEF_REG(X86SegReg, es , seg[1]) //!< Cs segment register. +ASMJIT_DEF_REG(X86SegReg, cs , seg[2]) //!< Ss segment register. +ASMJIT_DEF_REG(X86SegReg, ss , seg[3]) //!< Ds segment register. +ASMJIT_DEF_REG(X86SegReg, ds , seg[4]) //!< Es segment register. +ASMJIT_DEF_REG(X86SegReg, fs , seg[5]) //!< Fs segment register. +ASMJIT_DEF_REG(X86SegReg, gs , seg[6]) //!< Gs segment register. -ASMJIT_DEF_REG(X86GpReg , al , gpbLo[0]); //!< 8-bit Gpb-lo register. -ASMJIT_DEF_REG(X86GpReg , cl , gpbLo[1]); //!< 8-bit Gpb-lo register. -ASMJIT_DEF_REG(X86GpReg , dl , gpbLo[2]); //!< 8-bit Gpb-lo register. -ASMJIT_DEF_REG(X86GpReg , bl , gpbLo[3]); //!< 8-bit Gpb-lo register. -ASMJIT_DEF_REG(X86GpReg , spl , gpbLo[4]); //!< 8-bit Gpb-lo register (X64). -ASMJIT_DEF_REG(X86GpReg , bpl , gpbLo[5]); //!< 8-bit Gpb-lo register (X64). 
-ASMJIT_DEF_REG(X86GpReg , sil , gpbLo[6]); //!< 8-bit Gpb-lo register (X64). -ASMJIT_DEF_REG(X86GpReg , dil , gpbLo[7]); //!< 8-bit Gpb-lo register (X64). -ASMJIT_DEF_REG(X86GpReg , r8b , gpbLo[8]); //!< 8-bit Gpb-lo register (X64). -ASMJIT_DEF_REG(X86GpReg , r9b , gpbLo[9]); //!< 8-bit Gpb-lo register (X64). -ASMJIT_DEF_REG(X86GpReg , r10b , gpbLo[10]);//!< 8-bit Gpb-lo register (X64). -ASMJIT_DEF_REG(X86GpReg , r11b , gpbLo[11]);//!< 8-bit Gpb-lo register (X64). -ASMJIT_DEF_REG(X86GpReg , r12b , gpbLo[12]);//!< 8-bit Gpb-lo register (X64). -ASMJIT_DEF_REG(X86GpReg , r13b , gpbLo[13]);//!< 8-bit Gpb-lo register (X64). -ASMJIT_DEF_REG(X86GpReg , r14b , gpbLo[14]);//!< 8-bit Gpb-lo register (X64). -ASMJIT_DEF_REG(X86GpReg , r15b , gpbLo[15]);//!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , al , gpbLo[0]) //!< 8-bit Gpb-lo register. +ASMJIT_DEF_REG(X86GpReg , cl , gpbLo[1]) //!< 8-bit Gpb-lo register. +ASMJIT_DEF_REG(X86GpReg , dl , gpbLo[2]) //!< 8-bit Gpb-lo register. +ASMJIT_DEF_REG(X86GpReg , bl , gpbLo[3]) //!< 8-bit Gpb-lo register. +ASMJIT_DEF_REG(X86GpReg , spl , gpbLo[4]) //!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , bpl , gpbLo[5]) //!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , sil , gpbLo[6]) //!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , dil , gpbLo[7]) //!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , r8b , gpbLo[8]) //!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , r9b , gpbLo[9]) //!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , r10b , gpbLo[10])//!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , r11b , gpbLo[11])//!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , r12b , gpbLo[12])//!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , r13b , gpbLo[13])//!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , r14b , gpbLo[14])//!< 8-bit Gpb-lo register (X64). +ASMJIT_DEF_REG(X86GpReg , r15b , gpbLo[15])//!< 8-bit Gpb-lo register (X64). 
-ASMJIT_DEF_REG(X86GpReg , ah , gpbHi[0]); //!< 8-bit Gpb-hi register. -ASMJIT_DEF_REG(X86GpReg , ch , gpbHi[1]); //!< 8-bit Gpb-hi register. -ASMJIT_DEF_REG(X86GpReg , dh , gpbHi[2]); //!< 8-bit Gpb-hi register. -ASMJIT_DEF_REG(X86GpReg , bh , gpbHi[3]); //!< 8-bit Gpb-hi register. +ASMJIT_DEF_REG(X86GpReg , ah , gpbHi[0]) //!< 8-bit Gpb-hi register. +ASMJIT_DEF_REG(X86GpReg , ch , gpbHi[1]) //!< 8-bit Gpb-hi register. +ASMJIT_DEF_REG(X86GpReg , dh , gpbHi[2]) //!< 8-bit Gpb-hi register. +ASMJIT_DEF_REG(X86GpReg , bh , gpbHi[3]) //!< 8-bit Gpb-hi register. -ASMJIT_DEF_REG(X86GpReg , ax , gpw[0]); //!< 16-bit Gpw register. -ASMJIT_DEF_REG(X86GpReg , cx , gpw[1]); //!< 16-bit Gpw register. -ASMJIT_DEF_REG(X86GpReg , dx , gpw[2]); //!< 16-bit Gpw register. -ASMJIT_DEF_REG(X86GpReg , bx , gpw[3]); //!< 16-bit Gpw register. -ASMJIT_DEF_REG(X86GpReg , sp , gpw[4]); //!< 16-bit Gpw register. -ASMJIT_DEF_REG(X86GpReg , bp , gpw[5]); //!< 16-bit Gpw register. -ASMJIT_DEF_REG(X86GpReg , si , gpw[6]); //!< 16-bit Gpw register. -ASMJIT_DEF_REG(X86GpReg , di , gpw[7]); //!< 16-bit Gpw register. -ASMJIT_DEF_REG(X86GpReg , r8w , gpw[8]); //!< 16-bit Gpw register (X64). -ASMJIT_DEF_REG(X86GpReg , r9w , gpw[9]); //!< 16-bit Gpw register (X64). -ASMJIT_DEF_REG(X86GpReg , r10w , gpw[10]); //!< 16-bit Gpw register (X64). -ASMJIT_DEF_REG(X86GpReg , r11w , gpw[11]); //!< 16-bit Gpw register (X64). -ASMJIT_DEF_REG(X86GpReg , r12w , gpw[12]); //!< 16-bit Gpw register (X64). -ASMJIT_DEF_REG(X86GpReg , r13w , gpw[13]); //!< 16-bit Gpw register (X64). -ASMJIT_DEF_REG(X86GpReg , r14w , gpw[14]); //!< 16-bit Gpw register (X64). -ASMJIT_DEF_REG(X86GpReg , r15w , gpw[15]); //!< 16-bit Gpw register (X64). +ASMJIT_DEF_REG(X86GpReg , ax , gpw[0]) //!< 16-bit Gpw register. +ASMJIT_DEF_REG(X86GpReg , cx , gpw[1]) //!< 16-bit Gpw register. +ASMJIT_DEF_REG(X86GpReg , dx , gpw[2]) //!< 16-bit Gpw register. +ASMJIT_DEF_REG(X86GpReg , bx , gpw[3]) //!< 16-bit Gpw register. 
+ASMJIT_DEF_REG(X86GpReg , sp , gpw[4]) //!< 16-bit Gpw register. +ASMJIT_DEF_REG(X86GpReg , bp , gpw[5]) //!< 16-bit Gpw register. +ASMJIT_DEF_REG(X86GpReg , si , gpw[6]) //!< 16-bit Gpw register. +ASMJIT_DEF_REG(X86GpReg , di , gpw[7]) //!< 16-bit Gpw register. +ASMJIT_DEF_REG(X86GpReg , r8w , gpw[8]) //!< 16-bit Gpw register (X64). +ASMJIT_DEF_REG(X86GpReg , r9w , gpw[9]) //!< 16-bit Gpw register (X64). +ASMJIT_DEF_REG(X86GpReg , r10w , gpw[10]) //!< 16-bit Gpw register (X64). +ASMJIT_DEF_REG(X86GpReg , r11w , gpw[11]) //!< 16-bit Gpw register (X64). +ASMJIT_DEF_REG(X86GpReg , r12w , gpw[12]) //!< 16-bit Gpw register (X64). +ASMJIT_DEF_REG(X86GpReg , r13w , gpw[13]) //!< 16-bit Gpw register (X64). +ASMJIT_DEF_REG(X86GpReg , r14w , gpw[14]) //!< 16-bit Gpw register (X64). +ASMJIT_DEF_REG(X86GpReg , r15w , gpw[15]) //!< 16-bit Gpw register (X64). -ASMJIT_DEF_REG(X86GpReg , eax , gpd[0]); //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , ecx , gpd[1]); //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , edx , gpd[2]); //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , ebx , gpd[3]); //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , esp , gpd[4]); //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , ebp , gpd[5]); //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , esi , gpd[6]); //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , edi , gpd[7]); //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , r8d , gpd[8]); //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r9d , gpd[9]); //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r10d , gpd[10]); //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r11d , gpd[11]); //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r12d , gpd[12]); //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r13d , gpd[13]); //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r14d , gpd[14]); //!< 32-bit Gpd register (X64). 
-ASMJIT_DEF_REG(X86GpReg , r15d , gpd[15]); //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , eax , gpd[0]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , ecx , gpd[1]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , edx , gpd[2]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , ebx , gpd[3]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , esp , gpd[4]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , ebp , gpd[5]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , esi , gpd[6]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , edi , gpd[7]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , r8d , gpd[8]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r9d , gpd[9]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r10d , gpd[10]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r11d , gpd[11]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r12d , gpd[12]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r13d , gpd[13]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r14d , gpd[14]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r15d , gpd[15]) //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , rax , gpq[0]); //!< 64-bit Gpq register (X64). 
-ASMJIT_DEF_REG(X86GpReg , rcx , gpq[1]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , rdx , gpq[2]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , rbx , gpq[3]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , rsp , gpq[4]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , rbp , gpq[5]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , rsi , gpq[6]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , rdi , gpq[7]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , r8 , gpq[8]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , r9 , gpq[9]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , r10 , gpq[10]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , r11 , gpq[11]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , r12 , gpq[12]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , r13 , gpq[13]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , r14 , gpq[14]); //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86GpReg , r15 , gpq[15]); //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , rax , gpq[0]) //!< 64-bit Gpq register (X64). 
+ASMJIT_DEF_REG(X86GpReg , rcx , gpq[1]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , rdx , gpq[2]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , rbx , gpq[3]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , rsp , gpq[4]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , rbp , gpq[5]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , rsi , gpq[6]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , rdi , gpq[7]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , r8 , gpq[8]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , r9 , gpq[9]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , r10 , gpq[10]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , r11 , gpq[11]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , r12 , gpq[12]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , r13 , gpq[13]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , r14 , gpq[14]) //!< 64-bit Gpq register (X64) +ASMJIT_DEF_REG(X86GpReg , r15 , gpq[15]) //!< 64-bit Gpq register (X64) -ASMJIT_DEF_REG(X86FpReg , fp0 , fp[0]); //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp1 , fp[1]); //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp2 , fp[2]); //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp3 , fp[3]); //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp4 , fp[4]); //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp5 , fp[5]); //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp6 , fp[6]); //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp7 , fp[7]); //!< 80-bit Fp register. +ASMJIT_DEF_REG(X86FpReg , fp0 , fp[0]) //!< 80-bit Fp register. +ASMJIT_DEF_REG(X86FpReg , fp1 , fp[1]) //!< 80-bit Fp register. +ASMJIT_DEF_REG(X86FpReg , fp2 , fp[2]) //!< 80-bit Fp register. +ASMJIT_DEF_REG(X86FpReg , fp3 , fp[3]) //!< 80-bit Fp register. +ASMJIT_DEF_REG(X86FpReg , fp4 , fp[4]) //!< 80-bit Fp register. 
+ASMJIT_DEF_REG(X86FpReg , fp5 , fp[5]) //!< 80-bit Fp register. +ASMJIT_DEF_REG(X86FpReg , fp6 , fp[6]) //!< 80-bit Fp register. +ASMJIT_DEF_REG(X86FpReg , fp7 , fp[7]) //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86MmReg , mm0 , mm[0]); //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm1 , mm[1]); //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm2 , mm[2]); //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm3 , mm[3]); //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm4 , mm[4]); //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm5 , mm[5]); //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm6 , mm[6]); //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm7 , mm[7]); //!< 64-bit Mm register. +ASMJIT_DEF_REG(X86MmReg , mm0 , mm[0]) //!< 64-bit Mm register. +ASMJIT_DEF_REG(X86MmReg , mm1 , mm[1]) //!< 64-bit Mm register. +ASMJIT_DEF_REG(X86MmReg , mm2 , mm[2]) //!< 64-bit Mm register. +ASMJIT_DEF_REG(X86MmReg , mm3 , mm[3]) //!< 64-bit Mm register. +ASMJIT_DEF_REG(X86MmReg , mm4 , mm[4]) //!< 64-bit Mm register. +ASMJIT_DEF_REG(X86MmReg , mm5 , mm[5]) //!< 64-bit Mm register. +ASMJIT_DEF_REG(X86MmReg , mm6 , mm[6]) //!< 64-bit Mm register. +ASMJIT_DEF_REG(X86MmReg , mm7 , mm[7]) //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86KReg , k0 , k[0]); //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k1 , k[1]); //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k2 , k[2]); //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k3 , k[3]); //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k4 , k[4]); //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k5 , k[5]); //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k6 , k[6]); //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k7 , k[7]); //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k0 , k[0]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k1 , k[1]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k2 , k[2]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k3 , k[3]) //!< 64-bit K register. 
+ASMJIT_DEF_REG(X86KReg , k4 , k[4]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k5 , k[5]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k6 , k[6]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k7 , k[7]) //!< 64-bit K register. -ASMJIT_DEF_REG(X86XmmReg, xmm0 , xmm[0]); //!< 128-bit Xmm register. -ASMJIT_DEF_REG(X86XmmReg, xmm1 , xmm[1]); //!< 128-bit Xmm register. -ASMJIT_DEF_REG(X86XmmReg, xmm2 , xmm[2]); //!< 128-bit Xmm register. -ASMJIT_DEF_REG(X86XmmReg, xmm3 , xmm[3]); //!< 128-bit Xmm register. -ASMJIT_DEF_REG(X86XmmReg, xmm4 , xmm[4]); //!< 128-bit Xmm register. -ASMJIT_DEF_REG(X86XmmReg, xmm5 , xmm[5]); //!< 128-bit Xmm register. -ASMJIT_DEF_REG(X86XmmReg, xmm6 , xmm[6]); //!< 128-bit Xmm register. -ASMJIT_DEF_REG(X86XmmReg, xmm7 , xmm[7]); //!< 128-bit Xmm register. -ASMJIT_DEF_REG(X86XmmReg, xmm8 , xmm[8]); //!< 128-bit Xmm register (X64). -ASMJIT_DEF_REG(X86XmmReg, xmm9 , xmm[9]); //!< 128-bit Xmm register (X64). -ASMJIT_DEF_REG(X86XmmReg, xmm10, xmm[10]); //!< 128-bit Xmm register (X64). -ASMJIT_DEF_REG(X86XmmReg, xmm11, xmm[11]); //!< 128-bit Xmm register (X64). -ASMJIT_DEF_REG(X86XmmReg, xmm12, xmm[12]); //!< 128-bit Xmm register (X64). -ASMJIT_DEF_REG(X86XmmReg, xmm13, xmm[13]); //!< 128-bit Xmm register (X64). -ASMJIT_DEF_REG(X86XmmReg, xmm14, xmm[14]); //!< 128-bit Xmm register (X64). -ASMJIT_DEF_REG(X86XmmReg, xmm15, xmm[15]); //!< 128-bit Xmm register (X64). -ASMJIT_DEF_REG(X86XmmReg, xmm16, xmm[16]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm17, xmm[17]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm18, xmm[18]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm19, xmm[19]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm20, xmm[20]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm21, xmm[21]); //!< 128-bit Xmm register (X64 & AVX512VL+). 
-ASMJIT_DEF_REG(X86XmmReg, xmm22, xmm[22]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm23, xmm[23]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm24, xmm[24]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm25, xmm[25]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm26, xmm[26]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm27, xmm[27]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm28, xmm[28]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm29, xmm[29]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm30, xmm[30]); //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86XmmReg, xmm31, xmm[31]); //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm0 , xmm[0]) //!< 128-bit Xmm register. +ASMJIT_DEF_REG(X86XmmReg, xmm1 , xmm[1]) //!< 128-bit Xmm register. +ASMJIT_DEF_REG(X86XmmReg, xmm2 , xmm[2]) //!< 128-bit Xmm register. +ASMJIT_DEF_REG(X86XmmReg, xmm3 , xmm[3]) //!< 128-bit Xmm register. +ASMJIT_DEF_REG(X86XmmReg, xmm4 , xmm[4]) //!< 128-bit Xmm register. +ASMJIT_DEF_REG(X86XmmReg, xmm5 , xmm[5]) //!< 128-bit Xmm register. +ASMJIT_DEF_REG(X86XmmReg, xmm6 , xmm[6]) //!< 128-bit Xmm register. +ASMJIT_DEF_REG(X86XmmReg, xmm7 , xmm[7]) //!< 128-bit Xmm register. +ASMJIT_DEF_REG(X86XmmReg, xmm8 , xmm[8]) //!< 128-bit Xmm register (X64). +ASMJIT_DEF_REG(X86XmmReg, xmm9 , xmm[9]) //!< 128-bit Xmm register (X64). +ASMJIT_DEF_REG(X86XmmReg, xmm10, xmm[10]) //!< 128-bit Xmm register (X64). +ASMJIT_DEF_REG(X86XmmReg, xmm11, xmm[11]) //!< 128-bit Xmm register (X64). +ASMJIT_DEF_REG(X86XmmReg, xmm12, xmm[12]) //!< 128-bit Xmm register (X64). +ASMJIT_DEF_REG(X86XmmReg, xmm13, xmm[13]) //!< 128-bit Xmm register (X64). +ASMJIT_DEF_REG(X86XmmReg, xmm14, xmm[14]) //!< 128-bit Xmm register (X64). 
+ASMJIT_DEF_REG(X86XmmReg, xmm15, xmm[15]) //!< 128-bit Xmm register (X64). +ASMJIT_DEF_REG(X86XmmReg, xmm16, xmm[16]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm17, xmm[17]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm18, xmm[18]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm19, xmm[19]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm20, xmm[20]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm21, xmm[21]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm22, xmm[22]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm23, xmm[23]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm24, xmm[24]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm25, xmm[25]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm26, xmm[26]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm27, xmm[27]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm28, xmm[28]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm29, xmm[29]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm30, xmm[30]) //!< 128-bit Xmm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86XmmReg, xmm31, xmm[31]) //!< 128-bit Xmm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm0 , ymm[0]); //!< 256-bit Ymm register. -ASMJIT_DEF_REG(X86YmmReg, ymm1 , ymm[1]); //!< 256-bit Ymm register. -ASMJIT_DEF_REG(X86YmmReg, ymm2 , ymm[2]); //!< 256-bit Ymm register. -ASMJIT_DEF_REG(X86YmmReg, ymm3 , ymm[3]); //!< 256-bit Ymm register. -ASMJIT_DEF_REG(X86YmmReg, ymm4 , ymm[4]); //!< 256-bit Ymm register. -ASMJIT_DEF_REG(X86YmmReg, ymm5 , ymm[5]); //!< 256-bit Ymm register. -ASMJIT_DEF_REG(X86YmmReg, ymm6 , ymm[6]); //!< 256-bit Ymm register. 
-ASMJIT_DEF_REG(X86YmmReg, ymm7 , ymm[7]); //!< 256-bit Ymm register. -ASMJIT_DEF_REG(X86YmmReg, ymm8 , ymm[8]); //!< 256-bit Ymm register (X64). -ASMJIT_DEF_REG(X86YmmReg, ymm9 , ymm[9]); //!< 256-bit Ymm register (X64). -ASMJIT_DEF_REG(X86YmmReg, ymm10, ymm[10]); //!< 256-bit Ymm register (X64). -ASMJIT_DEF_REG(X86YmmReg, ymm11, ymm[11]); //!< 256-bit Ymm register (X64). -ASMJIT_DEF_REG(X86YmmReg, ymm12, ymm[12]); //!< 256-bit Ymm register (X64). -ASMJIT_DEF_REG(X86YmmReg, ymm13, ymm[13]); //!< 256-bit Ymm register (X64). -ASMJIT_DEF_REG(X86YmmReg, ymm14, ymm[14]); //!< 256-bit Ymm register (X64). -ASMJIT_DEF_REG(X86YmmReg, ymm15, ymm[15]); //!< 256-bit Ymm register (X64). -ASMJIT_DEF_REG(X86YmmReg, ymm16, ymm[16]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm17, ymm[17]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm18, ymm[18]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm19, ymm[19]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm20, ymm[20]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm21, ymm[21]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm22, ymm[22]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm23, ymm[23]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm24, ymm[24]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm25, ymm[25]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm26, ymm[26]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm27, ymm[27]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm28, ymm[28]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm29, ymm[29]); //!< 256-bit Ymm register (X64 & AVX512VL+). 
-ASMJIT_DEF_REG(X86YmmReg, ymm30, ymm[30]); //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86YmmReg, ymm31, ymm[31]); //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm0 , ymm[0]) //!< 256-bit Ymm register. +ASMJIT_DEF_REG(X86YmmReg, ymm1 , ymm[1]) //!< 256-bit Ymm register. +ASMJIT_DEF_REG(X86YmmReg, ymm2 , ymm[2]) //!< 256-bit Ymm register. +ASMJIT_DEF_REG(X86YmmReg, ymm3 , ymm[3]) //!< 256-bit Ymm register. +ASMJIT_DEF_REG(X86YmmReg, ymm4 , ymm[4]) //!< 256-bit Ymm register. +ASMJIT_DEF_REG(X86YmmReg, ymm5 , ymm[5]) //!< 256-bit Ymm register. +ASMJIT_DEF_REG(X86YmmReg, ymm6 , ymm[6]) //!< 256-bit Ymm register. +ASMJIT_DEF_REG(X86YmmReg, ymm7 , ymm[7]) //!< 256-bit Ymm register. +ASMJIT_DEF_REG(X86YmmReg, ymm8 , ymm[8]) //!< 256-bit Ymm register (X64). +ASMJIT_DEF_REG(X86YmmReg, ymm9 , ymm[9]) //!< 256-bit Ymm register (X64). +ASMJIT_DEF_REG(X86YmmReg, ymm10, ymm[10]) //!< 256-bit Ymm register (X64). +ASMJIT_DEF_REG(X86YmmReg, ymm11, ymm[11]) //!< 256-bit Ymm register (X64). +ASMJIT_DEF_REG(X86YmmReg, ymm12, ymm[12]) //!< 256-bit Ymm register (X64). +ASMJIT_DEF_REG(X86YmmReg, ymm13, ymm[13]) //!< 256-bit Ymm register (X64). +ASMJIT_DEF_REG(X86YmmReg, ymm14, ymm[14]) //!< 256-bit Ymm register (X64). +ASMJIT_DEF_REG(X86YmmReg, ymm15, ymm[15]) //!< 256-bit Ymm register (X64). +ASMJIT_DEF_REG(X86YmmReg, ymm16, ymm[16]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm17, ymm[17]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm18, ymm[18]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm19, ymm[19]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm20, ymm[20]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm21, ymm[21]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm22, ymm[22]) //!< 256-bit Ymm register (X64 & AVX512VL+). 
+ASMJIT_DEF_REG(X86YmmReg, ymm23, ymm[23]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm24, ymm[24]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm25, ymm[25]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm26, ymm[26]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm27, ymm[27]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm28, ymm[28]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm29, ymm[29]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm30, ymm[30]) //!< 256-bit Ymm register (X64 & AVX512VL+). +ASMJIT_DEF_REG(X86YmmReg, ymm31, ymm[31]) //!< 256-bit Ymm register (X64 & AVX512VL+). -ASMJIT_DEF_REG(X86ZmmReg, zmm0 , zmm[0]); //!< 512-bit Zmm register. -ASMJIT_DEF_REG(X86ZmmReg, zmm1 , zmm[1]); //!< 512-bit Zmm register. -ASMJIT_DEF_REG(X86ZmmReg, zmm2 , zmm[2]); //!< 512-bit Zmm register. -ASMJIT_DEF_REG(X86ZmmReg, zmm3 , zmm[3]); //!< 512-bit Zmm register. -ASMJIT_DEF_REG(X86ZmmReg, zmm4 , zmm[4]); //!< 512-bit Zmm register. -ASMJIT_DEF_REG(X86ZmmReg, zmm5 , zmm[5]); //!< 512-bit Zmm register. -ASMJIT_DEF_REG(X86ZmmReg, zmm6 , zmm[6]); //!< 512-bit Zmm register. -ASMJIT_DEF_REG(X86ZmmReg, zmm7 , zmm[7]); //!< 512-bit Zmm register. -ASMJIT_DEF_REG(X86ZmmReg, zmm8 , zmm[8]); //!< 512-bit Zmm register (X64). -ASMJIT_DEF_REG(X86ZmmReg, zmm9 , zmm[9]); //!< 512-bit Zmm register (X64). -ASMJIT_DEF_REG(X86ZmmReg, zmm10, zmm[10]); //!< 512-bit Zmm register (X64). -ASMJIT_DEF_REG(X86ZmmReg, zmm11, zmm[11]); //!< 512-bit Zmm register (X64). -ASMJIT_DEF_REG(X86ZmmReg, zmm12, zmm[12]); //!< 512-bit Zmm register (X64). -ASMJIT_DEF_REG(X86ZmmReg, zmm13, zmm[13]); //!< 512-bit Zmm register (X64). -ASMJIT_DEF_REG(X86ZmmReg, zmm14, zmm[14]); //!< 512-bit Zmm register (X64). -ASMJIT_DEF_REG(X86ZmmReg, zmm15, zmm[15]); //!< 512-bit Zmm register (X64). 
-ASMJIT_DEF_REG(X86ZmmReg, zmm16, zmm[16]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm17, zmm[17]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm18, zmm[18]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm19, zmm[19]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm20, zmm[20]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm21, zmm[21]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm22, zmm[22]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm23, zmm[23]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm24, zmm[24]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm25, zmm[25]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm26, zmm[26]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm27, zmm[27]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm28, zmm[28]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm29, zmm[29]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm30, zmm[30]); //!< 512-bit Zmm register (X64 & AVX512+). -ASMJIT_DEF_REG(X86ZmmReg, zmm31, zmm[31]); //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm0 , zmm[0]) //!< 512-bit Zmm register. +ASMJIT_DEF_REG(X86ZmmReg, zmm1 , zmm[1]) //!< 512-bit Zmm register. +ASMJIT_DEF_REG(X86ZmmReg, zmm2 , zmm[2]) //!< 512-bit Zmm register. +ASMJIT_DEF_REG(X86ZmmReg, zmm3 , zmm[3]) //!< 512-bit Zmm register. +ASMJIT_DEF_REG(X86ZmmReg, zmm4 , zmm[4]) //!< 512-bit Zmm register. +ASMJIT_DEF_REG(X86ZmmReg, zmm5 , zmm[5]) //!< 512-bit Zmm register. +ASMJIT_DEF_REG(X86ZmmReg, zmm6 , zmm[6]) //!< 512-bit Zmm register. +ASMJIT_DEF_REG(X86ZmmReg, zmm7 , zmm[7]) //!< 512-bit Zmm register. 
+ASMJIT_DEF_REG(X86ZmmReg, zmm8 , zmm[8]) //!< 512-bit Zmm register (X64). +ASMJIT_DEF_REG(X86ZmmReg, zmm9 , zmm[9]) //!< 512-bit Zmm register (X64). +ASMJIT_DEF_REG(X86ZmmReg, zmm10, zmm[10]) //!< 512-bit Zmm register (X64). +ASMJIT_DEF_REG(X86ZmmReg, zmm11, zmm[11]) //!< 512-bit Zmm register (X64). +ASMJIT_DEF_REG(X86ZmmReg, zmm12, zmm[12]) //!< 512-bit Zmm register (X64). +ASMJIT_DEF_REG(X86ZmmReg, zmm13, zmm[13]) //!< 512-bit Zmm register (X64). +ASMJIT_DEF_REG(X86ZmmReg, zmm14, zmm[14]) //!< 512-bit Zmm register (X64). +ASMJIT_DEF_REG(X86ZmmReg, zmm15, zmm[15]) //!< 512-bit Zmm register (X64). +ASMJIT_DEF_REG(X86ZmmReg, zmm16, zmm[16]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm17, zmm[17]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm18, zmm[18]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm19, zmm[19]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm20, zmm[20]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm21, zmm[21]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm22, zmm[22]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm23, zmm[23]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm24, zmm[24]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm25, zmm[25]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm26, zmm[26]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm27, zmm[27]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm28, zmm[28]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm29, zmm[29]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm30, zmm[30]) //!< 512-bit Zmm register (X64 & AVX512+). +ASMJIT_DEF_REG(X86ZmmReg, zmm31, zmm[31]) //!< 512-bit Zmm register (X64 & AVX512+). 
#undef ASMJIT_DEF_REG -// This is only defined by `x86operand_regs.cpp` when exporting registers. -#if !defined(ASMJIT_EXPORTS_X86OPERAND_REGS) - //! Create 8-bit Gpb-lo register operand. static ASMJIT_INLINE X86GpReg gpb_lo(uint32_t index) { return X86GpReg(kX86RegTypeGpbLo, index, 1); } //! Create 8-bit Gpb-hi register operand. @@ -2025,12 +2560,13 @@ ASMJIT_EXPAND_PTR_VAR(oword, 16) ASMJIT_EXPAND_PTR_VAR(yword, 32) ASMJIT_EXPAND_PTR_VAR(zword, 64) #undef ASMJIT_EXPAND_PTR_VAR + #endif // !ASMJIT_DISABLE_COMPILER -#endif // !ASMJIT_EXPORTS_X86OPERAND_REGS - } // x86 namespace +#endif // !ASMJIT_EXPORTS_X86OPERAND_REGS + //! \} } // asmjit namespace diff --git a/src/asmjit/x86/x86operand_regs.cpp b/src/asmjit/x86/x86operand_regs.cpp index cc649e9..4bcd327 100644 --- a/src/asmjit/x86/x86operand_regs.cpp +++ b/src/asmjit/x86/x86operand_regs.cpp @@ -20,9 +20,9 @@ namespace asmjit { -#define REG(_Type_, _Index_, _Size_) {{ \ +#define REG(_Type_, _Index_, _Size_) {{{ \ kOperandTypeReg, _Size_, { ((_Type_) << 8) + _Index_ }, kInvalidValue, {{ kInvalidVar, 0 }} \ -}} +}}} const X86RegData x86RegData = { // RIP. diff --git a/src/asmjit/x86/x86scheduler.cpp b/src/asmjit/x86/x86scheduler.cpp index 10a2332..fb93e87 100644 --- a/src/asmjit/x86/x86scheduler.cpp +++ b/src/asmjit/x86/x86scheduler.cpp @@ -47,9 +47,9 @@ struct X86ScheduleData { uint16_t reserved; //! All instructions that this instruction depends on. - PodList::Link* dependsOn; + PodList::Link* dependsOn; //! All instructions that use the result of this instruction. 
- PodList::Link* usedBy; + PodList::Link* usedBy; }; // ============================================================================ @@ -65,18 +65,18 @@ X86Scheduler::~X86Scheduler() {} // [asmjit::X86Scheduler - Run] // ============================================================================ -Error X86Scheduler::run(Node* start, Node* stop) { +Error X86Scheduler::run(HLNode* start, HLNode* stop) { /* ASMJIT_TLOG("[Schedule] === Begin ==="); - Zone zone(8096 - kZoneOverhead); - Node* node_ = start; + Zone zone(8096 - Zone::kZoneOverhead); + HLNode* node_ = start; while (node_ != stop) { - Node* next = node_->getNext(); - ASMJIT_ASSERT(node_->getType() == kNodeTypeInst); + HLNode* next = node_->getNext(); + ASMJIT_ASSERT(node_->getType() == kHLNodeTypeInst); - printf(" %s\n", X86Util::getInstInfo(static_cast(node_)->getInstId()).getInstName()); + printf(" %s\n", X86Util::getInstInfo(static_cast(node_)->getInstId()).getInstName()); node_ = next; } diff --git a/src/asmjit/x86/x86scheduler_p.h b/src/asmjit/x86/x86scheduler_p.h index 1afeb90..f5b4930 100644 --- a/src/asmjit/x86/x86scheduler_p.h +++ b/src/asmjit/x86/x86scheduler_p.h @@ -13,7 +13,7 @@ // [Dependencies - AsmJit] #include "../x86/x86compiler.h" -#include "../x86/x86context_p.h" +#include "../x86/x86compilercontext_p.h" #include "../x86/x86cpuinfo.h" #include "../x86/x86inst.h" @@ -41,7 +41,7 @@ struct X86Scheduler { // [Run] // -------------------------------------------------------------------------- - Error run(Node* start, Node* stop); + Error run(HLNode* start, HLNode* stop); // -------------------------------------------------------------------------- // [Members] diff --git a/src/test/asmjit_bench_x86.cpp b/src/test/asmjit_bench_x86.cpp index c33b53f..d5ed2c9 100644 --- a/src/test/asmjit_bench_x86.cpp +++ b/src/test/asmjit_bench_x86.cpp @@ -22,7 +22,7 @@ struct Performance { static inline uint32_t now() { - return asmjit::CpuTicks::now(); + return asmjit::Utils::getTickCount(); } inline void 
reset() { @@ -67,7 +67,7 @@ int main(int argc, char* argv[]) { JitRuntime runtime; X86Assembler a(&runtime); - X86Compiler c(&runtime); + X86Compiler c; uint32_t r, i; @@ -100,12 +100,12 @@ int main(int argc, char* argv[]) { for (r = 0; r < kNumRepeats; r++) { perf.start(); for (i = 0; i < kNumIterations; i++) { + c.attach(&a); asmgen::blend(c); + c.finalize(); - void* p = c.make(); + void* p = a.make(); runtime.release(p); - - c.reset(); } perf.end(); } diff --git a/src/test/asmjit_test_unit.cpp b/src/test/asmjit_test_unit.cpp index 32626d3..af1db85 100644 --- a/src/test/asmjit_test_unit.cpp +++ b/src/test/asmjit_test_unit.cpp @@ -6,6 +6,8 @@ // [Dependencies - AsmJit] #include "../asmjit/asmjit.h" +#include "../asmjit/base/compilercontext_p.h" +#include "../asmjit/x86/x86compilercontext_p.h" // ============================================================================ // [DumpCpu] @@ -38,7 +40,7 @@ static void dumpCpu(void) { // [X86] // -------------------------------------------------------------------------- -#if defined(ASMJIT_ARCH_X86) || defined(ASMJIT_ARCH_X64) +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 const asmjit::X86CpuInfo* x86Cpu = static_cast(cpu); static const DumpCpuFeature x86FeaturesList[] = { @@ -50,12 +52,12 @@ static void dumpCpu(void) { { asmjit::kX86CpuFeatureCMPXCHG8B , "CMPXCHG8B" }, { asmjit::kX86CpuFeatureCMPXCHG16B , "CMPXCHG16B" }, { asmjit::kX86CpuFeatureCLFLUSH , "CLFLUSH" }, - { asmjit::kX86CpuFeatureCLFLUSHOpt , "CLFLUSH (Opt)" }, + { asmjit::kX86CpuFeatureCLFLUSH_OPT , "CLFLUSH (Opt)" }, { asmjit::kX86CpuFeaturePREFETCH , "PREFETCH" }, { asmjit::kX86CpuFeaturePREFETCHWT1 , "PREFETCHWT1" }, { asmjit::kX86CpuFeatureLahfSahf , "LAHF/SAHF" }, { asmjit::kX86CpuFeatureFXSR , "FXSR" }, - { asmjit::kX86CpuFeatureFXSROpt , "FXSR (Opt)" }, + { asmjit::kX86CpuFeatureFXSR_OPT , "FXSR (Opt)" }, { asmjit::kX86CpuFeatureMMX , "MMX" }, { asmjit::kX86CpuFeatureMMX2 , "MMX2" }, { asmjit::kX86CpuFeature3DNOW , "3DNOW" }, @@ -77,8 +79,8 @@ static 
void dumpCpu(void) { { asmjit::kX86CpuFeatureRDRAND , "RDRAND" }, { asmjit::kX86CpuFeatureRDSEED , "RDSEED" }, { asmjit::kX86CpuFeatureSHA , "SHA" }, - { asmjit::kX86CpuFeatureXSave , "XSAVE" }, - { asmjit::kX86CpuFeatureXSaveOS , "XSAVE (OS)" }, + { asmjit::kX86CpuFeatureXSAVE , "XSAVE" }, + { asmjit::kX86CpuFeatureXSAVE_OS , "XSAVE (OS)" }, { asmjit::kX86CpuFeatureAVX , "AVX" }, { asmjit::kX86CpuFeatureAVX2 , "AVX2" }, { asmjit::kX86CpuFeatureF16C , "F16C" }, @@ -91,8 +93,8 @@ static void dumpCpu(void) { { asmjit::kX86CpuFeatureRTM , "RTM" }, { asmjit::kX86CpuFeatureADX , "ADX" }, { asmjit::kX86CpuFeatureMPX , "MPX" }, - { asmjit::kX86CpuFeatureFSGSBase , "FS/GS Base" }, - { asmjit::kX86CpuFeatureMOVSBSTOSBOpt , "REP MOVSB/STOSB (Opt)" }, + { asmjit::kX86CpuFeatureFSGSBASE , "FS/GS Base" }, + { asmjit::kX86CpuFeatureMOVSBSTOSB_OPT, "REP MOVSB/STOSB (Opt)" }, { asmjit::kX86CpuFeatureAVX512F , "AVX512F" }, { asmjit::kX86CpuFeatureAVX512CD , "AVX512CD" }, { asmjit::kX86CpuFeatureAVX512PF , "AVX512PF" }, @@ -138,7 +140,6 @@ static void dumpSizeOf(void) { INFO(""); INFO("SizeOf Base:"); - DUMP_TYPE(asmjit::CodeGen); DUMP_TYPE(asmjit::ConstPool); DUMP_TYPE(asmjit::Runtime); DUMP_TYPE(asmjit::Zone); @@ -164,23 +165,23 @@ static void dumpSizeOf(void) { #if !defined(ASMJIT_DISABLE_COMPILER) INFO("SizeOf Compiler:"); DUMP_TYPE(asmjit::Compiler); - DUMP_TYPE(asmjit::Node); - DUMP_TYPE(asmjit::AlignNode); - DUMP_TYPE(asmjit::CallNode); - DUMP_TYPE(asmjit::CommentNode); - DUMP_TYPE(asmjit::EmbedNode); - DUMP_TYPE(asmjit::FuncNode); - DUMP_TYPE(asmjit::EndNode); - DUMP_TYPE(asmjit::InstNode); - DUMP_TYPE(asmjit::JumpNode); - DUMP_TYPE(asmjit::TargetNode); + DUMP_TYPE(asmjit::VarMap); + DUMP_TYPE(asmjit::VarAttr); + DUMP_TYPE(asmjit::VarData); + DUMP_TYPE(asmjit::VarState); + DUMP_TYPE(asmjit::HLNode); + DUMP_TYPE(asmjit::HLInst); + DUMP_TYPE(asmjit::HLJump); + DUMP_TYPE(asmjit::HLData); + DUMP_TYPE(asmjit::HLAlign); + DUMP_TYPE(asmjit::HLLabel); + DUMP_TYPE(asmjit::HLComment); 
+ DUMP_TYPE(asmjit::HLSentinel); + DUMP_TYPE(asmjit::HLFunc); + DUMP_TYPE(asmjit::HLCall); DUMP_TYPE(asmjit::FuncDecl); DUMP_TYPE(asmjit::FuncInOut); DUMP_TYPE(asmjit::FuncPrototype); - DUMP_TYPE(asmjit::VarAttr); - DUMP_TYPE(asmjit::VarData); - DUMP_TYPE(asmjit::VarMap); - DUMP_TYPE(asmjit::VarState); INFO(""); #endif // !ASMJIT_DISABLE_COMPILER @@ -196,12 +197,12 @@ static void dumpSizeOf(void) { #if !defined(ASMJIT_DISABLE_COMPILER) DUMP_TYPE(asmjit::X86Compiler); - DUMP_TYPE(asmjit::X86CallNode); - DUMP_TYPE(asmjit::X86FuncNode); - DUMP_TYPE(asmjit::X86FuncDecl); DUMP_TYPE(asmjit::X86VarMap); DUMP_TYPE(asmjit::X86VarInfo); DUMP_TYPE(asmjit::X86VarState); + DUMP_TYPE(asmjit::X86CallNode); + DUMP_TYPE(asmjit::X86FuncNode); + DUMP_TYPE(asmjit::X86FuncDecl); #endif // !ASMJIT_DISABLE_COMPILER INFO(""); diff --git a/src/test/asmjit_test_x86.cpp b/src/test/asmjit_test_x86.cpp index 5a330c8..55ccb38 100644 --- a/src/test/asmjit_test_x86.cpp +++ b/src/test/asmjit_test_x86.cpp @@ -40,50 +40,43 @@ struct X86Test { // ============================================================================ struct X86Test_AlignBase : public X86Test { - X86Test_AlignBase(uint32_t argCount, uint32_t varCount, bool naked, bool pushPop) : - _argCount(argCount), - _varCount(varCount), - _naked(naked), - _pushPop(pushPop) { + X86Test_AlignBase(uint32_t numArgs, uint32_t numVars, bool naked) : + _numArgs(numArgs), + _numVars(numVars), + _naked(naked) { - _name.setFormat("[Align] Args=%u Vars=%u Naked=%c PushPop=%c", - argCount, - varCount, - naked ? 'Y' : 'N', - pushPop ? 'Y' : 'N'); + _name.setFormat("[Align] NumArgs=%u NumVars=%u Naked=%c", + numArgs, numVars, naked ? 
'Y' : 'N'); } static void add(PodVector& tests) { for (unsigned int i = 0; i <= 6; i++) { for (unsigned int j = 0; j <= 4; j++) { - tests.append(new X86Test_AlignBase(i, j, false, false)); - tests.append(new X86Test_AlignBase(i, j, false, true )); - tests.append(new X86Test_AlignBase(i, j, true , false)); - tests.append(new X86Test_AlignBase(i, j, true , true )); + tests.append(new X86Test_AlignBase(i, j, false)); + tests.append(new X86Test_AlignBase(i, j, true)); } } } virtual void compile(X86Compiler& c) { - switch (_argCount) { - case 0: c.addFunc(kFuncConvHost, FuncBuilder0()); break; - case 1: c.addFunc(kFuncConvHost, FuncBuilder1()); break; - case 2: c.addFunc(kFuncConvHost, FuncBuilder2()); break; - case 3: c.addFunc(kFuncConvHost, FuncBuilder3()); break; - case 4: c.addFunc(kFuncConvHost, FuncBuilder4()); break; - case 5: c.addFunc(kFuncConvHost, FuncBuilder5()); break; - case 6: c.addFunc(kFuncConvHost, FuncBuilder6()); break; + switch (_numArgs) { + case 0: c.addFunc(FuncBuilder0(kCallConvHost)); break; + case 1: c.addFunc(FuncBuilder1(kCallConvHost)); break; + case 2: c.addFunc(FuncBuilder2(kCallConvHost)); break; + case 3: c.addFunc(FuncBuilder3(kCallConvHost)); break; + case 4: c.addFunc(FuncBuilder4(kCallConvHost)); break; + case 5: c.addFunc(FuncBuilder5(kCallConvHost)); break; + case 6: c.addFunc(FuncBuilder6(kCallConvHost)); break; } c.getFunc()->setHint(kFuncHintNaked, _naked); - c.getFunc()->setHint(kX86FuncHintPushPop, _pushPop); - X86GpVar gpVar(c, kVarTypeIntPtr); - X86GpVar gpSum(c, kVarTypeInt32); - X86XmmVar xmmVar(c, kX86VarTypeXmm); + X86GpVar gpVar = c.newIntPtr("gpVar"); + X86GpVar gpSum = c.newInt32("gpSum"); + X86XmmVar xmmVar = c.newXmm("xmmVar"); // Alloc, use and spill preserved registers. 
- if (_varCount) { + if (_numVars) { uint32_t gpCount = c.getRegCount().getGp(); uint32_t varIndex = 0; uint32_t regIndex = 0; @@ -92,7 +85,7 @@ struct X86Test_AlignBase : public X86Test { do { if ((preservedMask & regMask) != 0 && (regIndex != kX86RegIndexSp && regIndex != kX86RegIndexBp)) { - X86GpVar tmp(c, kVarTypeInt32); + X86GpVar tmp = c.newInt32("tmp"); c.alloc(tmp, regIndex); c.xor_(tmp, tmp); c.spill(tmp); @@ -101,14 +94,14 @@ struct X86Test_AlignBase : public X86Test { regIndex++; regMask <<= 1; - } while (varIndex < _varCount && regIndex < gpCount); + } while (varIndex < _numVars && regIndex < gpCount); } // Do a sum of arguments to verify possible relocation when misaligned. - if (_argCount) { + if (_numArgs) { c.xor_(gpSum, gpSum); - for (uint32_t argIndex = 0; argIndex < _argCount; argIndex++) { - X86GpVar gpArg(c, kVarTypeInt32); + for (uint32_t argIndex = 0; argIndex < _numArgs; argIndex++) { + X86GpVar gpArg = c.newInt32("gpArg%u", argIndex); c.setArg(argIndex, gpArg); c.add(gpSum, gpArg); @@ -120,7 +113,7 @@ struct X86Test_AlignBase : public X86Test { c.shl(gpVar.r32(), 28); // Add a sum of arguments to check whether they are correct. 
- if (_argCount) + if (_numArgs) c.or_(gpVar.r32(), gpSum); c.ret(gpVar); @@ -139,7 +132,7 @@ struct X86Test_AlignBase : public X86Test { unsigned int resultRet = 0; unsigned int expectRet = 0; - switch (_argCount) { + switch (_numArgs) { case 0: resultRet = asmjit_cast(_func)(); expectRet = 0; @@ -176,11 +169,10 @@ struct X86Test_AlignBase : public X86Test { return resultRet == expectRet; } - unsigned int _argCount; - unsigned int _varCount; + unsigned int _numArgs; + unsigned int _numVars; bool _naked; - bool _pushPop; }; // ============================================================================ @@ -195,7 +187,7 @@ struct X86Test_JumpCross : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); Label L_1(c); Label L_2(c); @@ -236,7 +228,7 @@ struct X86Test_JumpUnreachable1 : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); Label L_1(c); Label L_2(c); @@ -246,8 +238,8 @@ struct X86Test_JumpUnreachable1 : public X86Test { Label L_6(c); Label L_7(c); - X86GpVar v0(c, kVarTypeUInt32, "v0"); - X86GpVar v1(c, kVarTypeUInt32, "v1"); + X86GpVar v0 = c.newUInt32("v0"); + X86GpVar v1 = c.newUInt32("v1"); c.bind(L_2); c.bind(L_3); @@ -298,13 +290,13 @@ struct X86Test_JumpUnreachable2 : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); Label L_1(c); Label L_2(c); - X86GpVar v0(c, kVarTypeUInt32, "v0"); - X86GpVar v1(c, kVarTypeUInt32, "v1"); + X86GpVar v0 = c.newUInt32("v0"); + X86GpVar v1 = c.newUInt32("v1"); c.jmp(L_1); c.bind(L_2); @@ -344,13 +336,13 @@ struct X86Test_AllocBase : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); - X86GpVar v0(c, kVarTypeInt32, "v0"); - X86GpVar v1(c, kVarTypeInt32, 
"v1"); - X86GpVar v2(c, kVarTypeInt32, "v2"); - X86GpVar v3(c, kVarTypeInt32, "v3"); - X86GpVar v4(c, kVarTypeInt32, "v4"); + X86GpVar v0 = c.newInt32("v0"); + X86GpVar v1 = c.newInt32("v1"); + X86GpVar v2 = c.newInt32("v2"); + X86GpVar v3 = c.newInt32("v3"); + X86GpVar v4 = c.newInt32("v4"); c.xor_(v0, v0); @@ -394,11 +386,11 @@ struct X86Test_AllocManual : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); - X86GpVar v0(c, kVarTypeInt32, "v0"); - X86GpVar v1(c, kVarTypeInt32, "v1"); - X86GpVar cnt(c, kVarTypeInt32, "cnt"); + X86GpVar v0 = c.newInt32("v0"); + X86GpVar v1 = c.newInt32("v1"); + X86GpVar cnt = c.newInt32("cnt"); c.xor_(v0, v0); c.xor_(v1, v1); @@ -449,13 +441,13 @@ struct X86Test_AllocUseMem : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86GpVar iIdx(c, kVarTypeInt32); - X86GpVar iEnd(c, kVarTypeInt32); + X86GpVar iIdx = c.newInt32("iIdx"); + X86GpVar iEnd = c.newInt32("iEnd"); - X86GpVar aIdx(c, kVarTypeInt32); - X86GpVar aEnd(c, kVarTypeInt32); + X86GpVar aIdx = c.newInt32("aIdx"); + X86GpVar aEnd = c.newInt32("aEnd"); Label L_1(c); @@ -503,21 +495,21 @@ struct X86Test_AllocMany1 : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86GpVar a0(c, kVarTypeIntPtr, "a0"); - X86GpVar a1(c, kVarTypeIntPtr, "a1"); + X86GpVar a0 = c.newIntPtr("a0"); + X86GpVar a1 = c.newIntPtr("a1"); c.setArg(0, a0); c.setArg(1, a1); // Create some variables. - X86GpVar t(c, kVarTypeInt32); + X86GpVar t = c.newInt32("t"); X86GpVar x[kCount]; uint32_t i; for (i = 0; i < kCount; i++) { - x[i] = c.newGpVar(kVarTypeInt32); + x[i] = c.newInt32("x%u", i); } // Setup variables (use mov with reg/imm to se if register allocator works). 
@@ -580,23 +572,23 @@ struct X86Test_AllocMany2 : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder1()); + c.addFunc(FuncBuilder1(kCallConvHost)); X86GpVar var[32]; - X86GpVar a(c, kVarTypeIntPtr, "a"); + X86GpVar a = c.newIntPtr("a"); c.setArg(0, a); int i; for (i = 0; i < ASMJIT_ARRAY_SIZE(var); i++) { - var[i] = c.newGpVar(kVarTypeInt32); + var[i] = c.newInt32("var[%d]", i); } for (i = 0; i < ASMJIT_ARRAY_SIZE(var); i++) { c.xor_(var[i], var[i]); } - X86GpVar v0(c, kVarTypeInt32); + X86GpVar v0 = c.newInt32("v0"); Label L(c); c.mov(v0, 32); @@ -654,14 +646,14 @@ struct X86Test_AllocImul1 : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder4()); + c.addFunc(FuncBuilder4(kCallConvHost)); - X86GpVar dstHi(c, kVarTypeIntPtr, "dstHi"); - X86GpVar dstLo(c, kVarTypeIntPtr, "dstLo"); + X86GpVar dstHi = c.newIntPtr("dstHi"); + X86GpVar dstLo = c.newIntPtr("dstLo"); - X86GpVar vHi(c, kVarTypeInt32, "vHi"); - X86GpVar vLo(c, kVarTypeInt32, "vLo"); - X86GpVar src(c, kVarTypeInt32, "src"); + X86GpVar vHi = c.newInt32("vHi"); + X86GpVar vLo = c.newInt32("vLo"); + X86GpVar src = c.newInt32("src"); c.setArg(0, dstHi); c.setArg(1, dstLo); @@ -709,18 +701,18 @@ struct X86Test_AllocImul2 : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86GpVar dst(c, kVarTypeIntPtr, "dst"); - X86GpVar src(c, kVarTypeIntPtr, "src"); + X86GpVar dst = c.newIntPtr("dst"); + X86GpVar src = c.newIntPtr("src"); c.setArg(0, dst); c.setArg(1, src); for (unsigned int i = 0; i < 4; i++) { - X86GpVar x(c, kVarTypeInt32, "x"); - X86GpVar y(c, kVarTypeInt32, "y"); - X86GpVar hi(c, kVarTypeInt32, "hi"); + X86GpVar x = c.newInt32("x"); + X86GpVar y = c.newInt32("y"); + X86GpVar hi = c.newInt32("hi"); c.mov(x, x86::dword_ptr(src, 0)); c.mov(y, x86::dword_ptr(src, 4)); @@ -762,11 +754,11 @@ struct X86Test_AllocIdiv1 : public 
X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86GpVar a(c, kVarTypeIntPtr, "a"); - X86GpVar b(c, kVarTypeIntPtr, "b"); - X86GpVar dummy(c, kVarTypeInt32, "dummy"); + X86GpVar a = c.newIntPtr("a"); + X86GpVar b = c.newIntPtr("b"); + X86GpVar dummy = c.newInt32("dummy"); c.setArg(0, a); c.setArg(1, b); @@ -807,11 +799,11 @@ struct X86Test_AllocSetz : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder3()); + c.addFunc(FuncBuilder3(kCallConvHost)); - X86GpVar src0(c, kVarTypeInt32, "src0"); - X86GpVar src1(c, kVarTypeInt32, "src1"); - X86GpVar dst0(c, kVarTypeIntPtr, "dst0"); + X86GpVar src0 = c.newInt32("src0"); + X86GpVar src1 = c.newInt32("src1"); + X86GpVar dst0 = c.newIntPtr("dst0"); c.setArg(0, src0); c.setArg(1, src1); @@ -857,12 +849,12 @@ struct X86Test_AllocShlRor : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder4()); + c.addFunc(FuncBuilder4(kCallConvHost)); - X86GpVar dst(c, kVarTypeIntPtr, "dst"); - X86GpVar var(c, kVarTypeInt32, "var"); - X86GpVar vShlParam(c, kVarTypeInt32, "vShlParam"); - X86GpVar vRorParam(c, kVarTypeInt32, "vRorParam"); + X86GpVar dst = c.newIntPtr("dst"); + X86GpVar var = c.newInt32("var"); + X86GpVar vShlParam = c.newInt32("vShlParam"); + X86GpVar vRorParam = c.newInt32("vRorParam"); c.setArg(0, dst); c.setArg(1, var); @@ -908,10 +900,10 @@ struct X86Test_AllocGpLo : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder1()); + c.addFunc(FuncBuilder1(kCallConvHost)); - X86GpVar rPtr(c, kVarTypeUIntPtr); - X86GpVar rSum(c, kVarTypeUInt32); + X86GpVar rPtr = c.newUIntPtr("rPtr"); + X86GpVar rSum = c.newUInt32("rSum"); c.setArg(0, rPtr); @@ -919,7 +911,7 @@ struct X86Test_AllocGpLo : public X86Test { uint32_t i; for (i = 0; i < kCount; i++) { - rVar[i] = c.newGpVar(kVarTypeUInt32); + rVar[i] = 
c.newUInt32("rVar[%u]", i); } // Init pseudo-regs with values from our array. @@ -996,11 +988,11 @@ struct X86Test_AllocRepMovsb : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder3()); + c.addFunc(FuncBuilder3(kCallConvHost)); - X86GpVar dst(c, kVarTypeIntPtr, "dst"); - X86GpVar src(c, kVarTypeIntPtr, "src"); - X86GpVar cnt(c, kVarTypeIntPtr, "cnt"); + X86GpVar dst = c.newIntPtr("dst"); + X86GpVar src = c.newIntPtr("src"); + X86GpVar cnt = c.newIntPtr("cnt"); c.setArg(0, dst); c.setArg(1, src); @@ -1037,10 +1029,10 @@ struct X86Test_AllocIfElse1 : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86GpVar v1(c, kVarTypeInt32); - X86GpVar v2(c, kVarTypeInt32); + X86GpVar v1 = c.newInt32("v1"); + X86GpVar v2 = c.newInt32("v2"); Label L_1(c); Label L_2(c); @@ -1088,10 +1080,10 @@ struct X86Test_AllocIfElse2 : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86GpVar v1(c, kVarTypeInt32); - X86GpVar v2(c, kVarTypeInt32); + X86GpVar v1 = c.newInt32("v1"); + X86GpVar v2 = c.newInt32("v2"); Label L_1(c); Label L_2(c); @@ -1148,11 +1140,11 @@ struct X86Test_AllocIfElse3 : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86GpVar v1(c, kVarTypeInt32); - X86GpVar v2(c, kVarTypeInt32); - X86GpVar counter(c, kVarTypeInt32); + X86GpVar v1 = c.newInt32("v1"); + X86GpVar v2 = c.newInt32("v2"); + X86GpVar counter = c.newInt32("counter"); Label L_1(c); Label L_Loop(c); @@ -1208,11 +1200,11 @@ struct X86Test_AllocIfElse4 : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86GpVar v1(c, kVarTypeInt32); - X86GpVar v2(c, kVarTypeInt32); - X86GpVar counter(c, 
kVarTypeInt32); + X86GpVar v1 = c.newInt32("v1"); + X86GpVar v2 = c.newInt32("v2"); + X86GpVar counter = c.newInt32("counter"); Label L_1(c); Label L_Loop1(c); @@ -1273,10 +1265,10 @@ struct X86Test_AllocInt8 : public X86Test { } virtual void compile(X86Compiler& c) { - X86GpVar x(c, kVarTypeInt8, "x"); - X86GpVar y(c, kVarTypeInt32, "y"); + X86GpVar x = c.newInt8("x"); + X86GpVar y = c.newInt32("y"); - c.addFunc(kFuncConvHost, FuncBuilder1()); + c.addFunc(FuncBuilder1(kCallConvHost)); c.setArg(0, x); c.movsx(y, x); @@ -1311,14 +1303,13 @@ struct X86Test_AllocArgsIntPtr : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, - FuncBuilder8()); + c.addFunc(FuncBuilder8(kCallConvHost)); uint32_t i; X86GpVar var[8]; for (i = 0; i < 8; i++) { - var[i] = c.newGpVar(); + var[i] = c.newIntPtr("var%u", i); c.setArg(i, var[i]); } @@ -1370,16 +1361,15 @@ struct X86Test_AllocArgsFloat : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, - FuncBuilder8()); + c.addFunc(FuncBuilder8(kCallConvHost)); uint32_t i; + X86GpVar p = c.newIntPtr("p"); X86XmmVar xv[7]; - X86GpVar p(c); for (i = 0; i < 7; i++) { - xv[i] = c.newXmmVar(kX86VarTypeXmmSs); + xv[i] = c.newXmmSs("xv%u", i); c.setArg(i, xv[i]); } @@ -1424,16 +1414,15 @@ struct X86Test_AllocArgsDouble : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, - FuncBuilder8()); + c.addFunc(FuncBuilder8(kCallConvHost)); uint32_t i; + X86GpVar p = c.newIntPtr("p"); X86XmmVar xv[7]; - X86GpVar p(c); for (i = 0; i < 7; i++) { - xv[i] = c.newXmmVar(kX86VarTypeXmmSd); + xv[i] = c.newXmmSd("xv%u", i); c.setArg(i, xv[i]); } @@ -1478,10 +1467,10 @@ struct X86Test_AllocRetFloat : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86XmmVar a(c, kX86VarTypeXmmSs); - X86XmmVar b(c, kX86VarTypeXmmSs); + X86XmmVar a = c.newXmmSs("a"); + X86XmmVar b = 
c.newXmmSs("b"); c.setArg(0, a); c.setArg(1, b); @@ -1518,10 +1507,10 @@ struct X86Test_AllocRetDouble : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86XmmVar a(c, kX86VarTypeXmmSd); - X86XmmVar b(c, kX86VarTypeXmmSd); + X86XmmVar a = c.newXmmSd("a"); + X86XmmVar b = c.newXmmSd("b"); c.setArg(0, a); c.setArg(1, b); @@ -1560,12 +1549,12 @@ struct X86Test_AllocStack : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); X86Mem stack = c.newStack(kSize, 1).setSize(1); - X86GpVar i(c, kVarTypeIntPtr, "i"); - X86GpVar a(c, kVarTypeInt32, "a"); - X86GpVar b(c, kVarTypeInt32, "b"); + X86GpVar i = c.newIntPtr("i"); + X86GpVar a = c.newInt32("a"); + X86GpVar b = c.newInt32("b"); Label L_1(c); Label L_2(c); @@ -1622,14 +1611,14 @@ struct X86Test_AllocMemcpy : public X86Test { } virtual void compile(X86Compiler& c) { - X86GpVar dst(c, kVarTypeIntPtr, "dst"); - X86GpVar src(c, kVarTypeIntPtr, "src"); - X86GpVar cnt(c, kVarTypeUIntPtr, "cnt"); + X86GpVar dst = c.newIntPtr("dst"); + X86GpVar src = c.newIntPtr("src"); + X86GpVar cnt = c.newUIntPtr("cnt"); Label L_Loop(c); // Create base labels we use Label L_Exit(c); // in our function. - c.addFunc(kFuncConvHost, FuncBuilder3()); + c.addFunc(FuncBuilder3(kCallConvHost)); c.setArg(0, dst); c.setArg(1, src); c.setArg(2, cnt); @@ -1643,7 +1632,7 @@ struct X86Test_AllocMemcpy : public X86Test { c.bind(L_Loop); // Bind the loop label here. - X86GpVar tmp(c, kVarTypeInt32); // Copy a single dword (4 bytes). + X86GpVar tmp = c.newInt32("tmp"); // Copy a single dword (4 bytes). c.mov(tmp, x86::dword_ptr(src)); c.mov(x86::dword_ptr(dst), tmp); @@ -1736,8 +1725,8 @@ struct X86Test_AllocBlend : public X86Test { uint32_t _srcBuffer[kCount + 3]; // Has to be aligned. 
- uint32_t* dstBuffer = (uint32_t*)IntUtil::alignTo((intptr_t)_dstBuffer, 16); - uint32_t* srcBuffer = (uint32_t*)IntUtil::alignTo((intptr_t)_srcBuffer, 16); + uint32_t* dstBuffer = (uint32_t*)Utils::alignTo((intptr_t)_dstBuffer, 16); + uint32_t* srcBuffer = (uint32_t*)Utils::alignTo((intptr_t)_srcBuffer, 16); ::memcpy(dstBuffer, dstConstData, sizeof(dstConstData)); ::memcpy(srcBuffer, srcConstData, sizeof(srcConstData)); @@ -1783,11 +1772,11 @@ struct X86Test_CallBase : public X86Test { } virtual void compile(X86Compiler& c) { - X86GpVar v0(c, kVarTypeInt32, "v0"); - X86GpVar v1(c, kVarTypeInt32, "v1"); - X86GpVar v2(c, kVarTypeInt32, "v2"); + X86GpVar v0 = c.newInt32("v0"); + X86GpVar v1 = c.newInt32("v1"); + X86GpVar v2 = c.newInt32("v2"); - c.addFunc(kFuncConvHost, FuncBuilder3()); + c.addFunc(FuncBuilder3(kCallConvHost)); c.setArg(0, v0); c.setArg(1, v1); c.setArg(2, v2); @@ -1798,10 +1787,10 @@ struct X86Test_CallBase : public X86Test { c.shl(v2, 1); // Call function. - X86GpVar fn(c, kVarTypeIntPtr, "fn"); + X86GpVar fn = c.newIntPtr("fn"); c.mov(fn, imm_ptr(calledFunc)); - X86CallNode* call = c.call(fn, kFuncConvHost, FuncBuilder3()); + X86CallNode* call = c.call(fn, FuncBuilder3(kCallConvHost)); call->setArg(0, v2); call->setArg(1, v1); call->setArg(2, v0); @@ -1839,20 +1828,20 @@ struct X86Test_CallFast : public X86Test { } virtual void compile(X86Compiler& c) { - X86GpVar var(c, kVarTypeInt32, "var"); - X86GpVar fn(c, kVarTypeIntPtr, "fn"); + X86GpVar var = c.newInt32("var"); + X86GpVar fn = c.newIntPtr("fn"); - c.addFunc(kFuncConvHost, FuncBuilder1()); + c.addFunc(FuncBuilder1(kCallConvHost)); c.setArg(0, var); c.mov(fn, imm_ptr(calledFunc)); X86CallNode* call; - call = c.call(fn, kFuncConvHostFastCall, FuncBuilder1()); + call = c.call(fn, FuncBuilder1(kCallConvHostFastCall)); call->setArg(0, var); call->setRet(0, var); - call = c.call(fn, kFuncConvHostFastCall, FuncBuilder1()); + call = c.call(fn, FuncBuilder1(kCallConvHostFastCall)); call->setArg(0, 
var); call->setRet(0, var); @@ -1895,20 +1884,20 @@ struct X86Test_CallManyArgs : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); // Prepare. - X86GpVar fn(c, kVarTypeIntPtr, "fn"); - X86GpVar va(c, kVarTypeInt32, "va"); - X86GpVar vb(c, kVarTypeInt32, "vb"); - X86GpVar vc(c, kVarTypeInt32, "vc"); - X86GpVar vd(c, kVarTypeInt32, "vd"); - X86GpVar ve(c, kVarTypeInt32, "ve"); - X86GpVar vf(c, kVarTypeInt32, "vf"); - X86GpVar vg(c, kVarTypeInt32, "vg"); - X86GpVar vh(c, kVarTypeInt32, "vh"); - X86GpVar vi(c, kVarTypeInt32, "vi"); - X86GpVar vj(c, kVarTypeInt32, "vj"); + X86GpVar fn = c.newIntPtr("fn"); + X86GpVar va = c.newInt32("va"); + X86GpVar vb = c.newInt32("vb"); + X86GpVar vc = c.newInt32("vc"); + X86GpVar vd = c.newInt32("vd"); + X86GpVar ve = c.newInt32("ve"); + X86GpVar vf = c.newInt32("vf"); + X86GpVar vg = c.newInt32("vg"); + X86GpVar vh = c.newInt32("vh"); + X86GpVar vi = c.newInt32("vi"); + X86GpVar vj = c.newInt32("vj"); c.mov(fn, imm_ptr(calledFunc)); c.mov(va, 0x03); @@ -1923,8 +1912,7 @@ struct X86Test_CallManyArgs : public X86Test { c.mov(vj, 0x1E); // Call function. - X86CallNode* call = c.call(fn, kFuncConvHost, - FuncBuilder10()); + X86CallNode* call = c.call(fn, FuncBuilder10(kCallConvHost)); call->setArg(0, va); call->setArg(1, vb); call->setArg(2, vc); @@ -1971,18 +1959,17 @@ struct X86Test_CallDuplicateArgs : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); // Prepare. - X86GpVar fn(c, kVarTypeIntPtr, "fn"); - X86GpVar a(c, kVarTypeInt32, "a"); + X86GpVar fn = c.newIntPtr("fn"); + X86GpVar a = c.newInt32("a"); c.mov(fn, imm_ptr(calledFunc)); c.mov(a, 3); // Call function. 
- X86CallNode* call = c.call(fn, kFuncConvHost, - FuncBuilder10()); + X86CallNode* call = c.call(fn, FuncBuilder10(kCallConvHost)); call->setArg(0, a); call->setArg(1, a); call->setArg(2, a); @@ -2025,17 +2012,16 @@ struct X86Test_CallImmArgs : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); // Prepare. - X86GpVar fn(c, kVarTypeIntPtr, "fn"); - X86GpVar rv(c, kVarTypeInt32, "rv"); + X86GpVar fn = c.newIntPtr("fn"); + X86GpVar rv = c.newInt32("rv"); c.mov(fn, imm_ptr(X86Test_CallManyArgs::calledFunc)); // Call function. - X86CallNode* call = c.call(fn, kFuncConvHost, - FuncBuilder10()); + X86CallNode* call = c.call(fn, FuncBuilder10(kCallConvHost)); call->setArg(0, imm(0x03)); call->setArg(1, imm(0x12)); call->setArg(2, imm(0xA0)); @@ -2091,17 +2077,16 @@ struct X86Test_CallPtrArgs : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); // Prepare. - X86GpVar fn(c, kVarTypeIntPtr, "fn"); - X86GpVar rv(c, kVarTypeInt32, "rv"); + X86GpVar fn = c.newIntPtr("fn"); + X86GpVar rv = c.newInt32("rv"); c.mov(fn, imm_ptr(calledFunc)); // Call function. - X86CallNode* call = c.call(fn, kFuncConvHost, - FuncBuilder10()); + X86CallNode* call = c.call(fn, FuncBuilder10(kCallConvHost)); call->setArg(0, imm(0x01)); call->setArg(1, imm(0x02)); call->setArg(2, imm(0x03)); @@ -2148,22 +2133,21 @@ struct X86Test_CallFloatAsXmmRet : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86XmmVar a(c, kX86VarTypeXmmSs, "a"); - X86XmmVar b(c, kX86VarTypeXmmSs, "b"); - X86XmmVar ret(c, kX86VarTypeXmmSs, "ret"); + X86XmmVar a = c.newXmmSs("a"); + X86XmmVar b = c.newXmmSs("b"); + X86XmmVar ret = c.newXmmSs("ret"); c.setArg(0, a); c.setArg(1, b); // Prepare. 
- X86GpVar fn(c); + X86GpVar fn = c.newIntPtr("fn"); c.mov(fn, imm_ptr(calledFunc)); // Call function. - X86CallNode* call = c.call(fn, kFuncConvHost, - FuncBuilder2()); + X86CallNode* call = c.call(fn, FuncBuilder2(kCallConvHost)); call->setArg(0, a); call->setArg(1, b); @@ -2203,20 +2187,19 @@ struct X86Test_CallDoubleAsXmmRet : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + c.addFunc(FuncBuilder2(kCallConvHost)); - X86XmmVar a(c, kX86VarTypeXmmSd, "a"); - X86XmmVar b(c, kX86VarTypeXmmSd, "b"); - X86XmmVar ret(c, kX86VarTypeXmmSd, "ret"); + X86XmmVar a = c.newXmmSd("a"); + X86XmmVar b = c.newXmmSd("b"); + X86XmmVar ret = c.newXmmSd("ret"); c.setArg(0, a); c.setArg(1, b); - X86GpVar fn(c); + X86GpVar fn = c.newIntPtr("fn"); c.mov(fn, imm_ptr(calledFunc)); - X86CallNode* call = c.call(fn, kFuncConvHost, - FuncBuilder2()); + X86CallNode* call = c.call(fn, FuncBuilder2(kCallConvHost)); call->setArg(0, a); call->setArg(1, b); @@ -2252,14 +2235,14 @@ struct X86Test_CallConditional : public X86Test { } virtual void compile(X86Compiler& c) { - X86GpVar x(c, kVarTypeInt32, "x"); - X86GpVar y(c, kVarTypeInt32, "y"); - X86GpVar op(c, kVarTypeInt32, "op"); + X86GpVar x = c.newInt32("x"); + X86GpVar y = c.newInt32("y"); + X86GpVar op = c.newInt32("op"); X86CallNode* call; X86GpVar result; - c.addFunc(kFuncConvHost, FuncBuilder3()); + c.addFunc(FuncBuilder3(kCallConvHost)); c.setArg(0, x); c.setArg(1, y); c.setArg(2, op); @@ -2272,23 +2255,23 @@ struct X86Test_CallConditional : public X86Test { c.cmp(op, 1); c.jz(opMul); - result = c.newGpVar(kVarTypeInt32, "result"); + result = c.newInt32("result_0"); c.mov(result, 0); c.ret(result); c.bind(opAdd); - result = c.newGpVar(kVarTypeInt32, "result"); + result = c.newInt32("result_1"); - call = c.call((Ptr)calledFuncAdd, kFuncConvHost, FuncBuilder2()); + call = c.call((Ptr)calledFuncAdd, FuncBuilder2(kCallConvHost)); call->setArg(0, x); call->setArg(1, y); call->setRet(0, 
result); c.ret(result); c.bind(opMul); - result = c.newGpVar(kVarTypeInt32, "result"); + result = c.newInt32("result_2"); - call = c.call((Ptr)calledFuncMul, kFuncConvHost, FuncBuilder2()); + call = c.call((Ptr)calledFuncMul, FuncBuilder2(kCallConvHost)); call->setArg(0, x); call->setArg(1, y); call->setRet(0, result); @@ -2338,26 +2321,26 @@ struct X86Test_CallMultiple : public X86Test { virtual void compile(X86Compiler& c) { unsigned int i; - X86GpVar buf(c, kVarTypeIntPtr, "buf"); - X86GpVar acc0(c, kVarTypeInt32, "acc0"); - X86GpVar acc1(c, kVarTypeInt32, "acc1"); + X86GpVar buf = c.newIntPtr("buf"); + X86GpVar acc0 = c.newInt32("acc0"); + X86GpVar acc1 = c.newInt32("acc1"); - c.addFunc(kFuncConvHost, FuncBuilder1()); + c.addFunc(FuncBuilder1(kCallConvHost)); c.setArg(0, buf); c.mov(acc0, 0); c.mov(acc1, 0); for (i = 0; i < 4; i++) { - X86GpVar ret(c, kVarTypeInt32); - X86GpVar ptr(c, kVarTypeIntPtr); - X86GpVar idx(c, kVarTypeInt32); + X86GpVar ret = c.newInt32("ret"); + X86GpVar ptr = c.newIntPtr("ptr"); + X86GpVar idx = c.newInt32("idx"); X86CallNode* call; c.mov(ptr, buf); c.mov(idx, static_cast(i)); - call = c.call((Ptr)calledFunc, kFuncConvHostFastCall, FuncBuilder2()); + call = c.call((Ptr)calledFunc, FuncBuilder2(kCallConvHostFastCall)); call->setArg(0, ptr); call->setArg(1, idx); call->setRet(0, ret); @@ -2367,7 +2350,7 @@ struct X86Test_CallMultiple : public X86Test { c.mov(ptr, buf); c.mov(idx, static_cast(i)); - call = c.call((Ptr)calledFunc, kFuncConvHostFastCall, FuncBuilder2()); + call = c.call((Ptr)calledFunc, FuncBuilder2(kCallConvHostFastCall)); call->setArg(0, ptr); call->setArg(1, idx); call->setRet(0, ret); @@ -2408,23 +2391,23 @@ struct X86Test_CallRecursive : public X86Test { } virtual void compile(X86Compiler& c) { - X86GpVar val(c, kVarTypeInt32, "val"); + X86GpVar val = c.newInt32("val"); Label skip(c); - X86FuncNode* func = c.addFunc(kFuncConvHost, FuncBuilder1()); + X86FuncNode* func = c.addFunc(FuncBuilder1(kCallConvHost)); 
c.setArg(0, val); c.cmp(val, 1); c.jle(skip); - X86GpVar tmp(c, kVarTypeInt32, "tmp"); + X86GpVar tmp = c.newInt32("tmp"); c.mov(tmp, val); c.dec(tmp); - X86CallNode* call = c.call(func->getEntryLabel(), kFuncConvHost, FuncBuilder1()); + X86CallNode* call = c.call(func->getEntryLabel(), FuncBuilder1(kCallConvHost)); call->setArg(0, tmp); call->setRet(0, tmp); - c.mul(c.newGpVar(kVarTypeInt32), val, tmp); + c.mul(c.newInt32(), val, tmp); c.bind(skip); c.ret(val); @@ -2459,14 +2442,14 @@ struct X86Test_CallMisc1 : public X86Test { static void dummy(int a, int b) {} virtual void compile(X86Compiler& c) { - X86GpVar val(c, kVarTypeInt32, "val"); + X86GpVar val = c.newInt32("val"); Label skip(c); - X86FuncNode* func = c.addFunc(kFuncConvHost, FuncBuilder2()); + X86FuncNode* func = c.addFunc(FuncBuilder2(kCallConvHost)); - X86GpVar a(c, kVarTypeInt32, "a"); - X86GpVar b(c, kVarTypeInt32, "b"); - X86GpVar r(c, kVarTypeInt32, "r"); + X86GpVar a = c.newInt32("a"); + X86GpVar b = c.newInt32("b"); + X86GpVar r = c.newInt32("r"); c.setArg(0, a); c.setArg(1, b); @@ -2474,7 +2457,7 @@ struct X86Test_CallMisc1 : public X86Test { c.alloc(a, x86::eax); c.alloc(b, x86::ebx); - X86CallNode* call = c.call(imm_ptr(dummy), kFuncConvHost, FuncBuilder2()); + X86CallNode* call = c.call(imm_ptr(dummy), FuncBuilder2(kCallConvHost)); call->setArg(0, a); call->setArg(1, b); @@ -2510,19 +2493,19 @@ struct X86Test_CallMisc2 : public X86Test { } virtual void compile(X86Compiler& c) { - X86FuncNode* func = c.addFunc(kFuncConvHost, FuncBuilder1()); + X86FuncNode* func = c.addFunc(FuncBuilder1(kCallConvHost)); - X86GpVar p(c, kVarTypeIntPtr, "p"); - X86GpVar fn(c, kVarTypeIntPtr, "fn"); + X86GpVar p = c.newIntPtr("p"); + X86GpVar fn = c.newIntPtr("fn"); - X86XmmVar arg(c, kX86VarTypeXmmSd, "arg"); - X86XmmVar ret(c, kX86VarTypeXmmSd, "ret"); + X86XmmVar arg = c.newXmmSd("arg"); + X86XmmVar ret = c.newXmmSd("ret"); c.setArg(0, p); c.movsd(arg, x86::ptr(p)); c.mov(fn, imm_ptr(op)); - X86CallNode* call 
= c.call(fn, kFuncConvHost, FuncBuilder1()); + X86CallNode* call = c.call(fn, FuncBuilder1(kCallConvHost)); call->setArg(0, arg); call->setRet(0, ret); @@ -2560,19 +2543,19 @@ struct X86Test_CallMisc3 : public X86Test { } virtual void compile(X86Compiler& c) { - X86FuncNode* func = c.addFunc(kFuncConvHost, FuncBuilder1()); + X86FuncNode* func = c.addFunc(FuncBuilder1(kCallConvHost)); - X86GpVar p(c, kVarTypeIntPtr, "p"); - X86GpVar fn(c, kVarTypeIntPtr, "fn"); + X86GpVar p = c.newIntPtr("p"); + X86GpVar fn = c.newIntPtr("fn"); - X86XmmVar arg(c, kX86VarTypeXmmSd, "arg"); - X86XmmVar ret(c, kX86VarTypeXmmSd, "ret"); + X86XmmVar arg = c.newXmmSd("arg"); + X86XmmVar ret = c.newXmmSd("ret"); c.setArg(0, p); c.movsd(arg, x86::ptr(p)); c.mov(fn, imm_ptr(op)); - X86CallNode* call = c.call(fn, kFuncConvHost, FuncBuilder1()); + X86CallNode* call = c.call(fn, FuncBuilder1(kCallConvHost)); call->setArg(0, arg); call->setRet(0, ret); @@ -2614,14 +2597,17 @@ struct X86Test_CallMisc4 : public X86Test { virtual void compile(X86Compiler& c) { FuncBuilderX funcPrototype; + + funcPrototype.setCallConv(kCallConvHost); funcPrototype.setRet(kVarTypeFp64); - X86FuncNode* func = c.addFunc(kFuncConvHost, funcPrototype); + X86FuncNode* func = c.addFunc(funcPrototype); FuncBuilderX callPrototype; + callPrototype.setCallConv(kCallConvHost); callPrototype.setRet(kVarTypeFp64); - X86CallNode* call = c.call(imm_ptr(calledFunc), kFuncConvHost, callPrototype); + X86CallNode* call = c.call(imm_ptr(calledFunc), callPrototype); - X86XmmVar ret(c, kX86VarTypeXmmSd, "ret"); + X86XmmVar ret = c.newXmmSd("ret"); call->setRet(0, ret); c.ret(ret); @@ -2656,13 +2642,13 @@ struct X86Test_MiscConstPool : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder0()); + c.addFunc(FuncBuilder0(kCallConvHost)); - X86GpVar v0(c, kVarTypeInt32, "v0"); - X86GpVar v1(c, kVarTypeInt32, "v1"); + X86GpVar v0 = c.newInt32("v0"); + X86GpVar v1 = c.newInt32("v1"); - X86Mem 
c0(c.newInt32Const(kConstScopeLocal, 200)); - X86Mem c1(c.newInt32Const(kConstScopeLocal, 33)); + X86Mem c0 = c.newInt32Const(kConstScopeLocal, 200); + X86Mem c1 = c.newInt32Const(kConstScopeLocal, 33); c.mov(v0, c0); c.mov(v1, c1); @@ -2698,11 +2684,11 @@ struct X86Test_MiscMultiRet : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder3()); + c.addFunc(FuncBuilder3(kCallConvHost)); - X86GpVar op(c, kVarTypeInt32, "op"); - X86GpVar a(c, kVarTypeInt32, "a"); - X86GpVar b(c, kVarTypeInt32, "b"); + X86GpVar op = c.newInt32("op"); + X86GpVar a = c.newInt32("a"); + X86GpVar b = c.newInt32("b"); Label L_Zero(c); Label L_Add(c); @@ -2746,7 +2732,7 @@ struct X86Test_MiscMultiRet : public X86Test { c.cmp(b, 0); c.jz(L_Zero); - X86GpVar zero(c, kVarTypeInt32, "zero"); + X86GpVar zero = c.newInt32("zero"); c.xor_(zero, zero); c.idiv(zero, a, b); c.ret(a); @@ -2793,10 +2779,14 @@ struct X86Test_MiscUnfollow : public X86Test { } virtual void compile(X86Compiler& c) { - c.addFunc(kFuncConvHost, FuncBuilder2()); + // NOTE: Fastcall calling convention is the most appropriate here, as all + // arguments will be passed in registers and there won't be any stack + // misalignment when we call the `handler()`. This was failing on OSX + // when targetting 32-bit. 
+ c.addFunc(FuncBuilder2(kCallConvHostFastCall)); - X86GpVar a(c, kVarTypeInt32); - X86GpVar b(c, kVarTypeIntPtr); + X86GpVar a = c.newInt32("a"); + X86GpVar b = c.newIntPtr("b"); Label tramp(c); @@ -2815,7 +2805,7 @@ struct X86Test_MiscUnfollow : public X86Test { } virtual bool run(void* _func, StringBuilder& result, StringBuilder& expect) { - typedef int (*Func)(int, void*); + typedef int (ASMJIT_FASTCALL *Func)(int, void*); Func func = asmjit_cast(_func); @@ -2833,7 +2823,7 @@ struct X86Test_MiscUnfollow : public X86Test { return resultRet == expectRet; } - static void handler() { longjmp(globalJmpBuf, 1); } + static ASMJIT_FASTCALL void handler() { longjmp(globalJmpBuf, 1); } }; // ============================================================================ @@ -2956,25 +2946,25 @@ int X86TestSuite::run() { for (i = 0; i < count; i++) { JitRuntime runtime; - X86Compiler compiler(&runtime); + X86Assembler a(&runtime); + X86Compiler c(&a); if (alwaysPrintLog) { fprintf(file, "\n"); - compiler.setLogger(&fileLogger); + a.setLogger(&fileLogger); } else { stringLogger.clearString(); - compiler.setLogger(&stringLogger); + a.setLogger(&stringLogger); } X86Test* test = tests[i]; - test->compile(compiler); + test->compile(c); + c.finalize(); - void* func = compiler.make(); - - if (alwaysPrintLog) { + void* func = a.make(); + if (alwaysPrintLog) fflush(file); - } if (func != NULL) { StringBuilder result; diff --git a/src/test/genblend.h b/src/test/genblend.h index a5e5c39..5e18c43 100644 --- a/src/test/genblend.h +++ b/src/test/genblend.h @@ -22,22 +22,22 @@ static void blend(asmjit::X86Compiler& c) { using namespace asmjit; using namespace asmjit::x86; - X86GpVar dst(c, kVarTypeIntPtr, "dst"); - X86GpVar src(c, kVarTypeIntPtr, "src"); + X86GpVar dst = c.newIntPtr("dst"); + X86GpVar src = c.newIntPtr("src"); - X86GpVar i(c, kVarTypeIntPtr, "i"); - X86GpVar j(c, kVarTypeIntPtr, "j"); - X86GpVar t(c, kVarTypeIntPtr, "t"); + X86GpVar i = c.newIntPtr("i"); + X86GpVar j = 
c.newIntPtr("j"); + X86GpVar t = c.newIntPtr("t"); - X86XmmVar cZero(c, kX86VarTypeXmm, "cZero"); - X86XmmVar cMul255A(c, kX86VarTypeXmm, "cMul255A"); - X86XmmVar cMul255M(c, kX86VarTypeXmm, "cMul255M"); + X86XmmVar cZero = c.newXmm("cZero"); + X86XmmVar cMul255A = c.newXmm("cMul255A"); + X86XmmVar cMul255M = c.newXmm("cMul255M"); - X86XmmVar x0(c, kX86VarTypeXmm, "x0"); - X86XmmVar x1(c, kX86VarTypeXmm, "x1"); - X86XmmVar y0(c, kX86VarTypeXmm, "y0"); - X86XmmVar a0(c, kX86VarTypeXmm, "a0"); - X86XmmVar a1(c, kX86VarTypeXmm, "a1"); + X86XmmVar x0 = c.newXmm("x0"); + X86XmmVar x1 = c.newXmm("x1"); + X86XmmVar y0 = c.newXmm("y0"); + X86XmmVar a0 = c.newXmm("a0"); + X86XmmVar a1 = c.newXmm("a1"); Label L_SmallLoop(c); Label L_SmallEnd(c); @@ -47,7 +47,7 @@ static void blend(asmjit::X86Compiler& c) { Label L_Data(c); - c.addFunc(kFuncConvHost, FuncBuilder3()); + c.addFunc(FuncBuilder3(kCallConvHost)); c.setArg(0, dst); c.setArg(1, src); @@ -90,7 +90,7 @@ static void blend(asmjit::X86Compiler& c) { c.psrlw(a0, 8); c.punpcklbw(x0, cZero); - c.pshuflw(a0, a0, X86Util::mmShuffle(1, 1, 1, 1)); + c.pshuflw(a0, a0, X86Util::shuffle(1, 1, 1, 1)); c.punpcklbw(y0, cZero); c.pmullw(x0, a0); @@ -138,8 +138,8 @@ static void blend(asmjit::X86Compiler& c) { c.punpckhbw(x1, cZero); c.punpckhwd(a1, a1); - c.pshufd(a0, a0, X86Util::mmShuffle(3, 3, 1, 1)); - c.pshufd(a1, a1, X86Util::mmShuffle(3, 3, 1, 1)); + c.pshufd(a0, a0, X86Util::shuffle(3, 3, 1, 1)); + c.pshufd(a1, a1, X86Util::shuffle(3, 3, 1, 1)); c.pmullw(x0, a0); c.pmullw(x1, a1); diff --git a/tools/autoexp-patch.py b/tools/autoexp-patch.py deleted file mode 100644 index 3df3e3e..0000000 --- a/tools/autoexp-patch.py +++ /dev/null @@ -1,424 +0,0 @@ -#!/usr/bin/env python - -import os - -AUTOEXP_FILES = [ - # Visual Studio 8.0 (2005). 
- "C:\\Program Files\\Microsoft Visual Studio 8\\Common7\\Packages\\Debugger\\autoexp.dat", - "C:\\Program Files (x86)\\Microsoft Visual Studio 8\\Common7\\Packages\\Debugger\\autoexp.dat", - - # Visual Studio 9.0 (2008). - "C:\\Program Files\\Microsoft Visual Studio 9.0\\Common7\\Packages\\Debugger\\autoexp.dat", - "C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\Common7\\Packages\\Debugger\\autoexp.dat", - - # Visual Studio 10.0 (2010). - "C:\\Program Files\\Microsoft Visual Studio 10.0\\Common7\\Packages\\Debugger\\autoexp.dat", - "C:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\Common7\\Packages\\Debugger\\autoexp.dat" -] - -DIRECTIVE_SYMBOL = '@' - -# ============================================================================= -# [Log] -# ============================================================================= - -def log(str): - print(str) - -# ============================================================================= -# [Is...] -# ============================================================================= - -def isDirective(c): - return c == DIRECTIVE_SYMBOL - -def isAlpha(c): - c = ord(c) - return (c >= ord('a') and c <= ord('z')) or (c >= ord('A') and c <= ord('Z')) - -def isAlpha_(c): - return isAlpha(c) or (c == '_') - -def isNumber(c): - c = ord(c) - return (c >= ord('0')) and (c <= ord('9')) - -def isAlnum(c): - return isAlpha(c) or isNumber(c) - -def isAlnum_(c): - return isAlnum(c) or (c == '_') - -def isSpace(c): - return (c == ' ') or (c == '\t') - -def isNewLine(c): - return c == '\n' - -# ============================================================================= -# [SyntaxError] -# ============================================================================= - -class SyntaxError(Exception): - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return repr(self.msg) - -# ============================================================================= -# [Context] -# 
============================================================================= - -class Context(object): - def __init__(self, data): - self.data = data - self.index = 0 - self.size = len(data) - - def isNewLine(self): - if self.index == 0: - return True - else: - return self.data[self.index - 1] == '\n' - - def isEnd(self): - return self.index >= self.size - - def getChar(self): - if self.index >= self.size: - return '\0' - return self.data[self.index] - - def advance(self): - if self.index < self.size: - self.index += 1 - - def parseUntilTrue(self, func, advance): - while not self.isEnd(): - c = self.data[self.index] - if func(c): - self.index += 1 - continue - if advance: - self.index += 1 - return True - return False - - def parseUntilFalse(self, func, advance): - while not self.isEnd(): - c = self.data[self.index] - if not func(c): - self.index += 1 - continue - if advance: - self.index += 1 - return True - return False - - def skipString(self): - def func(c): - return c == '"' - return self.parseUntilFalse(func, True) - - def skipSpaces(self): - return self.parseUntilTrue(isSpace, False) - - def skipLine(self): - return self.parseUntilFalse(isNewLine, True) - - def parseDirective(self, index): - start = index - - data = self.data - size = self.size - - c = data[index] - assert isAlpha_(c) - - while True: - index += 1 - if index >= size: - break - c = data[index] - if isAlnum_(c): - continue - break - - self.index = index - return data[start:index] - - def parseSymbol(self, index): - start = index - - data = self.data - size = self.size - - c = data[index] - assert isAlpha_(c) - - while True: - index += 1 - if index >= size: - return data[start:index] - c = data[index] - if isAlnum_(c): - continue - if c == ':' and index + 2 < size and data[index + 1] == ':' and isAlpha_(data[index + 2]): - index += 2 - continue - - self.index = index - return data[start:index] - - def parseMacro(self, index): - start = index - end = None - - data = self.data - size = self.size 
- - if index >= size: - return "" - - while True: - c = data[index] - index += 1 - - if c == '\n' or index >= size: - if end == None: - end = index - 1 - break - - if c == ';': - if end == None: - end = index - - while start < end and isSpace(data[end - 1]): - end -= 1 - - self.index = index - return data[start:end] - - def replaceRange(self, start, end, content): - old = self.data - - self.data = old[0:start] + content + old[end:] - self.size = len(self.data) - - assert(self.index >= end) - - self.index -= end - start - self.index += len(content) - -# ============================================================================= -# [AutoExpDat] -# ============================================================================= - -class AutoExpDat(object): - def __init__(self, data): - self.library = None - self.symbols = {} - self.data = self.process(data.replace('\r', '')) - - def process(self, data): - ctx = Context(data) - - while not ctx.isEnd(): - c = ctx.getChar() - - # Skip comments. - if c == ';': - ctx.skipLine() - continue - - # Skip strings. - if c == '"': - ctx.advance() - ctx.skipString() - continue - - # Skip numbers. - if isNumber(c): - ctx.parseUntilTrue(isAlnum_, True) - continue - - # Parse directives. - if isDirective(c) and ctx.isNewLine(): - start = ctx.index - - ctx.advance() - c = ctx.getChar() - - # Remove lines that have '@' followed by space or newline. - if isNewLine(c) or c == '\0': - ctx.advance() - ctx.replaceRange(start, ctx.index, "") - continue - if isSpace(c): - ctx.skipLine() - ctx.replaceRange(start, ctx.index, "") - continue - - directive = ctx.parseDirective(ctx.index) - - c = ctx.getChar() - if not isSpace(c): - self.error("Directive Error: @" + directive + ".") - ctx.skipSpaces() - - # Directive '@library'. - if directive == "library": - self.library = ctx.parseMacro(ctx.index) - - # Directive '@define'. 
- elif directive == "define": - c = ctx.getChar() - if not isAlpha_(c): - self.error("Define Directive has to start with alpha character or underscore") - symbol = ctx.parseSymbol(ctx.index) - - c = ctx.getChar() - - # No Macro. - if isNewLine(c): - ctx.advance() - self.addSymbol(symbol, "") - # Has Macro. - else: - ctx.skipSpaces() - - macro = ctx.parseMacro(ctx.index) - self.addSymbol(symbol, macro) - - # Unknown Directive. - else: - self.error("Unknown Directive: @" + directive + ".") - - ctx.replaceRange(start, ctx.index, "") - continue - - # Parse/Replace symbol. - if isAlpha_(c) and ctx.index > 0 and ctx.data[ctx.index - 1] != '#': - start = ctx.index - symbol = ctx.parseSymbol(start) - - if symbol in self.symbols: - ctx.replaceRange(start, start + len(symbol), self.symbols[symbol]) - continue - - ctx.advance() - - return ctx.data - - def addSymbol(self, symbol, macro): - if symbol in self.symbols: - self.error("Symbol '" + symbol + "' redefinition.") - else: - # Recurse. - macro = self.process(macro) - - log("-- @define " + symbol + " " + macro) - self.symbols[symbol] = macro - - def error(self, msg): - raise SyntaxError(msg) - -# ============================================================================= -# [LoadFile / SaveFile] -# ============================================================================= - -def loadFile(file): - h = None - data = None - - try: - h = open(file, "rb") - data = h.read() - except: - pass - finally: - if h: - h.close() - - return data - -def saveFile(file, data): - h = None - result = False - - try: - h = open(file, "wb") - h.truncate() - h.write(data) - result = True - except: - pass - finally: - if h: - h.close() - - return result - -# ============================================================================= -# [PatchFile] -# ============================================================================= - -def patchFile(file, mark, data): - input = loadFile(file) - if not input: - return - - beginMark = ";${" + mark + 
":Begin}" - endMark = ";${" + mark + ":End}" - - if beginMark in input: - # Replace. - if not endMark in input: - log("-- Corrupted File:\n" + " " + file) - return - - beginMarkIndex = input.find(beginMark) - endMarkIndex = input.find(endMark) - - beginMarkIndex = input.find('\n', beginMarkIndex) + 1 - endMarkIndex = input.rfind('\n', 0, endMarkIndex) + 1 - - if beginMarkIndex == -1 or \ - endMarkIndex == -1 or \ - beginMarkIndex > endMarkIndex: - log("-- Corrupted File:\n" + " " + file) - return - - output = input[:beginMarkIndex] + data + input[endMarkIndex:] - - else: - # Add. - output = input - output += "\n" - output += beginMark + "\n" - output += data - output += endMark + "\n" - - if input == output: - log("-- Unaffected:\n" + " " + file) - else: - log("-- Patching:\n" + " " + file) - if not saveFile(file, output): - log("!! Can't write:\n" + " " + file) - -def main(): - src = loadFile("autoexp.dat") - if src == None: - log("!! Can't read autoexp.dat") - return - - src = AutoExpDat(src) - if not src.library: - log("!! 
Library not defined, use @library directive.") - return - - for file in AUTOEXP_FILES: - patchFile(file, src.library, src.data) - -main() diff --git a/tools/autoexp.dat b/tools/autoexp.dat deleted file mode 100644 index f30b99f..0000000 --- a/tools/autoexp.dat +++ /dev/null @@ -1,920 +0,0 @@ -@library asmjit -@ -@define NULL (0) -@ -@define asmjit::kInvalidValue (0xFFFFFFFF) -@ -@define asmjit::kOperandTypeNone (0x0) -@define asmjit::kOperandTypeReg (0x1) -@define asmjit::kOperandTypeVar (0x2) -@define asmjit::kOperandTypeMem (0x3) -@define asmjit::kOperandTypeImm (0x4) -@define asmjit::kOperandTypeLabel (0x5) -@ -@define asmjit::kMemTypeBaseIndex (0x0) -@define asmjit::kMemTypeStackIndex (0x1) -@define asmjit::kMemTypeLabel (0x2) -@define asmjit::kMemTypeAbsolute (0x3) -@ -@define asmjit::kVarAttrInReg (0x00000001) -@define asmjit::kVarAttrOutReg (0x00000002) -@define asmjit::kVarAttrInOutReg (0x00000003) -@define asmjit::kVarAttrInMem (0x00000004) -@define asmjit::kVarAttrOutMem (0x00000008) -@define asmjit::kVarAttrInOutMem (0x0000000C) -@define asmjit::kVarAttrInDecide (0x00000010) -@define asmjit::kVarAttrOutDecide (0x00000020) -@define asmjit::kVarAttrInOutDecide (0x00000030) -@define asmjit::kVarAttrInConv (0x00000040) -@define asmjit::kVarAttrOutConv (0x00000080) -@define asmjit::kVarAttrInOutConv (0x000000C0) -@define asmjit::kVarAttrInCall (0x00000100) -@define asmjit::kVarAttrInArg (0x00000200) -@define asmjit::kVarAttrOutRet (0x00000400) -@define asmjit::kVarAttrUnuse (0x00000800) -@ -@define asmjit::kVarTypeInt8 (0x0) -@define asmjit::kVarTypeUInt8 (0x1) -@define asmjit::kVarTypeInt16 (0x2) -@define asmjit::kVarTypeUInt16 (0x3) -@define asmjit::kVarTypeInt32 (0x4) -@define asmjit::kVarTypeUInt32 (0x5) -@define asmjit::kVarTypeInt64 (0x6) -@define asmjit::kVarTypeUInt64 (0x7) -@define asmjit::kVarTypeFp32 (0x8) -@define asmjit::kVarTypeFp64 (0x9) -@define asmjit::kVarTypeFpEx (0xA) -@define asmjit::kVarTypeInvalid (0xFF) -@ -@define 
asmjit::x86::kVarTypeMm (0xB) -@define asmjit::x86::kVarTypeXmm (0xC) -@define asmjit::x86::kVarTypeXmmSs (0xD) -@define asmjit::x86::kVarTypeXmmPs (0xE) -@define asmjit::x86::kVarTypeXmmSd (0xF) -@define asmjit::x86::kVarTypeXmmPd (0x10) -@ -@define asmjit::kVarStateUnused (0x0) -@define asmjit::kVarStateReg (0x1) -@define asmjit::kVarStateMem (0x2) -@ -@define asmjit::kNodeTypeNone (0x0) -@define asmjit::kNodeTypeAlign (0x1) -@define asmjit::kNodeTypeEmbed (0x2) -@define asmjit::kNodeTypeComment (0x3) -@define asmjit::kNodeTypeHint (0x4) -@define asmjit::kNodeTypeTarget (0x5) -@define asmjit::kNodeTypeInst (0x6) -@define asmjit::kNodeTypeFunc (0x7) -@define asmjit::kNodeTypeEnd (0x8) -@define asmjit::kNodeTypeRet (0x9) -@define asmjit::kNodeTypeCall (0xA) -@define asmjit::kNodeTypeSArg (0xB) -@ -@define asmjit::kNodeFlagIsTranslated (0x0001) -@define asmjit::kNodeFlagIsJmp (0x0002) -@define asmjit::kNodeFlagIsJcc (0x0004) -@define asmjit::kNodeFlagIsTaken (0x0008) -@define asmjit::kNodeFlagIsRet (0x0010) -@define asmjit::kNodeFlagIsSpecial (0x0020) -@define asmjit::kNodeFlagIsFp (0x0040) - -[Visualizer] - -; ============================================================================= -; [asmjit::base] -; ============================================================================= - -asmjit::PodVector<*> { - preview( - #( - "[", $e._length, "]", - "(", - #array( - expr: ((($T1*)($e._d + 1))[$i]), - size: $e._d->length - ), - ")" - ) - ) - - children( - #( - #([...]: [$c,!]), - - #array( - expr: ((($T1*)($e._d + 1))[$i]), - size: $e._d->length - ) - ) - ) -} - -; ============================================================================= -; [asmjit::x86x64 - Operand] -; ============================================================================= - -asmjit::Operand { - preview( - #( - #if ($e._base.op == asmjit::kOperandTypeReg) ([(*(asmjit::BaseReg *) &$e)]) - #elif ($e._base.op == asmjit::kOperandTypeVar) ([(*(asmjit::BaseVar *) &$e)]) - #elif ($e._base.op == 
asmjit::kOperandTypeMem) ([(*(asmjit::BaseMem *) &$e)]) - #elif ($e._base.op == asmjit::kOperandTypeImm) ([(*(asmjit::Imm *) &$e)]) - #elif ($e._base.op == asmjit::kOperandTypeLabel) ([(*(asmjit::Label *) &$e)]) - #else ("noOperand") - ) - ) - - children( - #( - #([...]: [$c,!]), - #(base: [$e._base]), - #(reg: [(*(asmjit::BaseReg*) &$e)]), - #(var: [(*(asmjit::BaseVar*) &$e)]), - #(mem: [(*(asmjit::BaseMem*) &$e)]), - #(imm: [(*(asmjit::Imm*) &$e)]), - #(label: [(*(asmjit::Label*) &$e)]) - ) - ) -} - -asmjit::BaseReg|asmjit::x86x64::X86Reg|asmjit::x86x64::GpReg|asmjit::x86x64::FpReg|asmjit::x86x64::MmReg|asmjit::x86x64::XmmReg|asmjit::x86x64::SegReg { - preview( - #( - #if ($e._reg.code == 0x0100) ("al") - #elif ($e._reg.code == 0x0101) ("cl") - #elif ($e._reg.code == 0x0102) ("dl") - #elif ($e._reg.code == 0x0103) ("bl") - #elif ($e._reg.code == 0x0104) ("spl") - #elif ($e._reg.code == 0x0105) ("bpl") - #elif ($e._reg.code == 0x0106) ("sil") - #elif ($e._reg.code == 0x0107) ("dil") - #elif ($e._reg.code == 0x0108) ("r8b") - #elif ($e._reg.code == 0x0109) ("r9b") - #elif ($e._reg.code == 0x010A) ("r10b") - #elif ($e._reg.code == 0x010B) ("r11b") - #elif ($e._reg.code == 0x010C) ("r12b") - #elif ($e._reg.code == 0x010D) ("r13b") - #elif ($e._reg.code == 0x010E) ("r14b") - #elif ($e._reg.code == 0x010F) ("r15b") - - #elif ($e._reg.code == 0x0200) ("ah") - #elif ($e._reg.code == 0x0201) ("ch") - #elif ($e._reg.code == 0x0202) ("dh") - #elif ($e._reg.code == 0x0203) ("bh") - - #elif ($e._reg.code == 0x1000) ("ax") - #elif ($e._reg.code == 0x1001) ("cx") - #elif ($e._reg.code == 0x1002) ("dx") - #elif ($e._reg.code == 0x1003) ("bx") - #elif ($e._reg.code == 0x1004) ("sp") - #elif ($e._reg.code == 0x1005) ("bp") - #elif ($e._reg.code == 0x1006) ("si") - #elif ($e._reg.code == 0x1007) ("di") - #elif ($e._reg.code == 0x1008) ("r8w") - #elif ($e._reg.code == 0x1009) ("r9w") - #elif ($e._reg.code == 0x100A) ("r10w") - #elif ($e._reg.code == 0x100B) ("r11w") - #elif 
($e._reg.code == 0x100C) ("r12w") - #elif ($e._reg.code == 0x100D) ("r13w") - #elif ($e._reg.code == 0x100E) ("r14w") - #elif ($e._reg.code == 0x100F) ("r15w") - - #elif ($e._reg.code == 0x2000) ("eax") - #elif ($e._reg.code == 0x2001) ("ecx") - #elif ($e._reg.code == 0x2002) ("edx") - #elif ($e._reg.code == 0x2003) ("ebx") - #elif ($e._reg.code == 0x2004) ("esp") - #elif ($e._reg.code == 0x2005) ("ebp") - #elif ($e._reg.code == 0x2006) ("esi") - #elif ($e._reg.code == 0x2007) ("edi") - #elif ($e._reg.code == 0x2008) ("r8d") - #elif ($e._reg.code == 0x2009) ("r9d") - #elif ($e._reg.code == 0x200A) ("r10d") - #elif ($e._reg.code == 0x200B) ("r11d") - #elif ($e._reg.code == 0x200C) ("r12d") - #elif ($e._reg.code == 0x200D) ("r13d") - #elif ($e._reg.code == 0x200E) ("r14d") - #elif ($e._reg.code == 0x200F) ("r15d") - - #elif ($e._reg.code == 0x3000) ("rax") - #elif ($e._reg.code == 0x3001) ("rcx") - #elif ($e._reg.code == 0x3002) ("rdx") - #elif ($e._reg.code == 0x3003) ("rbx") - #elif ($e._reg.code == 0x3004) ("rsp") - #elif ($e._reg.code == 0x3005) ("rbp") - #elif ($e._reg.code == 0x3006) ("rsi") - #elif ($e._reg.code == 0x3007) ("rdi") - #elif ($e._reg.code == 0x3008) ("r8") - #elif ($e._reg.code == 0x3009) ("r9") - #elif ($e._reg.code == 0x300A) ("r10") - #elif ($e._reg.code == 0x300B) ("r11") - #elif ($e._reg.code == 0x300C) ("r12") - #elif ($e._reg.code == 0x300D) ("r13") - #elif ($e._reg.code == 0x300E) ("r14") - #elif ($e._reg.code == 0x300F) ("r15") - - #elif ($e._reg.code == 0x5000) ("fp0") - #elif ($e._reg.code == 0x5001) ("fp1") - #elif ($e._reg.code == 0x5002) ("fp2") - #elif ($e._reg.code == 0x5003) ("fp3") - #elif ($e._reg.code == 0x5004) ("fp4") - #elif ($e._reg.code == 0x5005) ("fp5") - #elif ($e._reg.code == 0x5006) ("fp6") - #elif ($e._reg.code == 0x5007) ("fp7") - - #elif ($e._reg.code == 0x6000) ("mm0") - #elif ($e._reg.code == 0x6001) ("mm1") - #elif ($e._reg.code == 0x6002) ("mm2") - #elif ($e._reg.code == 0x6003) ("mm3") - #elif ($e._reg.code 
== 0x6004) ("mm4") - #elif ($e._reg.code == 0x6005) ("mm5") - #elif ($e._reg.code == 0x6006) ("mm6") - #elif ($e._reg.code == 0x6007) ("mm7") - - #elif ($e._reg.code == 0x7000) ("xmm0") - #elif ($e._reg.code == 0x7001) ("xmm1") - #elif ($e._reg.code == 0x7002) ("xmm2") - #elif ($e._reg.code == 0x7003) ("xmm3") - #elif ($e._reg.code == 0x7004) ("xmm4") - #elif ($e._reg.code == 0x7005) ("xmm5") - #elif ($e._reg.code == 0x7006) ("xmm6") - #elif ($e._reg.code == 0x7007) ("xmm7") - #elif ($e._reg.code == 0x7008) ("xmm8") - #elif ($e._reg.code == 0x7009) ("xmm9") - #elif ($e._reg.code == 0x700A) ("xmm10") - #elif ($e._reg.code == 0x700B) ("xmm11") - #elif ($e._reg.code == 0x700C) ("xmm12") - #elif ($e._reg.code == 0x700D) ("xmm13") - #elif ($e._reg.code == 0x700E) ("xmm14") - #elif ($e._reg.code == 0x700F) ("xmm15") - - #elif ($e._reg.code == 0x8000) ("ymm0") - #elif ($e._reg.code == 0x8001) ("ymm1") - #elif ($e._reg.code == 0x8002) ("ymm2") - #elif ($e._reg.code == 0x8003) ("ymm3") - #elif ($e._reg.code == 0x8004) ("ymm4") - #elif ($e._reg.code == 0x8005) ("ymm5") - #elif ($e._reg.code == 0x8006) ("ymm6") - #elif ($e._reg.code == 0x8007) ("ymm7") - #elif ($e._reg.code == 0x8008) ("ymm8") - #elif ($e._reg.code == 0x8009) ("ymm9") - #elif ($e._reg.code == 0x800A) ("ymm10") - #elif ($e._reg.code == 0x800B) ("ymm11") - #elif ($e._reg.code == 0x800C) ("ymm12") - #elif ($e._reg.code == 0x800D) ("ymm13") - #elif ($e._reg.code == 0x800E) ("ymm14") - #elif ($e._reg.code == 0x800F) ("ymm15") - - #elif ($e._reg.code == 0xD000) ("es") - #elif ($e._reg.code == 0xD001) ("cs") - #elif ($e._reg.code == 0xD002) ("ss") - #elif ($e._reg.code == 0xD003) ("ds") - #elif ($e._reg.code == 0xD004) ("fs") - #elif ($e._reg.code == 0xD005) ("gs") - - #else ("noReg") - ) - ) - - children( - #( - #([...]: [$c,!]), - #(op: [$e._reg.op, x]), - #(size: [$e._reg.size, u]), - #(code: [$e._reg.code, x]) - ) - ) -} - 
-asmjit::BaseVar|asmjit::x86x64::X86Var|asmjit::x86x64::GpVar|asmjit::x86x64::FpVar|asmjit::x86x64::MmVar|asmjit::x86x64::XmmVar { - preview( - #( - #if ($e._var.varType == asmjit::kVarTypeInt8) ("gpbLo") - #elif ($e._var.varType == asmjit::kVarTypeUInt8) ("gpbLo") - #elif ($e._var.varType == asmjit::kVarTypeInt16) ("gpw") - #elif ($e._var.varType == asmjit::kVarTypeUInt16) ("gpw") - #elif ($e._var.varType == asmjit::kVarTypeInt32) ("gpd") - #elif ($e._var.varType == asmjit::kVarTypeUInt32) ("gpd") - #elif ($e._var.varType == asmjit::kVarTypeInt64) ("gpq") - #elif ($e._var.varType == asmjit::kVarTypeUInt64) ("gpq") - #elif ($e._var.varType == asmjit::kVarTypeFp32) ("fp32") - #elif ($e._var.varType == asmjit::kVarTypeFp64) ("fp64") - #elif ($e._var.varType == asmjit::kVarTypeFpEx) ("fpEx") - #elif ($e._var.varType == asmjit::x86x64::kVarTypeMm) ("mm") - #elif ($e._var.varType == asmjit::x86x64::kVarTypeXmm) ("xmm") - #elif ($e._var.varType == asmjit::x86x64::kVarTypeXmmSs) ("xmmSs") - #elif ($e._var.varType == asmjit::x86x64::kVarTypeXmmSd) ("xmmSd") - #elif ($e._var.varType == asmjit::x86x64::kVarTypeXmmPs) ("xmmPs") - #elif ($e._var.varType == asmjit::x86x64::kVarTypeXmmPd) ("xmmPd") - #elif ($e._var.varType == asmjit::x86x64::kVarTypeYmm) ("ymm") - #elif ($e._var.varType == asmjit::x86x64::kVarTypeYmmPs) ("ymmPs") - #elif ($e._var.varType == asmjit::x86x64::kVarTypeYmmPd) ("ymmPd") - #else ("noVar"), - "(", - "id=", - #if ($e._var.id != asmjit::kInvalidValue) ( - [$e._var.id, x] - ) - #else ( - "INVALID" - ), - ")" - ) - ) - - children( - #( - #([...]: [$c,!]), - #(op: [$e._var.op, x]), - #(size: [$e._var.size, u]), - #(id: [$e._var.id, x]), - #(code: [$e._var.code, x]), - #(varType: [$e._var.varType, x]) - ) - ) -} - -asmjit::BaseMem|asmjit::x86x64::Mem { - preview( - #( - #if ($e._mem.size == 1) ("byte ptr") - #elif ($e._mem.size == 2) ("word ptr") - #elif ($e._mem.size == 4) ("dword ptr") - #elif ($e._mem.size == 8) ("qword ptr") - #elif ($e._mem.size == 10) 
("tword ptr") - #elif ($e._mem.size == 16) ("dqword ptr") - #elif ($e._mem.size == 32) ("qqword ptr") - #else ("ptr"), - - #if ($e._mem.segment == 0) (" es:") - #elif ($e._mem.segment == 1) (" cs:") - #elif ($e._mem.segment == 2) (" ss:") - #elif ($e._mem.segment == 3) (" ds:") - #elif ($e._mem.segment == 4) (" fs:") - #elif ($e._mem.segment == 5) (" gs:") - #else (""), - - "[", - - #if ($e._mem.id == asmjit::kInvalidValue) ( - #( - #if ($e._mem.type == asmjit::kMemTypeBaseIndex) ( - #if ((sizeof(uintptr_t) == 4) && ($e._mem.sizePrefix == 1)) ( - #if (($e._mem.base & 0xFF) == 0x0) ("ax") - #elif (($e._mem.base & 0xFF) == 0x1) ("cx") - #elif (($e._mem.base & 0xFF) == 0x2) ("dx") - #elif (($e._mem.base & 0xFF) == 0x3) ("bx") - #elif (($e._mem.base & 0xFF) == 0x4) ("sp") - #elif (($e._mem.base & 0xFF) == 0x5) ("bp") - #elif (($e._mem.base & 0xFF) == 0x6) ("si") - #elif (($e._mem.base & 0xFF) == 0x7) ("di") - #elif (($e._mem.base & 0xFF) == 0x8) ("r8w") - #elif (($e._mem.base & 0xFF) == 0x9) ("r9w") - #elif (($e._mem.base & 0xFF) == 0xA) ("r10w") - #elif (($e._mem.base & 0xFF) == 0xB) ("r11w") - #elif (($e._mem.base & 0xFF) == 0xC) ("r12w") - #elif (($e._mem.base & 0xFF) == 0xD) ("r13w") - #elif (($e._mem.base & 0xFF) == 0xE) ("r14w") - #elif (($e._mem.base & 0xFF) == 0xF) ("r15w") - #else ("INVALID") - ) - #elif ((sizeof(uintptr_t) == 4) || ($e._mem.sizePrefix == 1)) ( - #if (($e._mem.base & 0xFF) == 0x0) ("eax") - #elif (($e._mem.base & 0xFF) == 0x1) ("ecx") - #elif (($e._mem.base & 0xFF) == 0x2) ("edx") - #elif (($e._mem.base & 0xFF) == 0x3) ("ebx") - #elif (($e._mem.base & 0xFF) == 0x4) ("esp") - #elif (($e._mem.base & 0xFF) == 0x5) ("ebp") - #elif (($e._mem.base & 0xFF) == 0x6) ("esi") - #elif (($e._mem.base & 0xFF) == 0x7) ("edi") - #elif (($e._mem.base & 0xFF) == 0x8) ("r8d") - #elif (($e._mem.base & 0xFF) == 0x9) ("r9d") - #elif (($e._mem.base & 0xFF) == 0xA) ("r10d") - #elif (($e._mem.base & 0xFF) == 0xB) ("r11d") - #elif (($e._mem.base & 0xFF) == 0xC) 
("r12d") - #elif (($e._mem.base & 0xFF) == 0xD) ("r13d") - #elif (($e._mem.base & 0xFF) == 0xE) ("r14d") - #elif (($e._mem.base & 0xFF) == 0xF) ("r15d") - #else ("INVALID") - ) - #else ( - #if (($e._mem.base & 0xFF) == 0x0) ("rax") - #elif (($e._mem.base & 0xFF) == 0x1) ("rcx") - #elif (($e._mem.base & 0xFF) == 0x2) ("rdx") - #elif (($e._mem.base & 0xFF) == 0x3) ("rbx") - #elif (($e._mem.base & 0xFF) == 0x4) ("rsp") - #elif (($e._mem.base & 0xFF) == 0x5) ("rbp") - #elif (($e._mem.base & 0xFF) == 0x6) ("rsi") - #elif (($e._mem.base & 0xFF) == 0x7) ("rdi") - #elif (($e._mem.base & 0xFF) == 0x8) ("r8") - #elif (($e._mem.base & 0xFF) == 0x9) ("r9") - #elif (($e._mem.base & 0xFF) == 0xA) ("r10") - #elif (($e._mem.base & 0xFF) == 0xB) ("r11") - #elif (($e._mem.base & 0xFF) == 0xC) ("r12") - #elif (($e._mem.base & 0xFF) == 0xD) ("r13") - #elif (($e._mem.base & 0xFF) == 0xE) ("r14") - #elif (($e._mem.base & 0xFF) == 0xF) ("r15") - #else ("INVALID") - ) - ) - #elif ($e._mem.type == asmjit::kMemTypeLabel) ( - #( - "L.", - #if ($e._mem.base != asmjit::kInvalidValue) ( - [$e._mem.base & 0x3FFFFFFF, x] - ) - #else ( - "INVALID" - ) - ) - ) - #else ( - [$e._mem.target] - ) - ) - ) - #else ( - #("{id=", [$e._mem.id, x], "}") - ), - - #if ($e._mem.index != asmjit::kInvalidValue) ( - #( - " + ", - - #if ((sizeof(uintptr_t) == 4) && ($e._mem.sizePrefix == 1)) ( - #if (($e._mem.index & 0xFF) == 0x0) ("ax") - #elif (($e._mem.index & 0xFF) == 0x1) ("cx") - #elif (($e._mem.index & 0xFF) == 0x2) ("dx") - #elif (($e._mem.index & 0xFF) == 0x3) ("bx") - #elif (($e._mem.index & 0xFF) == 0x4) ("sp") - #elif (($e._mem.index & 0xFF) == 0x5) ("bp") - #elif (($e._mem.index & 0xFF) == 0x6) ("si") - #elif (($e._mem.index & 0xFF) == 0x7) ("di") - #elif (($e._mem.index & 0xFF) == 0x8) ("r8w") - #elif (($e._mem.index & 0xFF) == 0x9) ("r9w") - #elif (($e._mem.index & 0xFF) == 0xA) ("r10w") - #elif (($e._mem.index & 0xFF) == 0xB) ("r11w") - #elif (($e._mem.index & 0xFF) == 0xC) ("r12w") - #elif 
(($e._mem.index & 0xFF) == 0xD) ("r13w") - #elif (($e._mem.index & 0xFF) == 0xE) ("r14w") - #elif (($e._mem.index & 0xFF) == 0xF) ("r15w") - #else ("INVALID") - ) - #elif ((sizeof(uintptr_t) == 4) || ($e._mem.sizePrefix == 1)) ( - #if (($e._mem.index & 0xFF) == 0x0) ("eax") - #elif (($e._mem.index & 0xFF) == 0x1) ("ecx") - #elif (($e._mem.index & 0xFF) == 0x2) ("edx") - #elif (($e._mem.index & 0xFF) == 0x3) ("ebx") - #elif (($e._mem.index & 0xFF) == 0x4) ("esp") - #elif (($e._mem.index & 0xFF) == 0x5) ("ebp") - #elif (($e._mem.index & 0xFF) == 0x6) ("esi") - #elif (($e._mem.index & 0xFF) == 0x7) ("edi") - #elif (($e._mem.index & 0xFF) == 0x8) ("r8d") - #elif (($e._mem.index & 0xFF) == 0x9) ("r9d") - #elif (($e._mem.index & 0xFF) == 0xA) ("r10d") - #elif (($e._mem.index & 0xFF) == 0xB) ("r11d") - #elif (($e._mem.index & 0xFF) == 0xC) ("r12d") - #elif (($e._mem.index & 0xFF) == 0xD) ("r13d") - #elif (($e._mem.index & 0xFF) == 0xE) ("r14d") - #elif (($e._mem.index & 0xFF) == 0xF) ("r15d") - #else ("INVALID") - ) - #else ( - #if (($e._mem.index & 0xFF) == 0x0) ("rax") - #elif (($e._mem.index & 0xFF) == 0x1) ("rcx") - #elif (($e._mem.index & 0xFF) == 0x2) ("rdx") - #elif (($e._mem.index & 0xFF) == 0x3) ("rbx") - #elif (($e._mem.index & 0xFF) == 0x4) ("rsp") - #elif (($e._mem.index & 0xFF) == 0x5) ("rbp") - #elif (($e._mem.index & 0xFF) == 0x6) ("rsi") - #elif (($e._mem.index & 0xFF) == 0x7) ("rdi") - #elif (($e._mem.index & 0xFF) == 0x8) ("r8") - #elif (($e._mem.index & 0xFF) == 0x9) ("r9") - #elif (($e._mem.index & 0xFF) == 0xA) ("r10") - #elif (($e._mem.index & 0xFF) == 0xB) ("r11") - #elif (($e._mem.index & 0xFF) == 0xC) ("r12") - #elif (($e._mem.index & 0xFF) == 0xD) ("r13") - #elif (($e._mem.index & 0xFF) == 0xE) ("r14") - #elif (($e._mem.index & 0xFF) == 0xF) ("r15") - #else ("INVALID") - ), - - #if ($e._mem.shift == 1) (" * 2") - #elif ($e._mem.shift == 2) (" * 4") - #elif ($e._mem.shift == 3) (" * 8") - #else ("") - ) - ), - - #if (($e._mem.type != 
asmjit::kMemTypeAbsolute) && ($e._mem.displacement != 0)) ( - #if ($e._mem.displacement < 0) ( - #(" - ", [-$e._mem.displacement, i]) - ) - #else ( - #(" + ", [$e._mem.displacement, i]) - ) - ), - - "]" - ) - ) - - children( - #( - #([...]: [$c,!]), - - #(op: [$e._mem.op, x]), - #(size: [$e._mem.size, u]), - - #(type: [$e._mem.type, u]), - #(segment: [$e._mem.segment, u]), - - #(sizePrefix: [$e._mem.sizePrefix, u]), - #(shift: [$e._mem.shift, u]), - - #(id: [$e._mem.id, x]), - #(base: [$e._mem.base, u]), - #(index: [$e._mem.index, u]), - - #(target: [$e._mem.target]), - #(displacement: [$e._mem.displacement, i]) - ) - ) -} - -asmjit::Imm { - preview( - #( - "i=", [(int64_t)$e._imm.value, i], - " ", - "u=", [(uint64_t)$e._imm.value, u], - " ", - "x=", [(uint64_t)$e._imm.value, x] - ) - ) - - children( - #( - #([...]: [$c,!]), - - #(op: [$e._imm.op, x]), - #(size: [$e._imm.size, u]), - - #(value_s: [(int64_t)$e._imm.value, i]), - #(value_u: [(uint64_t)$e._imm.value, u]), - #(value_x: [(uint64_t)$e._imm.value, x]) - ) - ) -} - -asmjit::Label { - preview( - #( - "L_", - #if ($e._label.id != asmjit::kInvalidValue) ( - [$e._label.id, x] - ) - #else ( - "INVALID" - ), - "" - ) - ) - - children( - #( - #([...]: [$c,!]), - - #(op: [$e._label.op, x]), - #(size: [$e._label.size, u]), - - #(id: [$e._label.id, x]) - ) - ) -} - -; ============================================================================= -; [asmjit::x86x64 - RegMask] -; ============================================================================= - -asmjit::x86x64::RegMask { - preview( - #( - #if (($e._gp | $e._fp | $e._mm | $e._xy) != 0) ( - #( - #if ($e._gp != 0) ( - #( - "gp=", [$e._gp, x], - #if ($e._gp & 0x0001) ("|rax"), - #if ($e._gp & 0x0002) ("|rcx"), - #if ($e._gp & 0x0004) ("|rdx"), - #if ($e._gp & 0x0008) ("|rbx"), - #if ($e._gp & 0x0010) ("|rsp"), - #if ($e._gp & 0x0020) ("|rbp"), - #if ($e._gp & 0x0040) ("|rsi"), - #if ($e._gp & 0x0080) ("|rdi"), - #if ($e._gp & 0x0100) ("|r8"), - #if ($e._gp & 
0x0200) ("|r9"), - #if ($e._gp & 0x0400) ("|r10"), - #if ($e._gp & 0x0800) ("|r11"), - #if ($e._gp & 0x1000) ("|r12"), - #if ($e._gp & 0x2000) ("|r13"), - #if ($e._gp & 0x4000) ("|r14"), - #if ($e._gp & 0x8000) ("|r15"), - #if (($e._fp | $e._mm | $e._xy) != 0) (" ") - ) - ), - - #if ($e._fp != 0) ( - #( - "fp=", [$e._fp, x], - #if ($e._fp & 0x0001) ("|fp0"), - #if ($e._fp & 0x0002) ("|fp1"), - #if ($e._fp & 0x0004) ("|fp2"), - #if ($e._fp & 0x0008) ("|fp3"), - #if ($e._fp & 0x0010) ("|fp4"), - #if ($e._fp & 0x0020) ("|fp5"), - #if ($e._fp & 0x0040) ("|fp6"), - #if ($e._fp & 0x0080) ("|fp7"), - #if (($e._mm | $e._xy) != 0) (" ") - ) - ), - - #if ($e._mm != 0) ( - #( - "mm=", [$e._mm, x], - #if ($e._mm & 0x0001) ("|mm0"), - #if ($e._mm & 0x0002) ("|mm1"), - #if ($e._mm & 0x0004) ("|mm2"), - #if ($e._mm & 0x0008) ("|mm3"), - #if ($e._mm & 0x0010) ("|mm4"), - #if ($e._mm & 0x0020) ("|mm5"), - #if ($e._mm & 0x0040) ("|mm6"), - #if ($e._mm & 0x0080) ("|mm7"), - #if ($e._xy != 0) (" ") - ) - ), - - #if ($e._xy != 0) ( - #( - "xy=", [$e._xy, x], - #if ($e._xy & 0x0001) ("|xy0"), - #if ($e._xy & 0x0002) ("|xy1"), - #if ($e._xy & 0x0004) ("|xy2"), - #if ($e._xy & 0x0008) ("|xy3"), - #if ($e._xy & 0x0010) ("|xy4"), - #if ($e._xy & 0x0020) ("|xy5"), - #if ($e._xy & 0x0040) ("|xy6"), - #if ($e._xy & 0x0080) ("|xy7"), - #if ($e._xy & 0x0100) ("|xy8"), - #if ($e._xy & 0x0200) ("|xy9"), - #if ($e._xy & 0x0400) ("|xy10"), - #if ($e._xy & 0x0800) ("|xy11"), - #if ($e._xy & 0x1000) ("|xy12"), - #if ($e._xy & 0x2000) ("|xy13"), - #if ($e._xy & 0x4000) ("|xy14"), - #if ($e._xy & 0x8000) ("|xy15") - ) - ) - ) - ) - #else ( - "empty" - ) - ) - ) - - children( - #( - #([...]: [$c,!]), - - #(gp: [$e._gp, x]), - #(fp: [$e._fp, x]), - #(mm: [$e._mm, x]), - #(xy: [$e._xy, x]) - ) - ) -} - -; ============================================================================= -; [asmjit::x86x64 - Var] -; ============================================================================= - 
-asmjit::BaseVarAttr|asmjit::x86x64::VarAttr { - preview( - #( - "VarAttr(", - #if ($e._vd != NULL) ( - #( - "id=", - [$e._vd->_id, x], - " ", - - #if (($e._vd->_contextId) != asmjit::kInvalidValue) ( - #("cid=", [($e._vd->_contextId), u], " ") - ), - - #if (($e._vd->_name) != NULL) ( - #("name=", [($e._vd->_name), s], " ") - ), - - "state=", - #if ($e._vd->_state == asmjit::kVarStateUnused) ("unused") - #elif ($e._vd->_state == asmjit::kVarStateReg) (#("reg|", [$e._vd->_regIndex, u])) - #elif ($e._vd->_state == asmjit::kVarStateMem) ("mem") - #else ("INVALID"), - " ", - - #if (($e._flags & (asmjit::kVarAttrInReg | asmjit::kVarAttrInMem | asmjit::kVarAttrInDecide | asmjit::kVarAttrInConv | asmjit::kVarAttrInCall | asmjit::kVarAttrInArg)) != 0) ( - #( - "in[", - #if (($e._flags & asmjit::kVarAttrInReg) != 0) ("reg "), - #if (($e._flags & asmjit::kVarAttrInMem) != 0) ("mem "), - #if (($e._flags & asmjit::kVarAttrInDecide) != 0) ("decide "), - #if (($e._flags & asmjit::kVarAttrInConv) != 0) ("conv "), - #if (($e._flags & asmjit::kVarAttrInCall) != 0) ("call "), - #if (($e._flags & asmjit::kVarAttrInArg) != 0) ("arg "), - "] " - ) - ), - #if (($e._flags & (asmjit::kVarAttrOutReg | asmjit::kVarAttrOutMem | asmjit::kVarAttrOutDecide | asmjit::kVarAttrOutConv | asmjit::kVarAttrOutRet)) != 0) ( - #( - "out[", - #if (($e._flags & asmjit::kVarAttrOutReg) != 0) ("reg "), - #if (($e._flags & asmjit::kVarAttrOutMem) != 0) ("mem "), - #if (($e._flags & asmjit::kVarAttrOutDecide) != 0) ("decide "), - #if (($e._flags & asmjit::kVarAttrOutConv) != 0) ("conv "), - #if (($e._flags & asmjit::kVarAttrOutRet) != 0) ("ret "), - "] " - ) - ), - #if (($e._flags & asmjit::kVarAttrUnuse) == asmjit::kVarAttrUnuse) ("unuse ") - ) - ) - #else ( - "INVALID " - ), - ")" - ) - ) - - children( - #( - #([...]: [$c,!]), - - #(vd: [(asmjit::x86x64::VarData*)$e._vd]), - #(flags: [$e._flags, x]), - #(varCount: [$e._varCount, u]), - #(argCount: [$e._argCount, u]), - #(inRegIndex: [$e._inRegIndex, u]), - 
#(outRegIndex: [$e._outRegIndex, u]), - #(inRegs: [$e._inRegs, x]), - #(allocableRegs: [$e._allocableRegs, x]) - ) - ) -} - -asmjit::BaseVarInst|asmjit::x86x64::VarInst { - children( - #( - #([...]: [$c,!]), - - #(inRegs: [$e._inRegs]), - #(outRegs: [$e._outRegs]), - #(clobberedRegs: [$e._clobberedRegs]), - #(start: [$e._start]), - #(count: [$e._count]), - - #(vaCount: [$e._vaCount, u]), - #array( - expr: $e._list[$i], - size: $e._vaCount - ) - ) - ) -} - -; ============================================================================= -; [asmjit::X86 - Compiler - BaseNode] -; ============================================================================= - -asmjit::BaseNode|asmjit::AlignNode|asmjit::EmbedNode|asmjit::CommentNode|asmjit::HintNode|asmjit::TargetNode|asmjit::InstNode|asmjit::JumpNode::asmjit::FuncNode|asmjit::EndNode|asmjit::RetNode|asmjit::x86x64::X86X64FuncNode|asmjit::x86x64::X86X64CallNode|asmjit::SArgNode { - preview( - #( - #if ($e._type == asmjit::kNodeTypeAlign) ("AlignNode") - #elif ($e._type == asmjit::kNodeTypeEmbed) ("EmbedNode") - #elif ($e._type == asmjit::kNodeTypeComment) ("CommentNode") - #elif ($e._type == asmjit::kNodeTypeHint) ("HintNode") - #elif ($e._type == asmjit::kNodeTypeTarget) ("TargetNode") - #elif ($e._type == asmjit::kNodeTypeInst) ("InstNode") - #elif ($e._type == asmjit::kNodeTypeFunc) ("FuncNode") - #elif ($e._type == asmjit::kNodeTypeEnd) ("EndNode") - #elif ($e._type == asmjit::kNodeTypeRet) ("RetNode") - #elif ($e._type == asmjit::kNodeTypeCall) ("CallNode") - #elif ($e._type == asmjit::kNodeTypeSArg) ("SArgNode") - #else ("BaseNode"), - - "(", - #if (($e._liveness) != NULL) ("analyzed "), - #if (($e._flags & asmjit::kNodeFlagIsTranslated) != 0) ("translated "), - #if (($e._flags & asmjit::kNodeFlagIsJmp) != 0) ("jmp "), - #if (($e._flags & asmjit::kNodeFlagIsJcc) != 0) ("jcc "), - #if (($e._flags & asmjit::kNodeFlagIsTaken) != 0) ("taken "), - #if (($e._flags & asmjit::kNodeFlagIsRet) != 0) ("ret "), - ")" - ) - ) - 
- children( - #( - #([...]: [$c,!]), - - #(prev: [$e._prev]), - #(next: [$e._next]), - - #(type: [$e._type]), - #(flags: [$e._flags]), - #(flowId: [$e._flowId]), - #(comment: [$e._comment]), - - #(varInst: [( (asmjit::x86x64::VarInst*)($e._varInst) )]), - #(liveness: [( (asmjit::VarBits*)($e._liveness) )]), - #(state: [( (asmjit::x86x64::VarState*)($e._state) )]), - - #if ($e._type == asmjit::kNodeTypeAlign) ( - #( - #(size : [( ((asmjit::AlignNode*)&$e)->_size )]) - ) - ) - #elif (($e._type == asmjit::kNodeTypeEmbed) && (($e._packedData.embed.size) <= (sizeof(uintptr_t)))) ( - #( - #(size : [( ((asmjit::EmbedNode*)&$e)->_size )]), - #(data : [( ((asmjit::EmbedNode*)&$e)->_data.buf )]) - ) - ) - #elif (($e._type == asmjit::kNodeTypeEmbed) && (($e._packedData.embed.size) > (sizeof(uintptr_t)))) ( - #( - #(size : [( ((asmjit::EmbedNode*)&$e)->_size )]), - #(data : [( ((asmjit::EmbedNode*)&$e)->_data.ptr )]) - ) - ) - #elif ($e._type == asmjit::kNodeTypeHint) ( - #( - #(var : [( (asmjit::x86x64::VarData*) (((asmjit::HintNode*)&$e)->_var) )]), - #(hint : [( ((asmjit::HintNode*)&$e)->_hint )]), - #(value : [( ((asmjit::HintNode*)&$e)->_value )]) - ) - ) - #elif ($e._type == asmjit::kNodeTypeTarget) ( - #( - #(label : [( ((asmjit::TargetNode*)&$e)->_label )]), - #(from : [( ((asmjit::TargetNode*)&$e)->_from )]), - #(numRefs: [( ((asmjit::TargetNode*)&$e)->_numRefs )]) - ) - ) - #elif ($e._type == asmjit::kNodeTypeInst) ( - #( - #(opCount: [( ((asmjit::x86x64::X86X64InstNode*)&$e)->_opCount )]), - #array( - expr: ( ((asmjit::x86x64::X86X64InstNode*)&$e)->_opList[$i] ), - size: ( ((asmjit::x86x64::X86X64InstNode*)&$e)->_opCount ) - ) - ) - ) - #elif ($e._type == asmjit::kNodeTypeFunc) ( - #( - #(entryTarget : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_entryTarget )]), - #(exitTarget : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_exitTarget )]), - #(decl : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_x86Decl )]), - #(end : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_end )]), 
- #(argList : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_argList )]), - #(funcHints : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_funcHints )]), - #(funcFlags : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_funcFlags )]), - - #(expectedStackAlignment: [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_expectedStackAlignment )]), - #(requiredStackAlignment: [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_requiredStackAlignment )]), - - #(redZoneSize : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_redZoneSize )]), - #(spillZoneSize : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_spillZoneSize )]), - #(argStackSize : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_argStackSize )]), - #(memStackSize : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_memStackSize )]), - #(callStackSize : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_callStackSize )]), - - ; X86. - #(saveRestoreRegs : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_saveRestoreRegs )]), - - #(alignStackSize : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_alignStackSize )]), - #(alignedMemStackSize : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_alignedMemStackSize )]), - #(pushPopStackSize : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_pushPopStackSize )]), - #(moveStackSize : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_moveStackSize )]), - #(extraStackSize : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_extraStackSize )]), - - #(stackFrameRegIndex : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_stackFrameRegIndex )]), - #(stackFrameRegPreserved: [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_isStackFrameRegPreserved )]), - #(stackFrameCopyGpIndex : [( ((asmjit::x86x64::X86X64FuncNode*)&$e)->_stackFrameCopyGpIndex )]) - ) - ) - ) - ) -} diff --git a/tools/configure-mac-xcode.sh b/tools/configure-mac-xcode.sh index e86b356..45ad3ef 100644 --- a/tools/configure-mac-xcode.sh +++ b/tools/configure-mac-xcode.sh @@ -5,5 +5,5 @@ ASMJIT_BUILD_DIR="build_xcode" mkdir ../${ASMJIT_BUILD_DIR} cd ../${ASMJIT_BUILD_DIR} -cmake .. 
-G"Xcode" -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 +cmake .. -G"Xcode" -DASMJIT_BUILD_TEST=1 cd ${ASMJIT_CURRENT_DIR} diff --git a/tools/configure-unix-makefiles-rel.sh b/tools/configure-unix-makefiles-rel.sh index d999f2a..91e556b 100644 --- a/tools/configure-unix-makefiles-rel.sh +++ b/tools/configure-unix-makefiles-rel.sh @@ -5,5 +5,5 @@ ASMJIT_BUILD_DIR="build_makefiles_rel" mkdir ../${ASMJIT_BUILD_DIR} cd ../${ASMJIT_BUILD_DIR} -cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Release -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 +cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Release -DASMJIT_BUILD_TEST=1 cd ${ASMJIT_CURRENT_DIR} diff --git a/tools/configure-win-mingw-dbg.bat b/tools/configure-win-mingw-dbg.bat index c68740b..03024e3 100644 --- a/tools/configure-win-mingw-dbg.bat +++ b/tools/configure-win-mingw-dbg.bat @@ -5,5 +5,5 @@ set ASMJIT_BUILD_DIR="build_mingw_dbg" mkdir ..\%ASMJIT_BUILD_DIR% cd ..\%ASMJIT_BUILD_DIR% -cmake .. -G"MinGW Makefiles" -DCMAKE_BUILD_TYPE=Debug -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 +cmake .. -G"MinGW Makefiles" -DCMAKE_BUILD_TYPE=Debug -DASMJIT_BUILD_TEST=1 cd %ASMJIT_CURRENT_DIR% diff --git a/tools/configure-win-mingw-rel.bat b/tools/configure-win-mingw-rel.bat index 047a60e..f311383 100644 --- a/tools/configure-win-mingw-rel.bat +++ b/tools/configure-win-mingw-rel.bat @@ -5,5 +5,5 @@ set ASMJIT_BUILD_DIR="build_mingw_rel" mkdir ..\%ASMJIT_BUILD_DIR% cd ..\%ASMJIT_BUILD_DIR% -cmake .. -G"MinGW Makefiles" -DCMAKE_BUILD_TYPE=Release -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 +cmake .. 
-G"MinGW Makefiles" -DCMAKE_BUILD_TYPE=Release -DASMJIT_BUILD_TEST=1 cd %ASMJIT_CURRENT_DIR% diff --git a/tools/configure-win-vs2005-x64.bat b/tools/configure-win-vs2005-x64.bat deleted file mode 100644 index 3542bb4..0000000 --- a/tools/configure-win-vs2005-x64.bat +++ /dev/null @@ -1,9 +0,0 @@ -@echo off - -set ASMJIT_CURRENT_DIR=%CD% -set ASMJIT_BUILD_DIR="build_vs2005_x64" - -mkdir ..\%ASMJIT_BUILD_DIR% -cd ..\%ASMJIT_BUILD_DIR% -cmake .. -G"Visual Studio 8 2005 Win64" -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 -cd %ASMJIT_CURRENT_DIR% diff --git a/tools/configure-win-vs2005-x86.bat b/tools/configure-win-vs2005-x86.bat deleted file mode 100644 index 693cd26..0000000 --- a/tools/configure-win-vs2005-x86.bat +++ /dev/null @@ -1,9 +0,0 @@ -@echo off - -set ASMJIT_CURRENT_DIR=%CD% -set ASMJIT_BUILD_DIR="build_vs2005_x86" - -mkdir ..\%ASMJIT_BUILD_DIR% -cd ..\%ASMJIT_BUILD_DIR% -cmake .. -G"Visual Studio 8 2005" -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 -cd %ASMJIT_CURRENT_DIR% diff --git a/tools/configure-win-vs2008-x64.bat b/tools/configure-win-vs2008-x64.bat deleted file mode 100644 index fc88c64..0000000 --- a/tools/configure-win-vs2008-x64.bat +++ /dev/null @@ -1,9 +0,0 @@ -@echo off - -set ASMJIT_CURRENT_DIR=%CD% -set ASMJIT_BUILD_DIR="build_vs2008_x64" - -mkdir ..\%ASMJIT_BUILD_DIR% -cd ..\%ASMJIT_BUILD_DIR% -cmake .. -G"Visual Studio 9 2008 Win64" -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 -cd %ASMJIT_CURRENT_DIR% diff --git a/tools/configure-win-vs2008-x86.bat b/tools/configure-win-vs2008-x86.bat deleted file mode 100644 index f252b83..0000000 --- a/tools/configure-win-vs2008-x86.bat +++ /dev/null @@ -1,9 +0,0 @@ -@echo off - -set ASMJIT_CURRENT_DIR=%CD% -set ASMJIT_BUILD_DIR="build_vs2008_x86" - -mkdir ..\%ASMJIT_BUILD_DIR% -cd ..\%ASMJIT_BUILD_DIR% -cmake .. 
-G"Visual Studio 9 2008" -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 -cd %ASMJIT_CURRENT_DIR% diff --git a/tools/configure-win-vs2010-x64.bat b/tools/configure-win-vs2010-x64.bat index 10623f9..296f38b 100644 --- a/tools/configure-win-vs2010-x64.bat +++ b/tools/configure-win-vs2010-x64.bat @@ -5,5 +5,5 @@ set ASMJIT_BUILD_DIR="build_vs2010_x64" mkdir ..\%ASMJIT_BUILD_DIR% cd ..\%ASMJIT_BUILD_DIR% -cmake .. -G"Visual Studio 10 Win64" -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 +cmake .. -G"Visual Studio 10 Win64" -DASMJIT_BUILD_TEST=1 cd %ASMJIT_CURRENT_DIR% diff --git a/tools/configure-win-vs2010-x86.bat b/tools/configure-win-vs2010-x86.bat index 3db715a..f99ffe4 100644 --- a/tools/configure-win-vs2010-x86.bat +++ b/tools/configure-win-vs2010-x86.bat @@ -5,5 +5,5 @@ set ASMJIT_BUILD_DIR="build_vs2010_x86" mkdir ..\%ASMJIT_BUILD_DIR% cd ..\%ASMJIT_BUILD_DIR% -cmake .. -G"Visual Studio 10" -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 +cmake .. -G"Visual Studio 10" -DASMJIT_BUILD_TEST=1 cd %ASMJIT_CURRENT_DIR% diff --git a/tools/configure-win-vs2013-x64.bat b/tools/configure-win-vs2013-x64.bat index f79cbff..03b0999 100644 --- a/tools/configure-win-vs2013-x64.bat +++ b/tools/configure-win-vs2013-x64.bat @@ -5,5 +5,5 @@ set ASMJIT_BUILD_DIR="build_vs2013_x64" mkdir ..\%ASMJIT_BUILD_DIR% cd ..\%ASMJIT_BUILD_DIR% -cmake .. -G"Visual Studio 12 Win64" -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 +cmake .. -G"Visual Studio 12 Win64" -DASMJIT_BUILD_TEST=1 cd %ASMJIT_CURRENT_DIR% diff --git a/tools/configure-win-vs2013-x86.bat b/tools/configure-win-vs2013-x86.bat index 1cc8b1d..a9c553f 100644 --- a/tools/configure-win-vs2013-x86.bat +++ b/tools/configure-win-vs2013-x86.bat @@ -5,5 +5,5 @@ set ASMJIT_BUILD_DIR="build_vs2013_x86" mkdir ..\%ASMJIT_BUILD_DIR% cd ..\%ASMJIT_BUILD_DIR% -cmake .. -G"Visual Studio 12" -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 +cmake .. 
-G"Visual Studio 12" -DASMJIT_BUILD_TEST=1 cd %ASMJIT_CURRENT_DIR% diff --git a/tools/src-gendefs.js b/tools/src-gendefs.js index 4c05318..e43555b 100644 --- a/tools/src-gendefs.js +++ b/tools/src-gendefs.js @@ -16,8 +16,7 @@ var hasOwn = Object.prototype.hasOwnProperty; // ---------------------------------------------------------------------------- function upFirst(s) { - if (!s) - return s; + if (!s) return ""; return s[0].toUpperCase() + s.substr(1); } @@ -167,7 +166,7 @@ IndexedString.prototype.index = function() { IndexedString.prototype.format = function(indent) { if (this.size === -1) - throw new Error("IndexedString not indexed yet, call index()"); + throw new Error("IndexedString - not indexed yet, call index()"); var s = ""; var array = this.array; @@ -183,16 +182,16 @@ IndexedString.prototype.format = function(indent) { IndexedString.prototype.getSize = function() { if (this.size === -1) - throw new Error("IndexedString not indexed yet, call index()"); + throw new Error("IndexedString - not indexed yet, call index()"); return this.size; }; IndexedString.prototype.getIndex = function(k) { if (this.size === -1) - throw new Error("IndexedString not indexed yet, call index()"); + throw new Error("IndexedString - not indexed yet, call index()"); if (!hasOwn.call(this.map, k)) - throw new Error("Key '" + k + "' not found in IndexedString."); + throw new Error("IndexedString - key '" + k + "' not found."); return this.map[k]; }; diff --git a/tools/src-sanity.js b/tools/src-sanity.js deleted file mode 100644 index 52dd927..0000000 --- a/tools/src-sanity.js +++ /dev/null @@ -1,293 +0,0 @@ -var assert = require("assert"); -var fs = require("fs"); -var path = require("path"); - -/** - * List all files that can be processed by sanitizer in the given directory. 
- */ -var filesToSanitize = (function() { - var listPrivate = function(array, dir, displayDir, accept) { - var files = fs.readdirSync(dir); - var subarray = []; - - for (var i = 0; i < files.length; i++) { - var baseName = files[i]; - var fullName = path.normalize(path.join(dir, baseName)); - - var stat = fs.lstatSync(fullName); - if (stat.isSymbolicLink()) { - continue; - } - - if (stat.isDirectory()) { - subarray = listPrivate(subarray, - path.join(dir, baseName), displayDir ? displayDir + "/" + baseName : baseName, accept); - continue; - } - - if (stat.isFile()) { - if (accept(baseName)) - array.push({ name: fullName, display: displayDir ? displayDir + "/" + baseName : baseName }); - continue; - } - } - - return array.concat(subarray); - }; - - return function(dir, accept) { - return listPrivate([], dir, "", accept); - }; -})(); - -/** - * Inject data into string. - */ -var inject = function(s, start, end, what) { - assert(start <= s.length); - assert(end <= s.length); - - return s.substr(0, start) + what + s.substr(end); -}; - -/** - * Is the extension c++ header file? - */ -var isCppHeaderExt = function(ext) { - return ext === ".h" || - ext === ".hh" || - ext === ".hpp" || - ext === ".hxx" ; -}; - -/** - * Is the extension c++ source file? - */ -var isCppSourceExt = function(ext) { - return ext === ".c" || - ext === ".cc" || - ext === ".cpp" || - ext === ".cxx" ; -}; - -/** - * Filter that returns true if the given file name should be processed. - */ -var filesToAccept = function(name) { - var ext = path.extname(name).toLowerCase(); - - return isCppHeaderExt(ext) || - isCppSourceExt(ext) || - ext === ".cmake" || - ext === ".m" || - ext === ".md" || - ext === ".mm" ; -}; - -/** - * Sanity spaces. - */ -var sanitySpaces = function(data, name) { - // Remove carriage return. - data = data.replace(/\r\n/g, "\n"); - // Remove spaces before the end of the line. - data = data.replace(/[ \t]+\n/g, "\n"); - // Convert tabs to spaces. 
- data = data.replace(/\t/g, " "); - - return data; -}; - -/** - * Sanity header guards. - */ -var sanityHeaderGuards = (function() { - var parseGuardName = function(data, i) { - var m = data.substr(i).match(/[\w][\d\w]*/); - return m ? m[0] : null; - }; - - var makeGuardName = function(name) { - // Remove leading '/' or '\'. - if (/^[\\\/]/.test(name)) - name = name.substr(1); - return "_" + name.toUpperCase().replace(/[\/\\\.-]/g, "_"); - }; - - var directiveMarks = [ - "#ifndef ", - "#endif // ", - "#define " - ]; - - var directiveNames = [ - "#ifndef ", - "#endif ", - "#define " - ]; - - return function(data, name) { - var i = 0; - var nl = true; - - var guard = "// " + "[Guard]" + "\n"; - var nFound = 0; - - while (i < data.length) { - if (nl && data.substr(i, guard.length) === guard) { - i += guard.length; - nFound++; - - if (i >= data.length) - break; - - for (var j = 0; j < directiveMarks.length; ) { - var m = directiveMarks[j]; - if (data.substr(i, m.length) === m && data.charAt(i + m.length) === '_') { - i += directiveMarks[j].length; - - var oldGuardName = parseGuardName(data, i); - var newGuardName; - - if (oldGuardName) { - var startPosition = i; - var endPosition = i + oldGuardName.length; - - newGuardName = makeGuardName(name); - if (oldGuardName !== newGuardName) { - console.log(name + ": " + directiveNames[j] + newGuardName); - data = inject(data, startPosition, endPosition, newGuardName); - - i += newGuardName.length; - i = data.indexOf('\n', i); - - if (i === -1) { - // Terminates the loop. - i = data.length; - j = 9999; - nl = false; - break; - } - else { - i++; - } - } - } - j += 2; - } - // Don't process '#define' directive if previous '#ifndef' wasn't matched. - else { - if (++j == 2) - break; - } - } - } - else { - nl = data.charAt(i) === '\n'; - i++; - } - } - - if (nFound & 1) { - console.log(name + ": Odd number of guards found: " + nFound); - } - - return data; - }; -})(); - -/** - * Sanity #include order. 
- */ -var sanityIncludeOrder = function(data, name, directive) { - var i = 0; - var nl = true; - - var startPosition = -1; - var endPosition = -1; - var list = null; - var replacement; - - while (i < data.length) { - if (nl && data.substr(i, directive.length) === directive) { - var iLocal = i - - if (startPosition === -1) { - startPosition = i; - list = []; - } - - for (;;) { - if (++i >= data.length) { - list.push(data.substring(iLocal, i)); - break; - } - if (data.charAt(i) === '\n') { - list.push(data.substring(iLocal, i)); - i++; - break; - } - } - } - else if (startPosition !== -1) { - assert(nl === true); - endPosition = i; - - if (list.length > 1) { - list.sort(); - replacement = list.join("\n") + "\n"; - - assert(replacement.length == endPosition - startPosition); - data = inject(data, startPosition, endPosition, replacement); - } - - startPosition = -1; - endPosition = -1; - list = null; - - nl = false; - i++; - } - else { - nl = data.charAt(i) === '\n'; - i++; - } - } - - return data; -}; - -/** - * Sanity the given data of file. - */ -var sanity = function(data, name) { - var ext = path.extname(name).toLowerCase(); - - // Sanity spaces. - data = sanitySpaces(data, name); - - // Fix C/C++ header guards and sort '#include' files. - if (isCppHeaderExt(ext)) { - data = sanityHeaderGuards(data, name); - data = sanityIncludeOrder(data, name, "#include"); - } - - return data; -}; - -/** - * Entry. - */ -var main = function(dir) { - filesToSanitize(dir, filesToAccept).forEach(function(file) { - var oldData = fs.readFileSync(file.name, "utf8"); - var newData = sanity(oldData, file.display); - - if (oldData !== newData) { - console.log(file.display + ": Writing..."); - fs.writeFileSync(file.name, newData, "utf8"); - } - }); -}; - -main(path.join(__dirname, "../src"));