mirror of
https://github.com/asmjit/asmjit.git
synced 2025-12-16 20:17:05 +03:00
[ABI] Initial AArch64 support
This commit is contained in:
@@ -55,6 +55,14 @@ if (NOT DEFINED ASMJIT_NO_X86)
|
||||
set(ASMJIT_NO_X86 FALSE)
|
||||
endif()
|
||||
|
||||
if (NOT DEFINED ASMJIT_NO_AARCH32)
|
||||
set(ASMJIT_NO_AARCH32 FALSE)
|
||||
endif()
|
||||
|
||||
if (NOT DEFINED ASMJIT_NO_AARCH64)
|
||||
set(ASMJIT_NO_AARCH64 FALSE)
|
||||
endif()
|
||||
|
||||
if (NOT DEFINED ASMJIT_NO_FOREIGN)
|
||||
set(ASMJIT_NO_FOREIGN FALSE)
|
||||
endif()
|
||||
@@ -78,7 +86,9 @@ set(ASMJIT_EMBED "${ASMJIT_EMBED}" CACHE BOOL "Embed 'asmjit
|
||||
set(ASMJIT_STATIC "${ASMJIT_STATIC}" CACHE BOOL "Build 'asmjit' library as static")
|
||||
set(ASMJIT_SANITIZE "${ASMJIT_SANITIZE}" CACHE STRING "Build with sanitizers: 'address', 'undefined', etc...")
|
||||
set(ASMJIT_NO_X86 "${ASMJIT_NO_X86}" CACHE BOOL "Disable X86/X64 backend")
|
||||
set(ASMJIT_NO_FOREIGN "${ASMJIT_NO_FOREIGN}" CACHE BOOL "Disable all foreign architectures (builds only host backend)")
|
||||
set(ASMJIT_NO_AARCH32 "${ASMJIT_NO_AARCH32}" CACHE BOOL "Disable AArch32 backend (ARM and THUMB)")
|
||||
set(ASMJIT_NO_AARCH64 "${ASMJIT_NO_AARCH64}" CACHE BOOL "Disable AArch64 backend")
|
||||
set(ASMJIT_NO_FOREIGN "${ASMJIT_NO_FOREIGN}" CACHE BOOL "Disable all foreign architectures (enables only a native architecture)")
|
||||
set(ASMJIT_NO_NATVIS "${ASMJIT_NO_NATVIS}" CACHE BOOL "Disable natvis support (embedding asmjit.natvis in PDB)")
|
||||
set(ASMJIT_NO_CUSTOM_FLAGS "${ASMJIT_NO_CUSTOM_FLAGS}" CACHE BOOL "Disable extra compilation flags added by AsmJit to its targets")
|
||||
|
||||
@@ -189,6 +199,7 @@ if (NOT ASMJIT_NO_CUSTOM_FLAGS)
|
||||
list(APPEND ASMJIT_PRIVATE_CFLAGS
|
||||
-MP # [+] Multi-Process Compilation.
|
||||
-GF # [+] Eliminate duplicate strings.
|
||||
-Zc:__cplusplus # [+] Conforming __cplusplus definition.
|
||||
-Zc:inline # [+] Remove unreferenced COMDAT.
|
||||
-Zc:strictStrings # [+] Strict const qualification of string literals.
|
||||
-Zc:threadSafeInit- # [-] Thread-safe statics.
|
||||
@@ -262,6 +273,8 @@ endif()
|
||||
foreach(build_option ASMJIT_STATIC
|
||||
# AsmJit backends selection.
|
||||
ASMJIT_NO_X86
|
||||
ASMJIT_NO_AARCH32
|
||||
ASMJIT_NO_AARCH64
|
||||
ASMJIT_NO_FOREIGN
|
||||
# AsmJit features selection.
|
||||
ASMJIT_NO_DEPRECATED
|
||||
@@ -381,6 +394,36 @@ set(ASMJIT_SRC_LIST
|
||||
asmjit/core/zonevector.cpp
|
||||
asmjit/core/zonevector.h
|
||||
|
||||
asmjit/arm.h
|
||||
asmjit/arm/armformatter.cpp
|
||||
asmjit/arm/armformatter_p.h
|
||||
asmjit/arm/armglobals.h
|
||||
asmjit/arm/armoperand.h
|
||||
asmjit/arm/a64archtraits_p.h
|
||||
asmjit/arm/a64assembler.cpp
|
||||
asmjit/arm/a64assembler.h
|
||||
asmjit/arm/a64builder.cpp
|
||||
asmjit/arm/a64builder.h
|
||||
asmjit/arm/a64compiler.cpp
|
||||
asmjit/arm/a64compiler.h
|
||||
asmjit/arm/a64emithelper.cpp
|
||||
asmjit/arm/a64emithelper_p.h
|
||||
asmjit/arm/a64emitter.h
|
||||
asmjit/arm/a64formatter.cpp
|
||||
asmjit/arm/a64formatter_p.h
|
||||
asmjit/arm/a64func.cpp
|
||||
asmjit/arm/a64func_p.h
|
||||
asmjit/arm/a64globals.h
|
||||
asmjit/arm/a64instapi.cpp
|
||||
asmjit/arm/a64instapi_p.h
|
||||
asmjit/arm/a64instdb.cpp
|
||||
asmjit/arm/a64instdb.h
|
||||
asmjit/arm/a64operand.cpp
|
||||
asmjit/arm/a64operand.h
|
||||
asmjit/arm/a64rapass.cpp
|
||||
asmjit/arm/a64rapass_p.h
|
||||
asmjit/arm/a64utils.h
|
||||
|
||||
asmjit/x86.h
|
||||
asmjit/x86/x86archtraits_p.h
|
||||
asmjit/x86/x86assembler.cpp
|
||||
@@ -504,9 +547,10 @@ if (NOT ASMJIT_EMBED)
|
||||
|
||||
asmjit_add_target(asmjit_test_assembler TEST
|
||||
SOURCES test/asmjit_test_assembler.cpp
|
||||
test/asmjit_test_assembler.h
|
||||
test/asmjit_test_assembler_a64.cpp
|
||||
test/asmjit_test_assembler_x64.cpp
|
||||
test/asmjit_test_assembler_x86.cpp
|
||||
test/asmjit_test_assembler.h
|
||||
LIBRARIES asmjit::asmjit
|
||||
CFLAGS ${ASMJIT_PRIVATE_CFLAGS}
|
||||
CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG}
|
||||
@@ -514,6 +558,7 @@ if (NOT ASMJIT_EMBED)
|
||||
|
||||
asmjit_add_target(asmjit_test_perf EXECUTABLE
|
||||
SOURCES test/asmjit_test_perf.cpp
|
||||
test/asmjit_test_perf_a64.cpp
|
||||
test/asmjit_test_perf_x86.cpp
|
||||
SOURCES test/asmjit_test_perf.h
|
||||
LIBRARIES asmjit::asmjit
|
||||
@@ -550,8 +595,9 @@ if (NOT ASMJIT_EMBED)
|
||||
endif()
|
||||
asmjit_add_target(asmjit_test_compiler TEST
|
||||
SOURCES test/asmjit_test_compiler.cpp
|
||||
test/asmjit_test_compiler_x86.cpp
|
||||
test/asmjit_test_compiler.h
|
||||
test/asmjit_test_compiler_a64.cpp
|
||||
test/asmjit_test_compiler_x86.cpp
|
||||
LIBRARIES asmjit::asmjit
|
||||
CFLAGS ${ASMJIT_PRIVATE_CFLAGS} ${sse2_flags}
|
||||
CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG}
|
||||
|
||||
62
src/asmjit/a64.h
Normal file
62
src/asmjit/a64.h
Normal file
@@ -0,0 +1,62 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_A64_H_INCLUDED
|
||||
#define ASMJIT_A64_H_INCLUDED
|
||||
|
||||
//! \addtogroup asmjit_a64
|
||||
//!
|
||||
//! ### Emitters
|
||||
//!
|
||||
//! - \ref a64::Assembler - AArch64 assembler (must read, provides examples).
|
||||
//! - \ref a64::Builder - AArch64 builder.
|
||||
//! - \ref a64::Compiler - AArch64 compiler.
|
||||
//! - \ref a64::Emitter - AArch64 emitter (abstract).
|
||||
//!
|
||||
//! ### Supported Instructions
|
||||
//!
|
||||
//! - Emitters:
|
||||
//! - \ref a64::EmitterExplicitT - Provides all instructions that use explicit
|
||||
//! operands, provides also utility functions. The member functions provided
|
||||
//! are part of all ARM/AArch64 emitters.
|
||||
//!
|
||||
//! - Instruction representation:
|
||||
//! - \ref a64::Inst::Id - instruction identifiers.
|
||||
//!
|
||||
//! ### Register Operands
|
||||
//!
|
||||
//! - \ref arm::Reg - Base class for any AArch32/AArch64 register.
|
||||
//! - \ref arm::Gp - General purpose register:
|
||||
//! - \ref arm::GpW - 32-bit register.
|
||||
//! - \ref arm::GpX - 64-bit register.
|
||||
//! - \ref arm::Vec - Vector (SIMD) register:
|
||||
//! - \ref arm::VecB - 8-bit SIMD register (AArch64 only).
|
||||
//! - \ref arm::VecH - 16-bit SIMD register (AArch64 only).
|
||||
//! - \ref arm::VecS - 32-bit SIMD register.
|
||||
//! - \ref arm::VecD - 64-bit SIMD register.
|
||||
//! - \ref arm::VecV - 128-bit SIMD register.
|
||||
//!
|
||||
//! ### Memory Operands
|
||||
//!
|
||||
//! - \ref arm::Mem - AArch32/AArch64 memory operand that provides support for all ARM addressing features
|
||||
//! including base, index, pre/post increment, and ARM-specific shift addressing and index extending.
|
||||
//!
|
||||
//! ### Other
|
||||
//!
|
||||
//! - \ref arm::Shift - Shift operation and value.
|
||||
//! - \ref a64::Utils - Utilities that can help during code generation for AArch64.
|
||||
|
||||
#include "./arm.h"
|
||||
#include "./arm/a64assembler.h"
|
||||
#include "./arm/a64builder.h"
|
||||
#include "./arm/a64compiler.h"
|
||||
#include "./arm/a64emitter.h"
|
||||
#include "./arm/a64globals.h"
|
||||
#include "./arm/a64instdb.h"
|
||||
#include "./arm/a64operand.h"
|
||||
#include "./arm/a64utils.h"
|
||||
|
||||
#endif // ASMJIT_A64_H_INCLUDED
|
||||
|
||||
62
src/asmjit/arm.h
Normal file
62
src/asmjit/arm.h
Normal file
@@ -0,0 +1,62 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_H_INCLUDED
|
||||
#define ASMJIT_ARM_H_INCLUDED
|
||||
|
||||
//! \addtogroup asmjit_arm
|
||||
//!
|
||||
//! ### Namespaces
|
||||
//!
|
||||
//! - \ref arm - arm namespace provides common functionality for both AArch32 and AArch64 backends.
|
||||
//! - \ref a64 - a64 namespace provides support for AArch64 architecture. In addition it includes
|
||||
//! \ref arm namespace, so you can only use a single namespace when targeting AArch64 architecture.
|
||||
//!
|
||||
//! ### Emitters
|
||||
//!
|
||||
//! - AArch64
|
||||
//! - \ref a64::Assembler - AArch64 assembler (must read, provides examples).
|
||||
//! - \ref a64::Builder - AArch64 builder.
|
||||
//! - \ref a64::Compiler - AArch64 compiler.
|
||||
//! - \ref a64::Emitter - AArch64 emitter (abstract).
|
||||
//!
|
||||
//! ### Supported Instructions
|
||||
//!
|
||||
//! - AArch64:
|
||||
//! - Emitters:
|
||||
//! - \ref a64::EmitterExplicitT - Provides all instructions that use explicit operands, provides also
|
||||
//! utility functions. The member functions provided are part of all AArch64 emitters.
|
||||
//! - Instruction representation:
|
||||
//! - \ref a64::Inst::Id - instruction identifiers.
|
||||
//!
|
||||
//! ### Register Operands
|
||||
//!
|
||||
//! - \ref arm::Reg - Base class for any AArch32/AArch64 register.
|
||||
//! - \ref arm::Gp - General purpose register:
|
||||
//! - \ref arm::GpW - 32-bit register.
|
||||
//! - \ref arm::GpX - 64-bit register.
|
||||
//! - \ref arm::Vec - Vector (SIMD) register:
|
||||
//! - \ref arm::VecB - 8-bit SIMD register (AArch64 only).
|
||||
//! - \ref arm::VecH - 16-bit SIMD register (AArch64 only).
|
||||
//! - \ref arm::VecS - 32-bit SIMD register.
|
||||
//! - \ref arm::VecD - 64-bit SIMD register.
|
||||
//! - \ref arm::VecV - 128-bit SIMD register.
|
||||
//!
|
||||
//! ### Memory Operands
|
||||
//!
|
||||
//! - \ref arm::Mem - AArch32/AArch64 memory operand that provides support for all ARM addressing features
|
||||
//! including base, index, pre/post increment, and ARM-specific shift addressing and index extending.
|
||||
//!
|
||||
//! ### Other
|
||||
//!
|
||||
//! - \ref arm::Shift - Shift operation and value (both AArch32 and AArch64).
|
||||
//! - \ref arm::DataType - Data type that is part of an instruction in AArch32 mode.
|
||||
//! - \ref a64::Utils - Utilities that can help during code generation for AArch64.
|
||||
|
||||
#include "./core.h"
|
||||
#include "./arm/armglobals.h"
|
||||
#include "./arm/armoperand.h"
|
||||
|
||||
#endif // ASMJIT_ARM_H_INCLUDED
|
||||
81
src/asmjit/arm/a64archtraits_p.h
Normal file
81
src/asmjit/arm/a64archtraits_p.h
Normal file
@@ -0,0 +1,81 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
|
||||
#define ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
|
||||
|
||||
#include "../core/archtraits.h"
|
||||
#include "../core/misc_p.h"
|
||||
#include "../core/type.h"
|
||||
#include "../arm/a64operand.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \cond INTERNAL
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
static const constexpr ArchTraits a64ArchTraits = {
|
||||
// SP/FP/LR/PC.
|
||||
Gp::kIdSp, Gp::kIdFp, Gp::kIdLr, 0xFF,
|
||||
|
||||
// Reserved.
|
||||
{ 0, 0, 0 },
|
||||
|
||||
// HW stack alignment (AArch64 requires stack aligned to 64 bytes).
|
||||
16,
|
||||
|
||||
// Min/max stack offset - byte addressing is the worst, VecQ addressing the best.
|
||||
4095, 65520,
|
||||
|
||||
// Instruction hints [Gp, Vec, ExtraVirt2, ExtraVirt3].
|
||||
{{
|
||||
InstHints::kPushPop,
|
||||
InstHints::kPushPop,
|
||||
InstHints::kNoHints,
|
||||
InstHints::kNoHints
|
||||
}},
|
||||
|
||||
// RegInfo.
|
||||
#define V(index) OperandSignature{arm::RegTraits<RegType(index)>::kSignature}
|
||||
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
|
||||
#undef V
|
||||
|
||||
// RegTypeToTypeId.
|
||||
#define V(index) TypeId(arm::RegTraits<RegType(index)>::kTypeId)
|
||||
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
|
||||
#undef V
|
||||
|
||||
// TypeIdToRegType.
|
||||
#define V(index) (index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt8) ? RegType::kARM_GpW : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt8) ? RegType::kARM_GpW : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt16) ? RegType::kARM_GpW : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt16) ? RegType::kARM_GpW : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt32) ? RegType::kARM_GpW : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt32) ? RegType::kARM_GpW : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt64) ? RegType::kARM_GpX : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt64) ? RegType::kARM_GpX : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kIntPtr) ? RegType::kARM_GpX : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUIntPtr) ? RegType::kARM_GpX : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat32) ? RegType::kARM_VecS : \
|
||||
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat64) ? RegType::kARM_VecD : RegType::kNone)
|
||||
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
|
||||
#undef V
|
||||
|
||||
// Word names of 8-bit, 16-bit, 32-bit, and 64-bit quantities.
|
||||
{
|
||||
ArchTypeNameId::kByte,
|
||||
ArchTypeNameId::kHWord,
|
||||
ArchTypeNameId::kWord,
|
||||
ArchTypeNameId::kXWord
|
||||
}
|
||||
};
|
||||
|
||||
//! \}
|
||||
//! \endcond
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
|
||||
5115
src/asmjit/arm/a64assembler.cpp
Normal file
5115
src/asmjit/arm/a64assembler.cpp
Normal file
File diff suppressed because it is too large
Load Diff
72
src/asmjit/arm/a64assembler.h
Normal file
72
src/asmjit/arm/a64assembler.h
Normal file
@@ -0,0 +1,72 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED
|
||||
#define ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED
|
||||
|
||||
#include "../core/assembler.h"
|
||||
#include "../arm/a64emitter.h"
|
||||
#include "../arm/a64operand.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
//! AArch64 assembler implementation.
|
||||
class ASMJIT_VIRTAPI Assembler
|
||||
: public BaseAssembler,
|
||||
public EmitterExplicitT<Assembler> {
|
||||
|
||||
public:
|
||||
typedef BaseAssembler Base;
|
||||
|
||||
//! \name Construction / Destruction
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Assembler(CodeHolder* code = nullptr) noexcept;
|
||||
ASMJIT_API virtual ~Assembler() noexcept;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Accessors
|
||||
//! \{
|
||||
|
||||
//! Gets whether the current ARM mode is THUMB (alternative to 32-bit ARM encoding).
|
||||
inline bool isInThumbMode() const noexcept { return _environment.isArchThumb(); }
|
||||
|
||||
//! Gets the current code alignment of the current mode (ARM vs THUMB).
|
||||
inline uint32_t codeAlignment() const noexcept { return isInThumbMode() ? 2 : 4; }
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Emit
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error _emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) override;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Align
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error align(AlignMode alignMode, uint32_t alignment) override;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Events
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
|
||||
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
|
||||
|
||||
//! \}
|
||||
};
|
||||
|
||||
//! \}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED
|
||||
51
src/asmjit/arm/a64builder.cpp
Normal file
51
src/asmjit/arm/a64builder.cpp
Normal file
@@ -0,0 +1,51 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include "../core/api-build_p.h"
|
||||
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_BUILDER)
|
||||
|
||||
#include "../arm/a64assembler.h"
|
||||
#include "../arm/a64builder.h"
|
||||
#include "../arm/a64emithelper_p.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
// a64::Builder - Construction & Destruction
|
||||
// =========================================
|
||||
|
||||
Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() {
|
||||
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
|
||||
assignEmitterFuncs(this);
|
||||
|
||||
if (code)
|
||||
code->attach(this);
|
||||
}
|
||||
Builder::~Builder() noexcept {}
|
||||
|
||||
// a64::Builder - Events
|
||||
// =====================
|
||||
|
||||
Error Builder::onAttach(CodeHolder* code) noexcept {
|
||||
return Base::onAttach(code);
|
||||
}
|
||||
|
||||
Error Builder::onDetach(CodeHolder* code) noexcept {
|
||||
return Base::onDetach(code);
|
||||
}
|
||||
|
||||
// a64::Builder - Finalize
|
||||
// =======================
|
||||
|
||||
Error Builder::finalize() {
|
||||
ASMJIT_PROPAGATE(runPasses());
|
||||
Assembler a(_code);
|
||||
a.addEncodingOptions(encodingOptions());
|
||||
a.addDiagnosticOptions(diagnosticOptions());
|
||||
return serializeTo(&a);
|
||||
}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_BUILDER
|
||||
57
src/asmjit/arm/a64builder.h
Normal file
57
src/asmjit/arm/a64builder.h
Normal file
@@ -0,0 +1,57 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_A64BUILDER_H_INCLUDED
|
||||
#define ASMJIT_ARM_A64BUILDER_H_INCLUDED
|
||||
|
||||
#include "../core/api-config.h"
|
||||
#ifndef ASMJIT_NO_BUILDER
|
||||
|
||||
#include "../core/builder.h"
|
||||
#include "../arm/a64emitter.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
//! AArch64 builder implementation.
|
||||
class ASMJIT_VIRTAPI Builder
|
||||
: public BaseBuilder,
|
||||
public EmitterExplicitT<Builder> {
|
||||
public:
|
||||
ASMJIT_NONCOPYABLE(Builder)
|
||||
typedef BaseBuilder Base;
|
||||
|
||||
//! \name Construction & Destruction
|
||||
//! \{
|
||||
|
||||
ASMJIT_API explicit Builder(CodeHolder* code = nullptr) noexcept;
|
||||
ASMJIT_API virtual ~Builder() noexcept;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Events
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
|
||||
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Finalize
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error finalize() override;
|
||||
|
||||
//! \}
|
||||
};
|
||||
|
||||
//! \}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_BUILDER
|
||||
#endif // ASMJIT_ARM_A64BUILDER_H_INCLUDED
|
||||
60
src/asmjit/arm/a64compiler.cpp
Normal file
60
src/asmjit/arm/a64compiler.cpp
Normal file
@@ -0,0 +1,60 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include "../core/api-build_p.h"
|
||||
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_COMPILER)
|
||||
|
||||
#include "../arm/a64assembler.h"
|
||||
#include "../arm/a64compiler.h"
|
||||
#include "../arm/a64emithelper_p.h"
|
||||
#include "../arm/a64rapass_p.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
// a64::Compiler - Construction & Destruction
|
||||
// ==========================================
|
||||
|
||||
Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() {
|
||||
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
|
||||
assignEmitterFuncs(this);
|
||||
|
||||
if (code)
|
||||
code->attach(this);
|
||||
}
|
||||
Compiler::~Compiler() noexcept {}
|
||||
|
||||
// a64::Compiler - Events
|
||||
// ======================
|
||||
|
||||
Error Compiler::onAttach(CodeHolder* code) noexcept {
|
||||
ASMJIT_PROPAGATE(Base::onAttach(code));
|
||||
Error err = addPassT<ARMRAPass>();
|
||||
|
||||
if (ASMJIT_UNLIKELY(err)) {
|
||||
onDetach(code);
|
||||
return err;
|
||||
}
|
||||
|
||||
return kErrorOk;
|
||||
}
|
||||
|
||||
Error Compiler::onDetach(CodeHolder* code) noexcept {
|
||||
return Base::onDetach(code);
|
||||
}
|
||||
|
||||
// a64::Compiler - Finalize
|
||||
// ========================
|
||||
|
||||
Error Compiler::finalize() {
|
||||
ASMJIT_PROPAGATE(runPasses());
|
||||
Assembler a(_code);
|
||||
a.addEncodingOptions(encodingOptions());
|
||||
a.addDiagnosticOptions(diagnosticOptions());
|
||||
return serializeTo(&a);
|
||||
}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_COMPILER
|
||||
235
src/asmjit/arm/a64compiler.h
Normal file
235
src/asmjit/arm/a64compiler.h
Normal file
@@ -0,0 +1,235 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_ARMCOMPILER_H_INCLUDED
|
||||
#define ASMJIT_ARM_ARMCOMPILER_H_INCLUDED
|
||||
|
||||
#include "../core/api-config.h"
|
||||
#ifndef ASMJIT_NO_COMPILER
|
||||
|
||||
#include "../core/compiler.h"
|
||||
#include "../core/type.h"
|
||||
#include "../arm/a64emitter.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
//! AArch64 compiler implementation.
|
||||
class ASMJIT_VIRTAPI Compiler
|
||||
: public BaseCompiler,
|
||||
public EmitterExplicitT<Compiler> {
|
||||
public:
|
||||
ASMJIT_NONCOPYABLE(Compiler)
|
||||
typedef BaseCompiler Base;
|
||||
|
||||
//! \name Construction & Destruction
|
||||
//! \{
|
||||
|
||||
ASMJIT_API explicit Compiler(CodeHolder* code = nullptr) noexcept;
|
||||
ASMJIT_API virtual ~Compiler() noexcept;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Virtual Registers
|
||||
//! \{
|
||||
|
||||
//! \cond INTERNAL
|
||||
template<typename RegT, typename Type>
|
||||
inline RegT _newRegInternal(const Type& type) {
|
||||
RegT reg(Globals::NoInit);
|
||||
_newReg(®, type, nullptr);
|
||||
return reg;
|
||||
}
|
||||
|
||||
template<typename RegT, typename Type, typename... Args>
|
||||
inline RegT _newRegInternal(const Type& type, const char* s, Args&&... args) {
|
||||
#ifndef ASMJIT_NO_LOGGING
|
||||
RegT reg(Globals::NoInit);
|
||||
if (sizeof...(Args) == 0)
|
||||
_newReg(®, type, s);
|
||||
else
|
||||
_newRegFmt(®, type, s, std::forward<Args>(args)...);
|
||||
return reg;
|
||||
#else
|
||||
DebugUtils::unused(std::forward<Args>(args)...);
|
||||
RegT reg(Globals::NoInit);
|
||||
_newReg(®, type, nullptr);
|
||||
return reg;
|
||||
#endif
|
||||
}
|
||||
//! \endcond
|
||||
|
||||
template<typename RegT, typename... Args>
|
||||
inline RegT newSimilarReg(const RegT& ref, Args&&... args) {
|
||||
return _newRegInternal<RegT>(ref, std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
template<typename... Args>
|
||||
inline Reg newReg(TypeId typeId, Args&&... args) { return _newRegInternal<Reg>(typeId, std::forward<Args>(args)...); }
|
||||
|
||||
template<typename... Args>
|
||||
inline Gp newGp(TypeId typeId, Args&&... args) { return _newRegInternal<Gp>(typeId, std::forward<Args>(args)...); }
|
||||
|
||||
template<typename... Args>
|
||||
inline Vec newVec(TypeId typeId, Args&&... args) { return _newRegInternal<Vec>(typeId, std::forward<Args>(args)...); }
|
||||
|
||||
template<typename... Args>
|
||||
inline Gp newInt32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kInt32, std::forward<Args>(args)...); }
|
||||
template<typename... Args>
|
||||
inline Gp newUInt32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
|
||||
|
||||
template<typename... Args>
|
||||
inline Gp newInt64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kInt64, std::forward<Args>(args)...); }
|
||||
template<typename... Args>
|
||||
inline Gp newUInt64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
|
||||
|
||||
template<typename... Args>
|
||||
inline Gp newIntPtr(Args&&... args) { return _newRegInternal<Gp>(TypeId::kIntPtr, std::forward<Args>(args)...); }
|
||||
template<typename... Args>
|
||||
inline Gp newUIntPtr(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
|
||||
|
||||
template<typename... Args>
|
||||
inline Gp newGpw(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
|
||||
template<typename... Args>
|
||||
inline Gp newGpx(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
|
||||
template<typename... Args>
|
||||
inline Gp newGpz(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
|
||||
|
||||
template<typename... Args>
|
||||
inline Vec newVecS(Args&&... args) { return _newRegInternal<Vec>(TypeId::kFloat32, std::forward<Args>(args)...); }
|
||||
|
||||
template<typename... Args>
|
||||
inline Vec newVecD(Args&&... args) { return _newRegInternal<Vec>(TypeId::kFloat64, std::forward<Args>(args)...); }
|
||||
|
||||
template<typename... Args>
|
||||
inline Vec newVecQ(Args&&... args) { return _newRegInternal<Vec>(TypeId::kUInt8x16, std::forward<Args>(args)...); }
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Stack
|
||||
//! \{
|
||||
|
||||
//! Creates a new memory chunk allocated on the current function's stack.
|
||||
inline Mem newStack(uint32_t size, uint32_t alignment, const char* name = nullptr) {
|
||||
Mem m(Globals::NoInit);
|
||||
_newStack(&m, size, alignment, name);
|
||||
return m;
|
||||
}
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Constants
|
||||
//! \{
|
||||
|
||||
//! Put data to a constant-pool and get a memory reference to it.
|
||||
inline Mem newConst(ConstPoolScope scope, const void* data, size_t size) {
|
||||
Mem m(Globals::NoInit);
|
||||
_newConst(&m, scope, data, size);
|
||||
return m;
|
||||
}
|
||||
|
||||
//! Put a BYTE `val` to a constant-pool (8 bits).
|
||||
inline Mem newByteConst(ConstPoolScope scope, uint8_t val) noexcept { return newConst(scope, &val, 1); }
|
||||
//! Put a HWORD `val` to a constant-pool (16 bits).
|
||||
inline Mem newHWordConst(ConstPoolScope scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
|
||||
//! Put a WORD `val` to a constant-pool (32 bits).
|
||||
inline Mem newWordConst(ConstPoolScope scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
|
||||
//! Put a DWORD `val` to a constant-pool (64 bits).
|
||||
inline Mem newDWordConst(ConstPoolScope scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
|
||||
|
||||
//! Put a WORD `val` to a constant-pool.
|
||||
inline Mem newInt16Const(ConstPoolScope scope, int16_t val) noexcept { return newConst(scope, &val, 2); }
|
||||
//! Put a WORD `val` to a constant-pool.
|
||||
inline Mem newUInt16Const(ConstPoolScope scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
|
||||
//! Put a DWORD `val` to a constant-pool.
|
||||
inline Mem newInt32Const(ConstPoolScope scope, int32_t val) noexcept { return newConst(scope, &val, 4); }
|
||||
//! Put a DWORD `val` to a constant-pool.
|
||||
inline Mem newUInt32Const(ConstPoolScope scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
|
||||
//! Put a QWORD `val` to a constant-pool.
|
||||
inline Mem newInt64Const(ConstPoolScope scope, int64_t val) noexcept { return newConst(scope, &val, 8); }
|
||||
//! Put a QWORD `val` to a constant-pool.
|
||||
inline Mem newUInt64Const(ConstPoolScope scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
|
||||
|
||||
//! Put a SP-FP `val` to a constant-pool.
|
||||
inline Mem newFloatConst(ConstPoolScope scope, float val) noexcept { return newConst(scope, &val, 4); }
|
||||
//! Put a DP-FP `val` to a constant-pool.
|
||||
inline Mem newDoubleConst(ConstPoolScope scope, double val) noexcept { return newConst(scope, &val, 8); }
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Instruction Options
|
||||
//! \{
|
||||
|
||||
//! Force the compiler to not follow the conditional or unconditional jump.
|
||||
inline Compiler& unfollow() noexcept { _instOptions |= InstOptions::kUnfollow; return *this; }
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Function Call & Ret Intrinsics
|
||||
//! \{
|
||||
|
||||
//! Invoke a function call without `target` type enforcement.
|
||||
inline Error invoke_(InvokeNode** out, const Operand_& target, const FuncSignature& signature) {
|
||||
return addInvokeNode(out, Inst::kIdBlr, target, signature);
|
||||
}
|
||||
|
||||
//! Invoke a function call of the given `target` and `signature` and store the added node to `out`.
|
||||
//!
|
||||
//! Creates a new \ref InvokeNode, initializes all the necessary members to match the given function `signature`,
|
||||
//! adds the node to the compiler, and stores its pointer to `out`. The operation is atomic, if anything fails
|
||||
//! nullptr is stored in `out` and error code is returned.
|
||||
inline Error invoke(InvokeNode** out, const Gp& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
|
||||
//! \overload
|
||||
inline Error invoke(InvokeNode** out, const Mem& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
|
||||
//! \overload
|
||||
inline Error invoke(InvokeNode** out, const Label& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
|
||||
//! \overload
|
||||
inline Error invoke(InvokeNode** out, const Imm& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
|
||||
//! \overload
|
||||
inline Error invoke(InvokeNode** out, uint64_t target, const FuncSignature& signature) { return invoke_(out, Imm(int64_t(target)), signature); }
|
||||
|
||||
//! Return.
|
||||
inline Error ret() { return addRet(Operand(), Operand()); }
|
||||
//! \overload
|
||||
inline Error ret(const BaseReg& o0) { return addRet(o0, Operand()); }
|
||||
//! \overload
|
||||
inline Error ret(const BaseReg& o0, const BaseReg& o1) { return addRet(o0, o1); }
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Jump Tables Support
|
||||
//! \{
|
||||
|
||||
using EmitterExplicitT<Compiler>::br;
|
||||
|
||||
//! Adds a jump to the given `target` with the provided jump `annotation`.
|
||||
inline Error br(const BaseReg& target, JumpAnnotation* annotation) { return emitAnnotatedJump(Inst::kIdBr, target, annotation); }
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Events
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
|
||||
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Finalize
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error finalize() override;
|
||||
|
||||
//! \}
|
||||
};
|
||||
|
||||
//! \}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_COMPILER
|
||||
#endif // ASMJIT_ARM_ARMCOMPILER_H_INCLUDED
|
||||
464
src/asmjit/arm/a64emithelper.cpp
Normal file
464
src/asmjit/arm/a64emithelper.cpp
Normal file
@@ -0,0 +1,464 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include "../core/api-build_p.h"
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
|
||||
#include "../core/formatter.h"
|
||||
#include "../core/funcargscontext_p.h"
|
||||
#include "../core/string.h"
|
||||
#include "../core/support.h"
|
||||
#include "../core/type.h"
|
||||
#include "../arm/a64emithelper_p.h"
|
||||
#include "../arm/a64formatter_p.h"
|
||||
#include "../arm/a64instapi_p.h"
|
||||
#include "../arm/a64operand.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
// a64::EmitHelper - Emit Operations
|
||||
// =================================
|
||||
|
||||
// Emits a move of a value of the given `typeId` between any combination of
// register and memory operands (reg<-mem, mem<-reg, reg<-reg). Returns
// kErrorInvalidState for unsupported operand/type combinations.
ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
  const Operand_& dst_,
  const Operand_& src_, TypeId typeId, const char* comment) {

  Emitter* emitter = _emitter->as<Emitter>();

  // Invalid or abstract TypeIds are not allowed.
  ASMJIT_ASSERT(TypeUtils::isValid(typeId) && !TypeUtils::isAbstract(typeId));

  emitter->setInlineComment(comment);

  // Case 1: Load from memory into a register.
  if (dst_.isReg() && src_.isMem()) {
    Reg dst(dst_.as<Reg>());
    Mem src(src_.as<Mem>());

    switch (typeId) {
      case TypeId::kInt8:
      case TypeId::kUInt8:
        return emitter->ldrb(dst.as<Gp>(), src);

      case TypeId::kInt16:
      case TypeId::kUInt16:
        return emitter->ldrh(dst.as<Gp>(), src);

      case TypeId::kInt32:
      case TypeId::kUInt32:
        return emitter->ldr(dst.as<Gp>().w(), src);

      case TypeId::kInt64:
      case TypeId::kUInt64:
        return emitter->ldr(dst.as<Gp>().x(), src);

      default: {
        if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
          return emitter->ldr(dst.as<Vec>().s(), src);

        if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
          return emitter->ldr(dst.as<Vec>().d(), src);

        if (TypeUtils::isVec128(typeId))
          return emitter->ldr(dst.as<Vec>().q(), src);

        break;
      }
    }
  }

  // Case 2: Store a register into memory.
  if (dst_.isMem() && src_.isReg()) {
    Mem dst(dst_.as<Mem>());
    Reg src(src_.as<Reg>());

    switch (typeId) {
      case TypeId::kInt8:
      case TypeId::kUInt8:
        return emitter->strb(src.as<Gp>(), dst);

      case TypeId::kInt16:
      case TypeId::kUInt16:
        return emitter->strh(src.as<Gp>(), dst);

      case TypeId::kInt32:
      case TypeId::kUInt32:
        return emitter->str(src.as<Gp>().w(), dst);

      case TypeId::kInt64:
      case TypeId::kUInt64:
        return emitter->str(src.as<Gp>().x(), dst);

      default: {
        if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
          return emitter->str(src.as<Vec>().s(), dst);

        if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
          return emitter->str(src.as<Vec>().d(), dst);

        if (TypeUtils::isVec128(typeId))
          return emitter->str(src.as<Vec>().q(), dst);

        break;
      }
    }
  }

  // Case 3: Register to register move.
  if (dst_.isReg() && src_.isReg()) {
    Reg dst(dst_.as<Reg>());
    Reg src(src_.as<Reg>());

    switch (typeId) {
      case TypeId::kInt8:
      case TypeId::kUInt8:
      case TypeId::kInt16:
      case TypeId::kUInt16:
      case TypeId::kInt32:
      case TypeId::kUInt32:
      case TypeId::kInt64:
      case TypeId::kUInt64:
        // FIX: the operands were swapped (`mov(src, dst)`), which moved the
        // destination into the source. `mov` writes its first operand, so the
        // destination must come first - consistent with every other case in
        // this function.
        return emitter->mov(dst.as<Gp>().x(), src.as<Gp>().x());

      default: {
        if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
          return emitter->fmov(dst.as<Vec>().s(), src.as<Vec>().s());

        if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
          return emitter->mov(dst.as<Vec>().b8(), src.as<Vec>().b8());

        if (TypeUtils::isVec128(typeId))
          return emitter->mov(dst.as<Vec>().b16(), src.as<Vec>().b16());

        break;
      }
    }
  }

  // Unsupported combination - clear the pending inline comment and fail.
  emitter->setInlineComment(nullptr);
  return DebugUtils::errored(kErrorInvalidState);
}
|
||||
|
||||
// Swaps the content of two registers.
//
// Not implemented for AArch64 yet - always fails with kErrorInvalidState.
Error EmitHelper::emitRegSwap(
  const BaseReg& a,
  const BaseReg& b, const char* comment) {

  DebugUtils::unused(a, b, comment);
  return DebugUtils::errored(kErrorInvalidState);
}
|
||||
|
||||
// TODO: [ARM] EmitArgMove is unfinished.
//
// Moves a function argument from `src_` (register or memory) into the
// register `dst_`, selecting a load/move instruction that performs the
// integer widening implied by `srcTypeId` -> `dstTypeId`.
Error EmitHelper::emitArgMove(
  const BaseReg& dst_, TypeId dstTypeId,
  const Operand_& src_, TypeId srcTypeId, const char* comment) {

  // Deduce optional `dstTypeId`, which may be `TypeId::kVoid` in some cases.
  if (dstTypeId == TypeId::kVoid) {
    const ArchTraits& archTraits = ArchTraits::byArch(_emitter->arch());
    dstTypeId = archTraits.regTypeToTypeId(dst_.type());
  }

  // Invalid or abstract TypeIds are not allowed.
  ASMJIT_ASSERT(TypeUtils::isValid(dstTypeId) && !TypeUtils::isAbstract(dstTypeId));
  ASMJIT_ASSERT(TypeUtils::isValid(srcTypeId) && !TypeUtils::isAbstract(srcTypeId));

  Reg dst(dst_.as<Reg>());
  Operand src(src_);

  uint32_t dstSize = TypeUtils::sizeOf(dstTypeId);
  uint32_t srcSize = TypeUtils::sizeOf(srcTypeId);

  // Integer destination - use W/X GP registers.
  if (TypeUtils::isInt(dstTypeId)) {
    if (TypeUtils::isInt(srcTypeId)) {
      // Non-zero when the 64-bit (X) register form must be used.
      uint32_t x = dstSize == 8;

      dst.setSignature(OperandSignature{x ? uint32_t(GpX::kSignature) : uint32_t(GpW::kSignature)});
      _emitter->setInlineComment(comment);

      if (src.isReg()) {
        src.setSignature(dst.signature());
        return _emitter->emit(Inst::kIdMov, dst, src);
      }
      else if (src.isMem()) {
        // Pick a load that sign/zero-extends according to the source type.
        InstId instId = Inst::kIdNone;
        switch (srcTypeId) {
          case TypeId::kInt8: instId = Inst::kIdLdrsb; break;
          case TypeId::kUInt8: instId = Inst::kIdLdrb; break;
          case TypeId::kInt16: instId = Inst::kIdLdrsh; break;
          case TypeId::kUInt16: instId = Inst::kIdLdrh; break;
          case TypeId::kInt32: instId = x ? Inst::kIdLdrsw : Inst::kIdLdr; break;
          // 32-bit unsigned load zero-extends implicitly, so force the W form.
          case TypeId::kUInt32: instId = Inst::kIdLdr; x = 0; break;
          case TypeId::kInt64: instId = Inst::kIdLdr; break;
          case TypeId::kUInt64: instId = Inst::kIdLdr; break;
          default:
            return DebugUtils::errored(kErrorInvalidState);
        }
        return _emitter->emit(instId, dst, src);
      }
    }
  }

  // Float/vector destination - use B/H/S/D/Q vector registers.
  if (TypeUtils::isFloat(dstTypeId) || TypeUtils::isVec(dstTypeId)) {
    if (TypeUtils::isFloat(srcTypeId) || TypeUtils::isVec(srcTypeId)) {
      // Narrow the destination register signature to the source size.
      switch (srcSize) {
        case 2: dst.as<Vec>().setSignature(OperandSignature{VecH::kSignature}); break;
        case 4: dst.as<Vec>().setSignature(OperandSignature{VecS::kSignature}); break;
        case 8: dst.as<Vec>().setSignature(OperandSignature{VecD::kSignature}); break;
        case 16: dst.as<Vec>().setSignature(OperandSignature{VecV::kSignature}); break;
        default:
          return DebugUtils::errored(kErrorInvalidState);
      }

      _emitter->setInlineComment(comment);

      if (src.isReg()) {
        // Scalar-sized moves (<= 4 bytes) use FMOV, wider ones vector MOV.
        InstId instId = srcSize <= 4 ? Inst::kIdFmov_v : Inst::kIdMov_v;
        src.setSignature(dst.signature());
        return _emitter->emit(instId, dst, src);
      }
      else if (src.isMem()) {
        return _emitter->emit(Inst::kIdLdr_v, dst, src);
      }
    }
  }

  return DebugUtils::errored(kErrorInvalidState);
}
|
||||
|
||||
// a64::EmitHelper - Emit Prolog & Epilog
|
||||
// ======================================
|
||||
|
||||
// Instruction ids used to save/restore either a single register (STR/LDR
// family) or a register pair (STP/LDP family) in prolog/epilog sequences.
struct LoadStoreInstructions {
  InstId singleInstId;  // Single-register form (STR / LDR).
  InstId pairInstId;    // Register-pair form (STP / LDP).
};
|
||||
|
||||
// Precomputed save/restore layout used by both emitProlog() and emitEpilog().
//
// Saved registers of each group (GP, Vec) are organized into pairs so they
// can be stored/loaded with STP/LDP; each pair records its byte offset from
// the stack pointer.
struct PrologEpilogInfo {
  struct RegPair {
    uint8_t ids[2];   // Register ids; ids[1] == BaseReg::kIdBad means a single register.
    uint16_t offset;  // Offset of this pair from SP.
  };

  struct GroupData {
    RegPair pairs[16];
    uint32_t pairCount;
  };

  Support::Array<GroupData, 2> groups;  // Indexed by RegGroup (kGp, kVec).
  uint32_t sizeTotal;                   // Total size of the save area, in bytes.

  // Builds register pairs for all registers saved by `frame`.
  Error init(const FuncFrame& frame) noexcept {
    uint32_t offset = 0;

    for (RegGroup group : Support::EnumValues<RegGroup, RegGroup::kGp, RegGroup::kVec>{}) {
      GroupData& data = groups[group];

      uint32_t n = 0;  // Fill index within the current pair (0 or 1).
      uint32_t pairCount = 0;
      RegPair* pairs = data.pairs;

      uint32_t slotSize = frame.saveRestoreRegSize(group);
      uint32_t savedRegs = frame.savedRegs(group);

      // When the frame pointer is preserved, the FP/LR pair is pinned first.
      if (group == RegGroup::kGp && frame.hasPreservedFP()) {
        // Must be at the beginning of the push/pop sequence.
        ASMJIT_ASSERT(pairCount == 0);

        pairs[0].offset = uint16_t(offset);
        pairs[0].ids[0] = Gp::kIdFp;
        pairs[0].ids[1] = Gp::kIdLr;
        offset += slotSize * 2;
        pairCount++;

        // Don't save FP/LR a second time below.
        savedRegs &= ~Support::bitMask(Gp::kIdFp, Gp::kIdLr);
      }

      // Greedily pack the remaining saved registers into pairs.
      Support::BitWordIterator<uint32_t> it(savedRegs);
      while (it.hasNext()) {
        pairs[pairCount].ids[n] = uint8_t(it.next());

        if (++n == 2) {
          pairs[pairCount].offset = uint16_t(offset);
          offset += slotSize * 2;

          n = 0;
          pairCount++;
        }
      }

      // An odd register count leaves a half-filled pair - mark its second
      // slot invalid. The full pair-sized slot is still reserved (offset
      // still advances by slotSize * 2).
      if (n == 1) {
        pairs[pairCount].ids[1] = uint8_t(BaseReg::kIdBad);
        pairs[pairCount].offset = uint16_t(offset);
        offset += slotSize * 2;
        pairCount++;
      }

      data.pairCount = pairCount;
    }

    sizeTotal = offset;
    return kErrorOk;
  }
};
|
||||
|
||||
// Emits a function prolog described by `frame`: stores all saved GP and Vec
// register pairs below SP (the first store pre-indexes SP down by the whole
// save area), optionally establishes the frame pointer, and finally reserves
// additional local stack space.
ASMJIT_FAVOR_SIZE Error EmitHelper::emitProlog(const FuncFrame& frame) {
  Emitter* emitter = _emitter->as<Emitter>();

  PrologEpilogInfo pei;
  ASMJIT_PROPAGATE(pei.init(frame));

  // Template registers / instructions per group (GP uses STR/STP, Vec uses
  // the vector forms).
  static const Support::Array<Reg, 2> groupRegs = {{ x0, d0 }};
  static const Support::Array<LoadStoreInstructions, 2> groupInsts = {{
    { Inst::kIdStr , Inst::kIdStp },
    { Inst::kIdStr_v, Inst::kIdStp_v }
  }};

  uint32_t adjustInitialOffset = pei.sizeTotal;

  for (RegGroup group : Support::EnumValues<RegGroup, RegGroup::kGp, RegGroup::kVec>{}) {
    const PrologEpilogInfo::GroupData& data = pei.groups[group];
    uint32_t pairCount = data.pairCount;

    Reg regs[2] = { groupRegs[group], groupRegs[group] };
    Mem mem = ptr(sp);

    const LoadStoreInstructions& insts = groupInsts[group];
    for (uint32_t i = 0; i < pairCount; i++) {
      const PrologEpilogInfo::RegPair& pair = data.pairs[i];

      regs[0].setId(pair.ids[0]);
      regs[1].setId(pair.ids[1]);
      mem.setOffsetLo32(pair.offset);

      // The very first store also allocates the whole save area by
      // pre-indexing SP downwards ([sp, #-size]!).
      if (pair.offset == 0 && adjustInitialOffset) {
        mem.setOffset(-int(adjustInitialOffset));
        mem.makePreIndex();
      }

      // A pair with an invalid second id is stored with the single form.
      if (pair.ids[1] == BaseReg::kIdBad)
        ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
      else
        ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));

      mem.resetToFixedOffset();

      // Establish the frame pointer right after FP/LR were stored.
      if (i == 0 && frame.hasPreservedFP()) {
        ASMJIT_PROPAGATE(emitter->mov(x29, sp));
      }
    }
  }

  // Reserve local stack. SUB takes a 12-bit immediate (optionally shifted by
  // 12), so up to 24 bits of adjustment are split into two instructions.
  if (frame.hasStackAdjustment()) {
    uint32_t adj = frame.stackAdjustment();
    if (adj <= 0xFFFu) {
      ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj));
    }
    else if (adj <= 0xFFFFFFu) {
      // TODO: [ARM] Prolog - we must touch the pages otherwise it's undefined.
      ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj & 0x000FFFu));
      ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj & 0xFFF000u));
    }
    else {
      return DebugUtils::errored(kErrorInvalidState);
    }
  }

  return kErrorOk;
}
|
||||
|
||||
// TODO: [ARM] Emit epilog.
//
// Emits a function epilog described by `frame` - the exact mirror of
// emitProlog(): releases local stack space first, restores saved register
// pairs in reverse order (the last load post-indexes SP up by the whole save
// area), and returns via LR (x30).
ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) {
  Emitter* emitter = _emitter->as<Emitter>();

  PrologEpilogInfo pei;
  ASMJIT_PROPAGATE(pei.init(frame));

  // Template registers / instructions per group (GP uses LDR/LDP, Vec uses
  // the vector forms).
  static const Support::Array<Reg, 2> groupRegs = {{ x0, d0 }};
  static const Support::Array<LoadStoreInstructions, 2> groupInsts = {{
    { Inst::kIdLdr , Inst::kIdLdp },
    { Inst::kIdLdr_v, Inst::kIdLdp_v }
  }};

  uint32_t adjustInitialOffset = pei.sizeTotal;

  // Release local stack first (mirrors the trailing SUBs of the prolog).
  // ADD takes a 12-bit immediate (optionally shifted by 12).
  if (frame.hasStackAdjustment()) {
    uint32_t adj = frame.stackAdjustment();
    if (adj <= 0xFFFu) {
      ASMJIT_PROPAGATE(emitter->add(sp, sp, adj));
    }
    else if (adj <= 0xFFFFFFu) {
      ASMJIT_PROPAGATE(emitter->add(sp, sp, adj & 0x000FFFu));
      ASMJIT_PROPAGATE(emitter->add(sp, sp, adj & 0xFFF000u));
    }
    else {
      return DebugUtils::errored(kErrorInvalidState);
    }
  }

  // Restore groups and pairs in reverse order of the prolog.
  for (int g = 1; g >= 0; g--) {
    RegGroup group = RegGroup(g);
    const PrologEpilogInfo::GroupData& data = pei.groups[group];
    uint32_t pairCount = data.pairCount;

    Reg regs[2] = { groupRegs[group], groupRegs[group] };
    Mem mem = ptr(sp);

    const LoadStoreInstructions& insts = groupInsts[group];

    for (int i = int(pairCount) - 1; i >= 0; i--) {
      const PrologEpilogInfo::RegPair& pair = data.pairs[i];

      regs[0].setId(pair.ids[0]);
      regs[1].setId(pair.ids[1]);
      mem.setOffsetLo32(pair.offset);

      // The last load (offset 0) also deallocates the save area by
      // post-indexing SP upwards ([sp], #size).
      if (pair.offset == 0 && adjustInitialOffset) {
        mem.setOffset(int(adjustInitialOffset));
        mem.makePostIndex();
      }

      // A pair with an invalid second id is loaded with the single form.
      if (pair.ids[1] == BaseReg::kIdBad)
        ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
      else
        ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));

      mem.resetToFixedOffset();
    }
  }

  // Return through the link register.
  ASMJIT_PROPAGATE(emitter->ret(x30));

  return kErrorOk;
}
|
||||
|
||||
// Thunk installed into BaseEmitter::_funcs - forwards to EmitHelper::emitProlog().
static Error ASMJIT_CDECL Emitter_emitProlog(BaseEmitter* emitter, const FuncFrame& frame) {
  EmitHelper emitHelper(emitter);
  return emitHelper.emitProlog(frame);
}

// Thunk installed into BaseEmitter::_funcs - forwards to EmitHelper::emitEpilog().
static Error ASMJIT_CDECL Emitter_emitEpilog(BaseEmitter* emitter, const FuncFrame& frame) {
  EmitHelper emitHelper(emitter);
  return emitHelper.emitEpilog(frame);
}

// Thunk installed into BaseEmitter::_funcs - forwards to EmitHelper::emitArgsAssignment().
static Error ASMJIT_CDECL Emitter_emitArgsAssignment(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args) {
  EmitHelper emitHelper(emitter);
  return emitHelper.emitArgsAssignment(frame, args);
}
|
||||
|
||||
// Installs the AArch64 implementations of prolog/epilog/args-assignment (and,
// when compiled in, formatting and validation) into the emitter's function
// table.
void assignEmitterFuncs(BaseEmitter* emitter) {
  emitter->_funcs.emitProlog = Emitter_emitProlog;
  emitter->_funcs.emitEpilog = Emitter_emitEpilog;
  emitter->_funcs.emitArgsAssignment = Emitter_emitArgsAssignment;

#ifndef ASMJIT_NO_LOGGING
  emitter->_funcs.formatInstruction = FormatterInternal::formatInstruction;
#endif

#ifndef ASMJIT_NO_VALIDATION
  emitter->_funcs.validate = InstInternal::validate;
#endif
}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_AARCH64
|
||||
50
src/asmjit/arm/a64emithelper_p.h
Normal file
50
src/asmjit/arm/a64emithelper_p.h
Normal file
@@ -0,0 +1,50 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_ARMEMITHELPER_P_H_INCLUDED
|
||||
#define ASMJIT_ARM_ARMEMITHELPER_P_H_INCLUDED
|
||||
|
||||
#include "../core/api-config.h"
|
||||
|
||||
#include "../core/emithelper_p.h"
|
||||
#include "../core/func.h"
|
||||
#include "../arm/a64emitter.h"
|
||||
#include "../arm/a64operand.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \cond INTERNAL
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
//! Helper that emits AArch64 function prologs, epilogs, and register/argument
//! moves on behalf of a `BaseEmitter`.
class EmitHelper : public BaseEmitHelper {
public:
  inline explicit EmitHelper(BaseEmitter* emitter = nullptr) noexcept
    : BaseEmitHelper(emitter) {}

  //! Moves a value of `typeId` between register and/or memory operands.
  Error emitRegMove(
    const Operand_& dst_,
    const Operand_& src_, TypeId typeId, const char* comment = nullptr) override;

  //! Swaps the content of two registers (currently unimplemented on AArch64).
  Error emitRegSwap(
    const BaseReg& a,
    const BaseReg& b, const char* comment = nullptr) override;

  //! Moves a function argument `src_` into `dst_`, widening integers as implied
  //! by the source/destination TypeIds.
  Error emitArgMove(
    const BaseReg& dst_, TypeId dstTypeId,
    const Operand_& src_, TypeId srcTypeId, const char* comment = nullptr) override;

  //! Emits a function prolog described by `frame`.
  Error emitProlog(const FuncFrame& frame);
  //! Emits a function epilog described by `frame`.
  Error emitEpilog(const FuncFrame& frame);
};
|
||||
|
||||
void assignEmitterFuncs(BaseEmitter* emitter);
|
||||
|
||||
//! \}
|
||||
//! \endcond
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // ASMJIT_ARM_ARMEMITHELPER_P_H_INCLUDED
|
||||
1228
src/asmjit/arm/a64emitter.h
Normal file
1228
src/asmjit/arm/a64emitter.h
Normal file
File diff suppressed because it is too large
Load Diff
298
src/asmjit/arm/a64formatter.cpp
Normal file
298
src/asmjit/arm/a64formatter.cpp
Normal file
@@ -0,0 +1,298 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include "../core/api-build_p.h"
|
||||
#ifndef ASMJIT_NO_LOGGING
|
||||
|
||||
#include "../core/misc_p.h"
|
||||
#include "../core/support.h"
|
||||
#include "../arm/a64formatter_p.h"
|
||||
#include "../arm/a64instapi_p.h"
|
||||
#include "../arm/a64instdb_p.h"
|
||||
#include "../arm/a64operand.h"
|
||||
|
||||
#ifndef ASMJIT_NO_COMPILER
|
||||
#include "../core/compiler.h"
|
||||
#endif
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
// a64::FormatterInternal - Format Register
|
||||
// ========================================
|
||||
|
||||
// Appends the textual name of an AArch64 register to `sb` - virtual registers
// by name/index, physical registers by their architectural name (wzr/xzr,
// wsp/sp, w0../x0.., b0../h0../s0../d0../q0.., v0..), optionally followed by
// an element-type suffix like ".4s" or an element index like ".s[1]".
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
  String& sb,
  FormatFlags flags,
  const BaseEmitter* emitter,
  Arch arch,
  RegType regType,
  uint32_t rId,
  uint32_t elementType,
  uint32_t elementIndex) noexcept {

  DebugUtils::unused(flags);
  DebugUtils::unused(arch);

  // Name letters for scalar vector registers, indexed by (regType - kARM_VecB).
  static const char bhsdq[] = "bhsdq";

  bool virtRegFormatted = false;

#ifndef ASMJIT_NO_COMPILER
  // Virtual (compiler-assigned) registers are formatted by their user name,
  // or as "%<index>" when unnamed.
  if (Operand::isVirtId(rId)) {
    if (emitter && emitter->isCompiler()) {
      const BaseCompiler* cc = static_cast<const BaseCompiler*>(emitter);
      if (cc->isVirtIdValid(rId)) {
        VirtReg* vReg = cc->virtRegById(rId);
        ASMJIT_ASSERT(vReg != nullptr);

        const char* name = vReg->name();
        if (name && name[0] != '\0')
          ASMJIT_PROPAGATE(sb.append(name));
        else
          ASMJIT_PROPAGATE(sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(rId))));

        virtRegFormatted = true;
      }
    }
  }
#else
  DebugUtils::unused(emitter, flags);
#endif

  if (!virtRegFormatted) {
    char letter = '\0';
    switch (regType) {
      case RegType::kARM_GpW:
        // Ids 31/“SP slot” have dedicated names in the W view.
        if (rId == Gp::kIdZr)
          return sb.append("wzr");
        if (rId == Gp::kIdSp)
          return sb.append("wsp");

        letter = 'w';
        break;

      case RegType::kARM_GpX:
        if (rId == Gp::kIdZr)
          return sb.append("xzr");
        if (rId == Gp::kIdSp)
          return sb.append("sp");

        letter = 'x';
        break;

      case RegType::kARM_VecB:
      case RegType::kARM_VecH:
      case RegType::kARM_VecS:
      case RegType::kARM_VecD:
      case RegType::kARM_VecV:
        letter = bhsdq[uint32_t(regType) - uint32_t(RegType::kARM_VecB)];
        // A register with an element type is always printed with 'v'.
        if (elementType)
          letter = 'v';
        break;

      default:
        // FIX: the format string was "<Reg-%u>?$u" - "$u" is not a printf
        // conversion specifier, so `rId` was passed but never formatted.
        ASMJIT_PROPAGATE(sb.appendFormat("<Reg-%u>?%u", uint32_t(regType), rId));
        break;
    }

    if (letter)
      ASMJIT_PROPAGATE(sb.appendFormat("%c%u", letter, rId));
  }

  // Append the optional element-type / element-index suffix.
  if (elementType) {
    char elementLetter = '\0';
    uint32_t elementCount = 0;  // Element count of a full 128-bit (V) register.

    switch (elementType) {
      case Vec::kElementTypeB:
        elementLetter = 'b';
        elementCount = 16;
        break;

      case Vec::kElementTypeH:
        elementLetter = 'h';
        elementCount = 8;
        break;

      case Vec::kElementTypeS:
        elementLetter = 's';
        elementCount = 4;
        break;

      case Vec::kElementTypeD:
        elementLetter = 'd';
        elementCount = 2;
        break;

      default:
        return sb.append(".<Unknown>");
    }

    if (elementLetter) {
      if (elementIndex == 0xFFFFFFFFu) {
        // No element index - print ".<count><letter>". D registers hold half
        // the elements of a full V register.
        if (regType == RegType::kARM_VecD)
          elementCount /= 2u;
        ASMJIT_PROPAGATE(sb.appendFormat(".%u%c", elementCount, elementLetter));
      }
      else {
        // Indexed element access - print ".<letter>[<index>]".
        ASMJIT_PROPAGATE(sb.appendFormat(".%c[%u]", elementLetter, elementIndex));
      }
    }
  }

  return kErrorOk;
}
|
||||
|
||||
// a64::FormatterInternal - Format Operand
|
||||
// =======================================
|
||||
|
||||
// Appends the textual representation of any operand (register, memory,
// immediate, or label) to `sb`.
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
  String& sb,
  FormatFlags flags,
  const BaseEmitter* emitter,
  Arch arch,
  const Operand_& op) noexcept {

  if (op.isReg()) {
    const BaseReg& reg = op.as<BaseReg>();

    // NOTE(review): element type/index are read through the Vec view even for
    // GP registers - presumably those bits are zero for non-vector registers;
    // confirm against the operand layout.
    uint32_t elementType = op.as<Vec>().elementType();
    uint32_t elementIndex = op.as<Vec>().elementIndex();

    // 0xFFFFFFFF signals "no element index" to formatRegister().
    if (!op.as<Vec>().hasElementIndex())
      elementIndex = 0xFFFFFFFFu;

    return formatRegister(sb, flags, emitter, arch, reg.type(), reg.id(), elementType, elementIndex);
  }

  if (op.isMem()) {
    const Mem& m = op.as<Mem>();
    ASMJIT_PROPAGATE(sb.append('['));

    if (m.hasBase()) {
      if (m.hasBaseLabel()) {
        ASMJIT_PROPAGATE(Formatter::formatLabel(sb, flags, emitter, m.baseId()));
      }
      else {
        FormatFlags modifiedFlags = flags;
        // A "register home" (spill slot) base is prefixed with '&' and never
        // shown with register casts.
        if (m.isRegHome()) {
          ASMJIT_PROPAGATE(sb.append('&'));
          modifiedFlags &= ~FormatFlags::kRegCasts;
        }
        ASMJIT_PROPAGATE(formatRegister(sb, modifiedFlags, emitter, arch, m.baseType(), m.baseId()));
      }
    }
    else {
      // ARM really requires base.
      if (m.hasIndex() || m.hasOffset()) {
        ASMJIT_PROPAGATE(sb.append("<None>"));
      }
    }

    // The post index makes it look like there was another operand, but it's
    // still the part of AsmJit's `arm::Mem` operand so it's consistent with
    // other architectures.
    if (m.isPostIndex())
      ASMJIT_PROPAGATE(sb.append(']'));

    if (m.hasIndex()) {
      ASMJIT_PROPAGATE(sb.append(", "));
      ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, m.indexType(), m.indexId()));
    }

    if (m.hasOffset()) {
      ASMJIT_PROPAGATE(sb.append(", "));

      int64_t off = int64_t(m.offset());
      uint32_t base = 10;

      // Hex formatting is requested via flags and only used for offsets > 9.
      if (Support::test(flags, FormatFlags::kHexOffsets) && uint64_t(off) > 9)
        base = 16;

      if (base == 10) {
        ASMJIT_PROPAGATE(sb.appendInt(off, base));
      }
      else {
        ASMJIT_PROPAGATE(sb.append("0x"));
        ASMJIT_PROPAGATE(sb.appendUInt(uint64_t(off), base));
      }
    }

    // Shift/extend modifier (e.g. "lsl 3"); pre/post-indexed modes don't
    // print the shift-op name.
    if (m.hasShift()) {
      ASMJIT_PROPAGATE(sb.append(' '));
      if (!m.isPreOrPost())
        ASMJIT_PROPAGATE(formatShiftOp(sb, (ShiftOp)m.predicate()));
      ASMJIT_PROPAGATE(sb.appendFormat(" %u", m.shift()));
    }

    if (!m.isPostIndex())
      ASMJIT_PROPAGATE(sb.append(']'));

    // Pre-indexed addressing is denoted by a trailing '!'.
    if (m.isPreIndex())
      ASMJIT_PROPAGATE(sb.append('!'));

    return kErrorOk;
  }

  if (op.isImm()) {
    const Imm& i = op.as<Imm>();
    int64_t val = i.value();

    if (Support::test(flags, FormatFlags::kHexImms) && uint64_t(val) > 9) {
      ASMJIT_PROPAGATE(sb.append("0x"));
      return sb.appendUInt(uint64_t(val), 16);
    }
    else {
      return sb.appendInt(val, 10);
    }
  }

  if (op.isLabel()) {
    return Formatter::formatLabel(sb, flags, emitter, op.id());
  }

  // A default-constructed / unknown operand.
  return sb.append("<None>");
}
|
||||
|
||||
// a64::FormatterInternal - Format Instruction
|
||||
// ===========================================
|
||||
|
||||
// Appends a full instruction - mnemonic, optional condition-code suffix, and
// comma-separated operands - to `sb`.
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction(
  String& sb,
  FormatFlags flags,
  const BaseEmitter* emitter,
  Arch arch,
  const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {

  DebugUtils::unused(arch);

  // Format instruction options and instruction mnemonic.
  InstId instId = inst.realId();
  if (instId < Inst::_kIdCount)
    ASMJIT_PROPAGATE(InstInternal::instIdToString(arch, instId, sb));
  else
    ASMJIT_PROPAGATE(sb.appendFormat("[InstId=#%u]", unsigned(instId)));

  // Append a condition-code suffix (e.g. ".eq") unless it's "always".
  CondCode cc = inst.armCondCode();
  if (cc != CondCode::kAL) {
    ASMJIT_PROPAGATE(sb.append('.'));
    ASMJIT_PROPAGATE(formatCondCode(sb, cc));
  }

  // Format operands; the first none-operand terminates the list.
  for (uint32_t i = 0; i < opCount; i++) {
    const Operand_& op = operands[i];
    if (op.isNone())
      break;

    ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
    ASMJIT_PROPAGATE(formatOperand(sb, flags, emitter, arch, op));
  }

  return kErrorOk;
}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_LOGGING
|
||||
59
src/asmjit/arm/a64formatter_p.h
Normal file
59
src/asmjit/arm/a64formatter_p.h
Normal file
@@ -0,0 +1,59 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED
|
||||
#define ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED
|
||||
|
||||
#include "../core/api-config.h"
|
||||
#ifndef ASMJIT_NO_LOGGING
|
||||
|
||||
#include "../core/formatter.h"
|
||||
#include "../core/string.h"
|
||||
#include "../arm/armformatter_p.h"
|
||||
#include "../arm/a64globals.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \cond INTERNAL
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
//! AArch64-specific formatting entry points; shares the common ARM helpers
//! (condition codes, shift ops) via `arm::FormatterInternal`.
namespace FormatterInternal {

using namespace arm::FormatterInternal;

//! Formats a register name (optionally with an element type/index suffix).
Error ASMJIT_CDECL formatRegister(
  String& sb,
  FormatFlags flags,
  const BaseEmitter* emitter,
  Arch arch,
  RegType regType,
  uint32_t regId,
  uint32_t elementType = 0,
  uint32_t elementIndex = 0xFFFFFFFFu) noexcept;

//! Formats any operand (register, memory, immediate, or label).
Error ASMJIT_CDECL formatOperand(
  String& sb,
  FormatFlags flags,
  const BaseEmitter* emitter,
  Arch arch,
  const Operand_& op) noexcept;

//! Formats a full instruction including its operands.
Error ASMJIT_CDECL formatInstruction(
  String& sb,
  FormatFlags flags,
  const BaseEmitter* emitter,
  Arch arch,
  const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;

} // {FormatterInternal}
|
||||
|
||||
//! \}
|
||||
//! \endcond
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_LOGGING
|
||||
#endif // ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED
|
||||
189
src/asmjit/arm/a64func.cpp
Normal file
189
src/asmjit/arm/a64func.cpp
Normal file
@@ -0,0 +1,189 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include "../core/api-build_p.h"
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
|
||||
#include "../arm/a64func_p.h"
|
||||
#include "../arm/a64operand.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
namespace FuncInternal {
|
||||
|
||||
// Returns true when the given calling convention should be treated as a plain
// AArch64 __cdecl - X86-specific conventions have no AArch64 equivalent.
//
// NOTE(review): "Threat" appears to be a typo of "Treat"; kept unchanged
// because the name is referenced elsewhere in this file.
static inline bool shouldThreatAsCDecl(CallConvId ccId) noexcept {
  return ccId == CallConvId::kCDecl ||
         ccId == CallConvId::kStdCall ||
         ccId == CallConvId::kFastCall ||
         ccId == CallConvId::kVectorCall ||
         ccId == CallConvId::kThisCall ||
         ccId == CallConvId::kRegParm1 ||
         ccId == CallConvId::kRegParm2 ||
         ccId == CallConvId::kRegParm3;
}
|
||||
|
||||
// Maps a float/vector TypeId to the AArch64 vector register type that holds
// it (S for 32-bit, D for 64-bit, V for 128-bit); returns kNone for anything
// else.
static RegType regTypeFromFpOrVecTypeId(TypeId typeId) noexcept {
  if (typeId == TypeId::kFloat32 || TypeUtils::isVec32(typeId))
    return RegType::kARM_VecS;

  if (typeId == TypeId::kFloat64 || TypeUtils::isVec64(typeId))
    return RegType::kARM_VecD;

  if (TypeUtils::isVec128(typeId))
    return RegType::kARM_VecV;

  return RegType::kNone;
}
|
||||
|
||||
ASMJIT_FAVOR_SIZE Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept {
|
||||
cc.setArch(environment.arch());
|
||||
|
||||
cc.setSaveRestoreRegSize(RegGroup::kGp, 8);
|
||||
cc.setSaveRestoreRegSize(RegGroup::kVec, 8);
|
||||
cc.setSaveRestoreAlignment(RegGroup::kGp, 16);
|
||||
cc.setSaveRestoreAlignment(RegGroup::kVec, 16);
|
||||
cc.setSaveRestoreAlignment(RegGroup::kExtraVirt2, 1);
|
||||
cc.setSaveRestoreAlignment(RegGroup::kExtraVirt3, 1);
|
||||
cc.setPassedOrder(RegGroup::kGp, 0, 1, 2, 3, 4, 5, 6, 7);
|
||||
cc.setPassedOrder(RegGroup::kVec, 0, 1, 2, 3, 4, 5, 6, 7);
|
||||
cc.setNaturalStackAlignment(16);
|
||||
|
||||
if (shouldThreatAsCDecl(ccId)) {
|
||||
// ARM doesn't have that many calling conventions as we can find in X86 world, treat most conventions as __cdecl.
|
||||
cc.setId(CallConvId::kCDecl);
|
||||
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(Gp::kIdOs, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
|
||||
cc.setPreservedRegs(RegGroup::kVec, Support::bitMask(8, 9, 10, 11, 12, 13, 14, 15));
|
||||
}
|
||||
else {
|
||||
cc.setId(ccId);
|
||||
cc.setSaveRestoreRegSize(RegGroup::kVec, 16);
|
||||
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
|
||||
cc.setPreservedRegs(RegGroup::kVec, Support::bitMask(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
|
||||
}
|
||||
|
||||
return kErrorOk;
|
||||
}
|
||||
|
||||
//! Initializes `func` (FuncDetail) - assigns return values and arguments to
//! registers or stack slots according to the function's calling convention.
//!
//! \param func         Function detail to initialize; its call convention and
//!                     argument/return type-ids must already be set.
//! \param signature    Unused here (kept for API symmetry with other backends).
//! \param registerSize Native GP register size in bytes - used as the minimum
//!                     stack-slot size for integer arguments.
//!
//! Returns `kErrorInvalidRegType` for an unrecognized FP/vector type and
//! `kErrorInvalidState` for an unsupported calling-convention strategy.
ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& signature, uint32_t registerSize) noexcept {
  DebugUtils::unused(signature);

  const CallConv& cc = func.callConv();
  // Running byte offset of stack-passed arguments.
  uint32_t stackOffset = 0;

  uint32_t i;
  uint32_t argCount = func.argCount();

  // Assign each return value of the pack to a register (r0.., v0.., indexed by
  // the value's position in the pack).
  if (func.hasRet()) {
    for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
      TypeId typeId = func._rets[valueIndex].typeId();

      // Terminate at the first void type (end of the pack).
      if (typeId == TypeId::kVoid)
        break;

      switch (typeId) {
        // Narrow signed integers are widened to a 32-bit GP register.
        case TypeId::kInt8:
        case TypeId::kInt16:
        case TypeId::kInt32: {
          func._rets[valueIndex].initReg(RegType::kARM_GpW, valueIndex, TypeId::kInt32);
          break;
        }

        // Narrow unsigned integers are widened to a 32-bit GP register.
        case TypeId::kUInt8:
        case TypeId::kUInt16:
        case TypeId::kUInt32: {
          func._rets[valueIndex].initReg(RegType::kARM_GpW, valueIndex, TypeId::kUInt32);
          break;
        }

        // 64-bit integers use a full GP register.
        case TypeId::kInt64:
        case TypeId::kUInt64: {
          func._rets[valueIndex].initReg(RegType::kARM_GpX, valueIndex, typeId);
          break;
        }

        // Everything else must map to a FP/vector register type.
        default: {
          RegType regType = regTypeFromFpOrVecTypeId(typeId);
          if (regType == RegType::kNone)
            return DebugUtils::errored(kErrorInvalidRegType);

          func._rets[valueIndex].initReg(regType, valueIndex, typeId);
          break;
        }
      }
    }
  }

  switch (cc.strategy()) {
    case CallConvStrategy::kDefault: {
      // Next positions in the GP / vector passing order.
      uint32_t gpzPos = 0;
      uint32_t vecPos = 0;

      for (i = 0; i < argCount; i++) {
        // NOTE(review): only the first value of each argument pack is assigned
        // here ([0]) - multi-value packs appear unsupported on this path.
        FuncValue& arg = func._args[i][0];
        TypeId typeId = arg.typeId();

        // Integer arguments - next free GP register, otherwise the stack.
        if (TypeUtils::isInt(typeId)) {
          uint32_t regId = BaseReg::kIdBad;

          if (gpzPos < CallConv::kMaxRegArgsPerGroup)
            regId = cc._passedOrder[RegGroup::kGp].id[gpzPos];

          if (regId != BaseReg::kIdBad) {
            // 32-bit and narrower types use a W register, 64-bit types an X register.
            RegType regType = typeId <= TypeId::kUInt32 ? RegType::kARM_GpW : RegType::kARM_GpX;
            arg.assignRegData(regType, regId);
            func.addUsedRegs(RegGroup::kGp, Support::bitMask(regId));
            gpzPos++;
          }
          else {
            // Stack slots for integers are at least `registerSize` bytes.
            uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), registerSize);
            arg.assignStackOffset(int32_t(stackOffset));
            stackOffset += size;
          }
          continue;
        }

        // FP/vector arguments - next free vector register, otherwise the stack.
        if (TypeUtils::isFloat(typeId) || TypeUtils::isVec(typeId)) {
          uint32_t regId = BaseReg::kIdBad;

          if (vecPos < CallConv::kMaxRegArgsPerGroup)
            regId = cc._passedOrder[RegGroup::kVec].id[vecPos];

          if (regId != BaseReg::kIdBad) {
            RegType regType = regTypeFromFpOrVecTypeId(typeId);
            if (regType == RegType::kNone)
              return DebugUtils::errored(kErrorInvalidRegType);

            arg.initTypeId(typeId);
            arg.assignRegData(regType, regId);
            func.addUsedRegs(RegGroup::kVec, Support::bitMask(regId));
            vecPos++;
          }
          else {
            // FP/vector stack slots use the type's natural size (no widening).
            uint32_t size = TypeUtils::sizeOf(typeId);
            arg.assignStackOffset(int32_t(stackOffset));
            stackOffset += size;
          }
          continue;
        }
      }
      break;
    }

    default:
      return DebugUtils::errored(kErrorInvalidState);
  }

  func._argStackSize = stackOffset;
  return kErrorOk;
}
|
||||
|
||||
} // {FuncInternal}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_AARCH64
|
||||
33
src/asmjit/arm/a64func_p.h
Normal file
33
src/asmjit/arm/a64func_p.h
Normal file
@@ -0,0 +1,33 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib

#ifndef ASMJIT_ARM_A64FUNC_P_H_INCLUDED
#define ASMJIT_ARM_A64FUNC_P_H_INCLUDED

#include "../core/func.h"

ASMJIT_BEGIN_SUB_NAMESPACE(a64)

//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{

//! AArch64-specific function API (calling conventions and other utilities).
namespace FuncInternal {

//! Initialize `CallConv` structure (AArch64 specific) for the convention
//! `ccId` targeting `environment`.
Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept;

//! Initialize `FuncDetail` (AArch64 specific) - assigns arguments and return
//! values to registers/stack; `registerSize` is the native GP register size.
Error initFuncDetail(FuncDetail& func, const FuncSignature& signature, uint32_t registerSize) noexcept;

} // {FuncInternal}

//! \}
//! \endcond

ASMJIT_END_SUB_NAMESPACE

#endif // ASMJIT_ARM_A64FUNC_P_H_INCLUDED
|
||||
1894
src/asmjit/arm/a64globals.h
Normal file
1894
src/asmjit/arm/a64globals.h
Normal file
File diff suppressed because it is too large
Load Diff
278
src/asmjit/arm/a64instapi.cpp
Normal file
278
src/asmjit/arm/a64instapi.cpp
Normal file
@@ -0,0 +1,278 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include "../core/api-build_p.h"
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
|
||||
#include "../core/cpuinfo.h"
|
||||
#include "../core/misc_p.h"
|
||||
#include "../core/support.h"
|
||||
#include "../arm/a64instapi_p.h"
|
||||
#include "../arm/a64instdb_p.h"
|
||||
#include "../arm/a64operand.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
// a64::InstInternal - Text
|
||||
// ========================
|
||||
|
||||
#ifndef ASMJIT_NO_TEXT
|
||||
//! Appends the textual name of `instId` to `output`.
//!
//! Returns `kErrorInvalidInstruction` when the id (after stripping the
//! non-real-id parts) is not a defined AArch64 instruction.
Error InstInternal::instIdToString(Arch arch, InstId instId, String& output) noexcept {
  DebugUtils::unused(arch);

  // Keep only the real instruction id - upper bits encode other parts.
  uint32_t id = instId & uint32_t(InstIdParts::kRealId);
  if (ASMJIT_UNLIKELY(!Inst::isDefinedId(id)))
    return DebugUtils::errored(kErrorInvalidInstruction);

  // The name is stored as an offset into the shared name-data string table.
  return output.append(InstDB::_nameData + InstDB::infoById(id)._nameDataIndex);
}
|
||||
|
||||
//! Translates an instruction name `s` (of `len` characters, or NUL-terminated
//! when `len == SIZE_MAX`) into its instruction id.
//!
//! Returns `Inst::kIdNone` when the name is null, empty, too long, or not found.
InstId InstInternal::stringToInstId(Arch arch, const char* s, size_t len) noexcept {
  DebugUtils::unused(arch);

  if (ASMJIT_UNLIKELY(!s))
    return Inst::kIdNone;

  if (len == SIZE_MAX)
    len = strlen(s);

  if (ASMJIT_UNLIKELY(len == 0 || len > InstDB::kMaxNameSize))
    return Inst::kIdNone;

  // Names are bucketed by their first letter ('a'..'z'); anything else cannot match.
  uint32_t prefix = uint32_t(s[0]) - 'a';
  if (ASMJIT_UNLIKELY(prefix > 'z' - 'a'))
    return Inst::kIdNone;

  // A zero start index means no instruction begins with this letter.
  uint32_t index = InstDB::instNameIndex[prefix].start;
  if (ASMJIT_UNLIKELY(!index))
    return Inst::kIdNone;

  const char* nameData = InstDB::_nameData;
  const InstDB::InstInfo* table = InstDB::_instInfoTable;

  const InstDB::InstInfo* base = table + index;
  const InstDB::InstInfo* end = table + InstDB::instNameIndex[prefix].end;

  // Binary search over the [base, end) bucket; the table is sorted by name.
  // `lim` halves each iteration; when the probe is too small the search window
  // moves past it (base = cur + 1, lim--), otherwise only `lim` shrinks.
  for (size_t lim = (size_t)(end - base); lim != 0; lim >>= 1) {
    const InstDB::InstInfo* cur = base + (lim >> 1);
    int result = Support::cmpInstName(nameData + cur[0]._nameDataIndex, s, len);

    if (result < 0) {
      base = cur + 1;
      lim--;
      continue;
    }

    if (result > 0)
      continue;

    // Exact match - the instruction id is the entry's index in the table.
    return uint32_t((size_t)(cur - table));
  }

  return Inst::kIdNone;
}
|
||||
#endif // !ASMJIT_NO_TEXT
|
||||
|
||||
// a64::InstInternal - Validate
|
||||
// ============================
|
||||
|
||||
#ifndef ASMJIT_NO_VALIDATION
|
||||
//! Validates the given instruction and its operands (AArch64).
//!
//! NOTE: Validation is not implemented yet for AArch64 - this stub accepts
//! everything and always returns `kErrorOk`.
ASMJIT_FAVOR_SIZE Error InstInternal::validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept {
  // TODO:
  DebugUtils::unused(arch, inst, operands, opCount, validationFlags);
  return kErrorOk;
}
|
||||
#endif // !ASMJIT_NO_VALIDATION
|
||||
|
||||
// a64::InstInternal - QueryRWInfo
|
||||
// ===============================
|
||||
|
||||
#ifndef ASMJIT_NO_INTROSPECTION
|
||||
//! Per-operand read/write flags for one RW-info category - one byte per
//! operand slot, indexed by operand position.
struct InstRWInfoData {
  uint8_t rwx[Globals::kMaxOpCount];
};

//! RW flag rows indexed by \ref InstDB::RWInfoType (the order of rows must
//! match the `kRWI_*` enumerators).
static const InstRWInfoData instRWInfoData[] = {
  #define R uint8_t(OpRWFlags::kRead)
  #define W uint8_t(OpRWFlags::kWrite)
  #define X uint8_t(OpRWFlags::kRW)

  {{ R, R, R, R, R, R }}, // kRWI_R
  {{ R, W, R, R, R, R }}, // kRWI_RW
  {{ R, X, R, R, R, R }}, // kRWI_RX
  {{ R, R, W, R, R, R }}, // kRWI_RRW
  {{ R, W, X, R, R, R }}, // kRWI_RWX
  {{ W, R, R, R, R, R }}, // kRWI_W
  {{ W, R, W, R, R, R }}, // kRWI_WRW
  {{ W, R, X, R, R, R }}, // kRWI_WRX
  {{ W, R, R, W, R, R }}, // kRWI_WRRW
  {{ W, R, R, X, R, R }}, // kRWI_WRRX
  {{ W, W, R, R, R, R }}, // kRWI_WW
  {{ X, R, R, R, R, R }}, // kRWI_X
  {{ X, R, X, R, R, R }}, // kRWI_XRX
  {{ X, X, R, R, X, R }}, // kRWI_XXRRX

  {{ W, R, R, R, R, R }}, // kRWI_LDn
  {{ R, W, R, R, R, R }}, // kRWI_STn
  {{ R, R, R, R, R, R }}  // kRWI_TODO

  #undef R
  #undef W
  #undef X
};

// Size in bytes of each vector element type, indexed by the element-type id
// (presumably None, B, H, S, D, 2H, 4B - TODO confirm against Vec element
// type enumeration; 2H/4B are 4-byte packed groups).
static const uint8_t elementTypeSize[8] = { 0, 1, 2, 4, 8, 4, 4, 0 };
|
||||
|
||||
//! Fills `out` with read/write information for `inst` and its `operands`.
//!
//! Uses the per-instruction RW table (`instRWInfoData`) to classify each
//! operand as read / write / read-write and sets byte-level access masks.
//! Instructions flagged `kInstFlagConsecutive` with more than 2 operands are
//! handled specially: the first register operand leads a run of consecutive
//! registers and the remaining ones share the second RW slot.
Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
  // Unused in Release configuration as the assert is not compiled in.
  DebugUtils::unused(arch);

  // Only called when `arch` matches the ARM family (the original comment said
  // X86 - that was a copy/paste leftover).
  ASMJIT_ASSERT(Environment::isFamilyARM(arch));

  // Get the instruction data.
  uint32_t realId = inst.id() & uint32_t(InstIdParts::kRealId);

  if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId)))
    return DebugUtils::errored(kErrorInvalidInstruction);

  out->_instFlags = 0;
  out->_opCount = uint8_t(opCount);
  out->_rmFeature = 0;
  out->_extraReg.reset();
  out->_readFlags = CpuRWFlags::kNone; // TODO: [ARM] Read PSTATUS.
  out->_writeFlags = CpuRWFlags::kNone; // TODO: [ARM] Write PSTATUS

  const InstDB::InstInfo& instInfo = InstDB::_instInfoTable[realId];
  const InstRWInfoData& rwInfo = instRWInfoData[instInfo.rwInfoIndex()];

  if (instInfo.hasFlag(InstDB::kInstFlagConsecutive) && opCount > 2) {
    // Consecutive-register form: the last operand uses RW slot [1], all
    // preceding operands use RW slot [0].
    for (uint32_t i = 0; i < opCount; i++) {
      OpRWInfo& op = out->_operands[i];
      const Operand_& srcOp = operands[i];

      if (!srcOp.isRegOrMem()) {
        op.reset();
        continue;
      }

      OpRWFlags rwFlags = i < opCount - 1 ? (OpRWFlags)rwInfo.rwx[0] : (OpRWFlags)rwInfo.rwx[1];

      op._opFlags = rwFlags & ~(OpRWFlags::kZExt);
      op._physId = BaseReg::kIdBad;
      op._rmSize = 0;
      op._resetReserved();

      // Whole register is accessed when read/written (no partial masks here).
      uint64_t rByteMask = op.isRead() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
      uint64_t wByteMask = op.isWrite() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;

      op._readByteMask = rByteMask;
      op._writeByteMask = wByteMask;
      op._extendByteMask = 0;
      op._consecutiveLeadCount = 0;

      if (srcOp.isReg()) {
        // The first register leads `opCount - 1` consecutive followers.
        if (i == 0)
          op._consecutiveLeadCount = uint8_t(opCount - 1);
        else
          op.addOpFlags(OpRWFlags::kConsecutive);
      }
      else {
        const Mem& memOp = srcOp.as<Mem>();

        if (memOp.hasBase()) {
          op.addOpFlags(OpRWFlags::kMemBaseRead);
        }

        if (memOp.hasIndex()) {
          op.addOpFlags(OpRWFlags::kMemIndexRead);
          // Pre/post-indexed addressing writes the index register back.
          op.addOpFlags(memOp.isPreOrPost() ? OpRWFlags::kMemIndexWrite : OpRWFlags::kNone);
        }
      }
    }
  }
  else {
    // Regular form: each operand position has its own RW slot.
    for (uint32_t i = 0; i < opCount; i++) {
      OpRWInfo& op = out->_operands[i];
      const Operand_& srcOp = operands[i];

      if (!srcOp.isRegOrMem()) {
        op.reset();
        continue;
      }

      OpRWFlags rwFlags = (OpRWFlags)rwInfo.rwx[i];

      op._opFlags = rwFlags & ~(OpRWFlags::kZExt);
      op._physId = BaseReg::kIdBad;
      op._rmSize = 0;
      op._resetReserved();

      uint64_t rByteMask = op.isRead() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
      uint64_t wByteMask = op.isWrite() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;

      op._readByteMask = rByteMask;
      op._writeByteMask = wByteMask;
      op._extendByteMask = 0;
      op._consecutiveLeadCount = 0;

      if (srcOp.isReg()) {
        if (srcOp.as<Vec>().hasElementIndex()) {
          // Only part of the vector is accessed if element index [] is used.
          uint32_t elementType = srcOp.as<Vec>().elementType();
          uint32_t elementIndex = srcOp.as<Vec>().elementIndex();

          // Build a byte mask covering just the accessed element.
          uint32_t elementSize = elementTypeSize[elementType];
          uint64_t accessMask = uint64_t(Support::lsbMask<uint32_t>(elementSize)) << (elementIndex * elementSize);

          op._readByteMask &= accessMask;
          op._writeByteMask &= accessMask;
        }

        // TODO: [ARM] RW info is not finished.
      }
      else {
        const Mem& memOp = srcOp.as<Mem>();

        if (memOp.hasBase()) {
          op.addOpFlags(OpRWFlags::kMemBaseRead);
        }

        if (memOp.hasIndex()) {
          op.addOpFlags(OpRWFlags::kMemIndexRead);
          // Pre/post-indexed addressing writes the index register back.
          op.addOpFlags(memOp.isPreOrPost() ? OpRWFlags::kMemIndexWrite : OpRWFlags::kNone);
        }
      }
    }
  }

  return kErrorOk;
}
|
||||
#endif // !ASMJIT_NO_INTROSPECTION
|
||||
|
||||
// a64::InstInternal - QueryFeatures
|
||||
// =================================
|
||||
|
||||
#ifndef ASMJIT_NO_INTROSPECTION
|
||||
//! Queries the CPU features required by `inst` (AArch64).
//!
//! NOTE: Not implemented yet - this stub leaves `out` untouched and always
//! returns `kErrorOk`.
Error InstInternal::queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept {
  // TODO: [ARM] QueryFeatures not implemented yet.
  DebugUtils::unused(arch, inst, operands, opCount, out);
  return kErrorOk;
}
|
||||
#endif // !ASMJIT_NO_INTROSPECTION
|
||||
|
||||
// a64::InstInternal - Unit
|
||||
// ========================
|
||||
|
||||
#if defined(ASMJIT_TEST)
|
||||
UNIT(arm_inst_api_text) {
|
||||
// TODO:
|
||||
}
|
||||
#endif
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_AARCH64
|
||||
41
src/asmjit/arm/a64instapi_p.h
Normal file
41
src/asmjit/arm/a64instapi_p.h
Normal file
@@ -0,0 +1,41 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib

#ifndef ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED
#define ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED

#include "../core/inst.h"
#include "../core/operand.h"

ASMJIT_BEGIN_SUB_NAMESPACE(a64)

//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{

//! Instruction API (AArch64) - name conversion, validation, and introspection.
namespace InstInternal {

#ifndef ASMJIT_NO_TEXT
//! Appends the textual name of `instId` to `output`.
Error ASMJIT_CDECL instIdToString(Arch arch, InstId instId, String& output) noexcept;
//! Translates instruction name `s` (of `len` chars, NUL-terminated when
//! `len == SIZE_MAX`) to an instruction id; returns `Inst::kIdNone` if unknown.
InstId ASMJIT_CDECL stringToInstId(Arch arch, const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT

#ifndef ASMJIT_NO_VALIDATION
//! Validates `inst` and its operands (currently a stub on AArch64).
Error ASMJIT_CDECL validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
#endif // !ASMJIT_NO_VALIDATION

#ifndef ASMJIT_NO_INTROSPECTION
//! Fills `out` with per-operand read/write information of `inst`.
Error ASMJIT_CDECL queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
//! Queries CPU features required by `inst` (currently a stub on AArch64).
Error ASMJIT_CDECL queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
#endif // !ASMJIT_NO_INTROSPECTION

} // {InstInternal}

//! \}
//! \endcond

ASMJIT_END_SUB_NAMESPACE

#endif // ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED
|
||||
1957
src/asmjit/arm/a64instdb.cpp
Normal file
1957
src/asmjit/arm/a64instdb.cpp
Normal file
File diff suppressed because it is too large
Load Diff
74
src/asmjit/arm/a64instdb.h
Normal file
74
src/asmjit/arm/a64instdb.h
Normal file
@@ -0,0 +1,74 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_A64INSTDB_H_INCLUDED
|
||||
#define ASMJIT_ARM_A64INSTDB_H_INCLUDED
|
||||
|
||||
#include "../arm/a64globals.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
//! Instruction database (AArch64).
|
||||
namespace InstDB {
|
||||
|
||||
//! Instruction flags.
|
||||
//! Instruction flags.
enum InstFlags : uint32_t {
  //! The instruction provides conditional execution.
  kInstFlagCond = 0x00000001u,
  //! SIMD instruction that processes elements in pairs.
  kInstFlagPair = 0x00000002u,
  //! SIMD instruction that does widening (Long).
  kInstFlagLong = 0x00000004u,
  //! SIMD instruction that does narrowing (Narrow).
  kInstFlagNarrow = 0x00000008u,
  //! SIMD element access of half-words can only be used with v0..15.
  kInstFlagVH0_15 = 0x00000010u,

  //! Instruction may use consecutive registers if the number of operands is greater than 2.
  kInstFlagConsecutive = 0x00000080u
};
|
||||
|
||||
//! Instruction information (AArch64).
|
||||
//! Instruction information (AArch64).
//!
//! Packs the encoding selector, name-table offset, RW-info index, and
//! \ref InstFlags for a single instruction into 8 bytes.
struct InstInfo {
  //! Instruction encoding type.
  uint32_t _encoding : 8;
  //! Index to data specific to each encoding type.
  uint32_t _encodingDataIndex : 8;
  uint32_t _reserved : 2;
  //! Index to \ref _nameData.
  uint32_t _nameDataIndex : 14;

  //! Index into the RW-info table (per-operand read/write classification).
  uint16_t _rwInfoIndex;
  //! Combination of \ref InstFlags.
  uint16_t _flags;

  //! \name Accessors
  //! \{

  //! Returns the index into the RW-info table.
  inline uint32_t rwInfoIndex() const noexcept { return _rwInfoIndex; }
  //! Returns instruction flags (a combination of \ref InstFlags).
  inline uint32_t flags() const noexcept { return _flags; }

  //! Tests whether the instruction has the given `flag`.
  //! (Made `noexcept` for consistency with the other accessors.)
  inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }

  //! \}
};
|
||||
|
||||
ASMJIT_VARAPI const InstInfo _instInfoTable[];
|
||||
|
||||
//! Returns the \ref InstInfo entry of `instId` - the id is first masked to its
//! real-id part; passing an undefined id is an API violation (asserted).
static inline const InstInfo& infoById(InstId instId) noexcept {
  instId &= uint32_t(InstIdParts::kRealId);
  ASMJIT_ASSERT(Inst::isDefinedId(instId));
  return _instInfoTable[instId];
}
|
||||
|
||||
} // {InstDB}
|
||||
|
||||
//! \}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // ASMJIT_ARM_A64INSTDB_H_INCLUDED
|
||||
876
src/asmjit/arm/a64instdb_p.h
Normal file
876
src/asmjit/arm/a64instdb_p.h
Normal file
@@ -0,0 +1,876 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_A64INSTDB_H_P_INCLUDED
|
||||
#define ASMJIT_ARM_A64INSTDB_H_P_INCLUDED
|
||||
|
||||
#include "../core/codeholder.h"
|
||||
#include "../arm/a64instdb.h"
|
||||
#include "../arm/a64operand.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \cond INTERNAL
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
namespace InstDB {
|
||||
|
||||
// a64::InstDB - Constants Used by Instructions
|
||||
// ============================================
|
||||
|
||||
// GP register types supported by base instructions (bit flags; kWX allows both).
static constexpr uint32_t kW = 0x1;
static constexpr uint32_t kX = 0x2;
static constexpr uint32_t kWX = 0x3;

// GP high register IDs supported by the instruction (zero register vs stack pointer,
// which share the same encoding slot on AArch64).
static constexpr uint32_t kZR = Gp::kIdZr;
static constexpr uint32_t kSP = Gp::kIdSp;

// a64::InstDB - RWInfo
// ====================

// Read/write categories of instructions - each enumerator names the RW flags
// of successive operands (R = read, W = write, X = read-write); the order must
// match the `instRWInfoData[]` table rows.
enum RWInfoType : uint32_t {
  kRWI_R,
  kRWI_RW,
  kRWI_RX,
  kRWI_RRW,
  kRWI_RWX,
  kRWI_W,
  kRWI_WRW,
  kRWI_WRX,
  kRWI_WRRW,
  kRWI_WRRX,
  kRWI_WW,
  kRWI_X,
  kRWI_XRX,
  kRWI_XXRRX,

  // Special categories for structured SIMD loads/stores (LD1..LD4 / ST1..ST4).
  kRWI_LDn,
  kRWI_STn,

  kRWI_SpecialStart = kRWI_LDn
};
|
||||
|
||||
// a64::InstDB - ElementType
|
||||
// =========================
|
||||
|
||||
enum ElementType : uint8_t {
|
||||
kET_None = Vec::kElementTypeNone,
|
||||
kET_B = Vec::kElementTypeB,
|
||||
kET_H = Vec::kElementTypeH,
|
||||
kET_S = Vec::kElementTypeS,
|
||||
kET_D = Vec::kElementTypeD,
|
||||
kET_2H = Vec::kElementTypeH2,
|
||||
kET_4B = Vec::kElementTypeB4
|
||||
};
|
||||
|
||||
// a64::InstDB - GpType
|
||||
// ====================
|
||||
|
||||
enum GpType : uint8_t {
|
||||
kGp_W,
|
||||
kGp_X,
|
||||
kGp_X_SP
|
||||
};
|
||||
|
||||
// a64::InstDB - OPSig
|
||||
// ===================
|
||||
|
||||
enum kOpSignature : uint32_t {
|
||||
kOp_GpW = GpW::kSignature,
|
||||
kOp_GpX = GpX::kSignature,
|
||||
|
||||
kOp_B = VecB::kSignature,
|
||||
kOp_H = VecH::kSignature,
|
||||
kOp_S = VecS::kSignature,
|
||||
kOp_D = VecD::kSignature,
|
||||
kOp_Q = VecV::kSignature,
|
||||
|
||||
kOp_V8B = VecD::kSignature | Vec::kSignatureElementB,
|
||||
kOp_V4H = VecD::kSignature | Vec::kSignatureElementH,
|
||||
kOp_V2S = VecD::kSignature | Vec::kSignatureElementS,
|
||||
|
||||
kOp_V16B = VecV::kSignature | Vec::kSignatureElementB,
|
||||
kOp_V8H = VecV::kSignature | Vec::kSignatureElementH,
|
||||
kOp_V4S = VecV::kSignature | Vec::kSignatureElementS,
|
||||
kOp_V2D = VecV::kSignature | Vec::kSignatureElementD
|
||||
};
|
||||
|
||||
// a64::InstDB - HFConv
|
||||
// ====================
|
||||
|
||||
enum kHFConv : uint32_t {
|
||||
//! FP16 version of the instruction is not available.
|
||||
kHF_N,
|
||||
|
||||
//! Doesn't do any change to the opcode.
|
||||
kHF_0,
|
||||
|
||||
kHF_A,
|
||||
kHF_B,
|
||||
kHF_C,
|
||||
kHF_D,
|
||||
|
||||
kHF_Count
|
||||
};
|
||||
|
||||
// a64::InstDB - VOType
|
||||
// ====================
|
||||
|
||||
//! Vector operand type combinations used by FP&SIMD instructions.
|
||||
enum VOType : uint32_t {
|
||||
kVO_V_B,
|
||||
kVO_V_BH,
|
||||
kVO_V_BH_4S,
|
||||
kVO_V_BHS,
|
||||
kVO_V_BHS_D2,
|
||||
kVO_V_HS,
|
||||
kVO_V_S,
|
||||
|
||||
kVO_V_B8H4,
|
||||
kVO_V_B8H4S2,
|
||||
kVO_V_B8D1,
|
||||
kVO_V_H4S2,
|
||||
|
||||
kVO_V_B16,
|
||||
kVO_V_B16H8,
|
||||
kVO_V_B16H8S4,
|
||||
kVO_V_B16D2,
|
||||
kVO_V_H8S4,
|
||||
kVO_V_S4,
|
||||
kVO_V_D2,
|
||||
|
||||
kVO_SV_BHS,
|
||||
kVO_SV_B8H4S2,
|
||||
kVO_SV_HS,
|
||||
kVO_V_Any,
|
||||
kVO_SV_Any,
|
||||
|
||||
kVO_Count
|
||||
};
|
||||
|
||||
// a64::InstDB - EncodingId
|
||||
// ========================
|
||||
|
||||
// ${EncodingId:Begin}
|
||||
// ------------------- Automatically generated, do not edit -------------------
|
||||
enum EncodingId : uint32_t {
|
||||
kEncodingNone = 0,
|
||||
kEncodingBaseAddSub,
|
||||
kEncodingBaseAdr,
|
||||
kEncodingBaseAtDcIcTlbi,
|
||||
kEncodingBaseAtomicCasp,
|
||||
kEncodingBaseAtomicOp,
|
||||
kEncodingBaseAtomicSt,
|
||||
kEncodingBaseBfc,
|
||||
kEncodingBaseBfi,
|
||||
kEncodingBaseBfm,
|
||||
kEncodingBaseBfx,
|
||||
kEncodingBaseBranchCmp,
|
||||
kEncodingBaseBranchReg,
|
||||
kEncodingBaseBranchRel,
|
||||
kEncodingBaseBranchTst,
|
||||
kEncodingBaseCCmp,
|
||||
kEncodingBaseCInc,
|
||||
kEncodingBaseCSel,
|
||||
kEncodingBaseCSet,
|
||||
kEncodingBaseCmpCmn,
|
||||
kEncodingBaseExtend,
|
||||
kEncodingBaseExtract,
|
||||
kEncodingBaseLdSt,
|
||||
kEncodingBaseLdpStp,
|
||||
kEncodingBaseLdxp,
|
||||
kEncodingBaseLogical,
|
||||
kEncodingBaseMov,
|
||||
kEncodingBaseMovKNZ,
|
||||
kEncodingBaseMrs,
|
||||
kEncodingBaseMsr,
|
||||
kEncodingBaseMvnNeg,
|
||||
kEncodingBaseOp,
|
||||
kEncodingBaseOpImm,
|
||||
kEncodingBaseR,
|
||||
kEncodingBaseRM_NoImm,
|
||||
kEncodingBaseRM_SImm10,
|
||||
kEncodingBaseRM_SImm9,
|
||||
kEncodingBaseRR,
|
||||
kEncodingBaseRRII,
|
||||
kEncodingBaseRRR,
|
||||
kEncodingBaseRRRR,
|
||||
kEncodingBaseRev,
|
||||
kEncodingBaseShift,
|
||||
kEncodingBaseStx,
|
||||
kEncodingBaseStxp,
|
||||
kEncodingBaseSys,
|
||||
kEncodingBaseTst,
|
||||
kEncodingFSimdPair,
|
||||
kEncodingFSimdSV,
|
||||
kEncodingFSimdVV,
|
||||
kEncodingFSimdVVV,
|
||||
kEncodingFSimdVVVV,
|
||||
kEncodingFSimdVVVe,
|
||||
kEncodingISimdPair,
|
||||
kEncodingISimdSV,
|
||||
kEncodingISimdVV,
|
||||
kEncodingISimdVVV,
|
||||
kEncodingISimdVVVI,
|
||||
kEncodingISimdVVVV,
|
||||
kEncodingISimdVVVVx,
|
||||
kEncodingISimdVVVe,
|
||||
kEncodingISimdVVVx,
|
||||
kEncodingISimdVVx,
|
||||
kEncodingISimdWWV,
|
||||
kEncodingSimdBicOrr,
|
||||
kEncodingSimdCmp,
|
||||
kEncodingSimdDot,
|
||||
kEncodingSimdDup,
|
||||
kEncodingSimdFcadd,
|
||||
kEncodingSimdFccmpFccmpe,
|
||||
kEncodingSimdFcm,
|
||||
kEncodingSimdFcmla,
|
||||
kEncodingSimdFcmpFcmpe,
|
||||
kEncodingSimdFcsel,
|
||||
kEncodingSimdFcvt,
|
||||
kEncodingSimdFcvtLN,
|
||||
kEncodingSimdFcvtSV,
|
||||
kEncodingSimdFmlal,
|
||||
kEncodingSimdFmov,
|
||||
kEncodingSimdIns,
|
||||
kEncodingSimdLdNStN,
|
||||
kEncodingSimdLdSt,
|
||||
kEncodingSimdLdpStp,
|
||||
kEncodingSimdLdurStur,
|
||||
kEncodingSimdMov,
|
||||
kEncodingSimdMoviMvni,
|
||||
kEncodingSimdShift,
|
||||
kEncodingSimdShiftES,
|
||||
kEncodingSimdSm3tt,
|
||||
kEncodingSimdSmovUmov,
|
||||
kEncodingSimdSxtlUxtl,
|
||||
kEncodingSimdTblTbx
|
||||
};
|
||||
// ----------------------------------------------------------------------------
|
||||
// ${EncodingId:End}
|
||||
|
||||
// a64::InstDB::EncodingData
|
||||
// =========================
|
||||
|
||||
namespace EncodingData {
|
||||
|
||||
#define M_OPCODE(field, bits) \
|
||||
uint32_t _##field : bits; \
|
||||
inline constexpr uint32_t field() const noexcept { return uint32_t(_##field) << (32 - bits); }
|
||||
|
||||
struct BaseOp {
|
||||
uint32_t opcode;
|
||||
};
|
||||
|
||||
struct BaseOpImm {
|
||||
uint32_t opcode;
|
||||
uint16_t immBits;
|
||||
uint16_t immOffset;
|
||||
};
|
||||
|
||||
struct BaseR {
|
||||
uint32_t opcode;
|
||||
uint32_t rType : 8;
|
||||
uint32_t rHiId : 8;
|
||||
uint32_t rShift : 8;
|
||||
};
|
||||
|
||||
struct BaseRR {
|
||||
uint32_t opcode;
|
||||
uint32_t aType : 2;
|
||||
uint32_t aHiId : 6;
|
||||
uint32_t aShift : 5;
|
||||
uint32_t bType : 2;
|
||||
uint32_t bHiId : 6;
|
||||
uint32_t bShift : 5;
|
||||
uint32_t uniform : 1;
|
||||
};
|
||||
|
||||
struct BaseRRR {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t aType : 2;
|
||||
uint32_t aHiId : 6;
|
||||
uint32_t bType : 2;
|
||||
uint32_t bHiId : 6;
|
||||
uint32_t cType : 2;
|
||||
uint32_t cHiId : 6;
|
||||
uint32_t uniform : 1;
|
||||
};
|
||||
|
||||
struct BaseRRRR {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t aType : 2;
|
||||
uint32_t aHiId : 6;
|
||||
uint32_t bType : 2;
|
||||
uint32_t bHiId : 6;
|
||||
uint32_t cType : 2;
|
||||
uint32_t cHiId : 6;
|
||||
uint32_t dType : 2;
|
||||
uint32_t dHiId : 6;
|
||||
uint32_t uniform : 1;
|
||||
};
|
||||
|
||||
struct BaseRRII {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t aType : 2;
|
||||
uint32_t aHiId : 6;
|
||||
uint32_t bType : 2;
|
||||
uint32_t bHiId : 6;
|
||||
uint32_t aImmSize : 6;
|
||||
uint32_t aImmDiscardLsb : 5;
|
||||
uint32_t aImmOffset : 5;
|
||||
uint32_t bImmSize : 6;
|
||||
uint32_t bImmDiscardLsb : 5;
|
||||
uint32_t bImmOffset : 5;
|
||||
};
|
||||
|
||||
struct BaseAtDcIcTlbi {
|
||||
uint32_t immVerifyMask : 14;
|
||||
uint32_t immVerifyData : 14;
|
||||
uint32_t mandatoryReg : 1;
|
||||
};
|
||||
|
||||
struct BaseAdcSbc {
|
||||
uint32_t opcode;
|
||||
};
|
||||
|
||||
struct BaseAddSub {
|
||||
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
|
||||
uint32_t extendedOp : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|Rd|
|
||||
uint32_t immediateOp: 10; // sf|.......|Sh| Imm:12 |Rn|Rd|
|
||||
};
|
||||
|
||||
struct BaseAdr {
|
||||
M_OPCODE(opcode, 22)
|
||||
OffsetType offsetType : 8;
|
||||
};
|
||||
|
||||
struct BaseBfm {
|
||||
uint32_t opcode; // sf|........|N|ImmR:6|ImmS:6|Rn|Rd|
|
||||
};
|
||||
|
||||
struct BaseCmpCmn {
|
||||
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
|
||||
uint32_t extendedOp : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|11111|
|
||||
uint32_t immediateOp: 10; // sf|.......|Sh| Imm:12 |Rn|11111|
|
||||
};
|
||||
|
||||
struct BaseExtend {
|
||||
M_OPCODE(opcode, 22) // sf|........|N|......|......|Rn|Rd|
|
||||
uint32_t rType : 2;
|
||||
uint32_t u : 1;
|
||||
};
|
||||
|
||||
struct BaseLogical {
|
||||
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
|
||||
uint32_t immediateOp: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|Rd|
|
||||
uint32_t negateImm : 1 ; // True if this is an operation that must negate IMM.
|
||||
};
|
||||
|
||||
struct BaseMvnNeg {
|
||||
uint32_t opcode;
|
||||
};
|
||||
|
||||
struct BaseShift {
|
||||
M_OPCODE(registerOp, 22)
|
||||
M_OPCODE(immediateOp, 22)
|
||||
uint32_t ror : 2;
|
||||
};
|
||||
|
||||
struct BaseTst {
|
||||
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
|
||||
uint32_t immediateOp: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|11111|
|
||||
};
|
||||
|
||||
struct BaseRM_NoImm {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t rType : 2;
|
||||
uint32_t rHiId : 6;
|
||||
uint32_t xOffset : 5;
|
||||
};
|
||||
|
||||
struct BaseRM_SImm9 {
|
||||
M_OPCODE(offsetOp, 22)
|
||||
M_OPCODE(prePostOp, 22)
|
||||
uint32_t rType : 2;
|
||||
uint32_t rHiId : 6;
|
||||
uint32_t xOffset : 5;
|
||||
uint32_t immShift : 4;
|
||||
};
|
||||
|
||||
struct BaseRM_SImm10 {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t rType : 2;
|
||||
uint32_t rHiId : 6;
|
||||
uint32_t xOffset : 5;
|
||||
uint32_t immShift : 4;
|
||||
};
|
||||
|
||||
struct BaseLdSt {
|
||||
uint32_t uOffsetOp : 10;
|
||||
uint32_t prePostOp : 11;
|
||||
uint32_t registerOp : 11;
|
||||
uint32_t literalOp : 8;
|
||||
uint32_t rType : 2;
|
||||
uint32_t xOffset : 5;
|
||||
uint32_t uOffsetShift : 3;
|
||||
uint32_t uAltInstId : 14;
|
||||
};
|
||||
|
||||
struct BaseLdpStp {
|
||||
uint32_t offsetOp : 10;
|
||||
uint32_t prePostOp : 10;
|
||||
uint32_t rType : 2;
|
||||
uint32_t xOffset : 5;
|
||||
uint32_t offsetShift : 3;
|
||||
};
|
||||
|
||||
struct BaseStx {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t rType : 2;
|
||||
uint32_t xOffset : 5;
|
||||
};
|
||||
|
||||
struct BaseLdxp {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t rType : 2;
|
||||
uint32_t xOffset : 5;
|
||||
};
|
||||
|
||||
struct BaseStxp {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t rType : 2;
|
||||
uint32_t xOffset : 5;
|
||||
};
|
||||
|
||||
struct BaseAtomicOp {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t rType : 2;
|
||||
uint32_t xOffset : 5;
|
||||
uint32_t zr : 1;
|
||||
};
|
||||
|
||||
struct BaseAtomicSt {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t rType : 2;
|
||||
uint32_t xOffset : 5;
|
||||
};
|
||||
|
||||
struct BaseAtomicCasp {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t rType : 2;
|
||||
uint32_t xOffset : 5;
|
||||
};
|
||||
|
||||
typedef BaseOp BaseBranchReg;
|
||||
typedef BaseOp BaseBranchRel;
|
||||
typedef BaseOp BaseBranchCmp;
|
||||
typedef BaseOp BaseBranchTst;
|
||||
typedef BaseOp BaseExtract;
|
||||
typedef BaseOp BaseBfc;
|
||||
typedef BaseOp BaseBfi;
|
||||
typedef BaseOp BaseBfx;
|
||||
typedef BaseOp BaseCCmp;
|
||||
typedef BaseOp BaseCInc;
|
||||
typedef BaseOp BaseCSet;
|
||||
typedef BaseOp BaseCSel;
|
||||
typedef BaseOp BaseMovKNZ;
|
||||
typedef BaseOp BaseMull;
|
||||
|
||||
struct FSimdGeneric {
|
||||
uint32_t _scalarOp : 28;
|
||||
uint32_t _scalarHf : 4;
|
||||
uint32_t _vectorOp : 28;
|
||||
uint32_t _vectorHf : 4;
|
||||
|
||||
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
|
||||
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
|
||||
constexpr uint32_t scalarHf() const noexcept { return uint32_t(_scalarHf); }
|
||||
constexpr uint32_t vectorHf() const noexcept { return uint32_t(_vectorHf); }
|
||||
};
|
||||
|
||||
typedef FSimdGeneric FSimdVV;
|
||||
typedef FSimdGeneric FSimdVVV;
|
||||
typedef FSimdGeneric FSimdVVVV;
|
||||
|
||||
struct FSimdSV {
|
||||
uint32_t opcode;
|
||||
};
|
||||
|
||||
struct FSimdVVVe {
|
||||
uint32_t _scalarOp : 28;
|
||||
uint32_t _scalarHf : 4;
|
||||
uint32_t _vectorOp;
|
||||
uint32_t _elementOp;
|
||||
|
||||
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
|
||||
constexpr uint32_t scalarHf() const noexcept { return uint32_t(_scalarHf); };
|
||||
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
|
||||
constexpr uint32_t vectorHf() const noexcept { return kHF_C; }
|
||||
constexpr uint32_t elementScalarOp() const noexcept { return (uint32_t(_elementOp) << 10) | (0x5u << 28); }
|
||||
constexpr uint32_t elementVectorOp() const noexcept { return (uint32_t(_elementOp) << 10); }
|
||||
};
|
||||
|
||||
struct SimdFcadd {
|
||||
uint32_t _opcode;
|
||||
|
||||
constexpr uint32_t opcode() const noexcept { return _opcode << 10; }
|
||||
};
|
||||
|
||||
struct SimdFcmla {
|
||||
uint32_t _regularOp;
|
||||
uint32_t _elementOp;
|
||||
|
||||
constexpr uint32_t regularOp() const noexcept { return uint32_t(_regularOp) << 10; }
|
||||
constexpr uint32_t elementOp() const noexcept { return (uint32_t(_elementOp) << 10); }
|
||||
};
|
||||
|
||||
struct SimdFccmpFccmpe {
|
||||
uint32_t _opcode;
|
||||
constexpr uint32_t opcode() const noexcept { return _opcode; }
|
||||
};
|
||||
|
||||
struct SimdFcm {
|
||||
uint32_t _registerOp : 28;
|
||||
uint32_t _registerHf : 4;
|
||||
|
||||
uint32_t _zeroOp : 28;
|
||||
|
||||
constexpr bool hasRegisterOp() const noexcept { return _registerOp != 0; }
|
||||
constexpr bool hasZeroOp() const noexcept { return _zeroOp != 0; }
|
||||
|
||||
constexpr uint32_t registerScalarOp() const noexcept { return (uint32_t(_registerOp) << 10) | (0x5u << 28); }
|
||||
constexpr uint32_t registerVectorOp() const noexcept { return uint32_t(_registerOp) << 10; }
|
||||
constexpr uint32_t registerScalarHf() const noexcept { return uint32_t(_registerHf); }
|
||||
constexpr uint32_t registerVectorHf() const noexcept { return uint32_t(_registerHf); }
|
||||
|
||||
constexpr uint32_t zeroScalarOp() const noexcept { return (uint32_t(_zeroOp) << 10) | (0x5u << 28); }
|
||||
constexpr uint32_t zeroVectorOp() const noexcept { return (uint32_t(_zeroOp) << 10); }
|
||||
};
|
||||
|
||||
struct SimdFcmpFcmpe {
|
||||
uint32_t _opcode;
|
||||
constexpr uint32_t opcode() const noexcept { return _opcode; }
|
||||
};
|
||||
|
||||
struct SimdFcvtLN {
|
||||
uint32_t _opcode : 22;
|
||||
uint32_t _isCvtxn : 1;
|
||||
uint32_t _hasScalar : 1;
|
||||
|
||||
constexpr uint32_t scalarOp() const noexcept { return (uint32_t(_opcode) << 10) | (0x5u << 28); }
|
||||
constexpr uint32_t vectorOp() const noexcept { return (uint32_t(_opcode) << 10); }
|
||||
|
||||
constexpr uint32_t isCvtxn() const noexcept { return _isCvtxn; }
|
||||
constexpr uint32_t hasScalar() const noexcept { return _hasScalar; }
|
||||
};
|
||||
|
||||
struct SimdFcvtSV {
|
||||
uint32_t _vectorIntOp;
|
||||
uint32_t _vectorFpOp;
|
||||
uint32_t _generalOp : 31;
|
||||
uint32_t _isFloatToInt : 1;
|
||||
|
||||
constexpr uint32_t scalarIntOp() const noexcept { return (uint32_t(_vectorIntOp) << 10) | (0x5u << 28); }
|
||||
constexpr uint32_t vectorIntOp() const noexcept { return uint32_t(_vectorIntOp) << 10; }
|
||||
constexpr uint32_t scalarFpOp() const noexcept { return (uint32_t(_vectorFpOp) << 10) | (0x5u << 28); }
|
||||
constexpr uint32_t vectorFpOp() const noexcept { return uint32_t(_vectorFpOp) << 10; }
|
||||
constexpr uint32_t generalOp() const noexcept { return (uint32_t(_generalOp) << 10); }
|
||||
|
||||
constexpr uint32_t isFloatToInt() const noexcept { return _isFloatToInt; }
|
||||
constexpr uint32_t isFixedPoint() const noexcept { return _vectorFpOp != 0; }
|
||||
};
|
||||
|
||||
struct SimdFmlal {
|
||||
uint32_t _vectorOp;
|
||||
uint32_t _elementOp;
|
||||
uint8_t _optionalQ;
|
||||
uint8_t tA;
|
||||
uint8_t tB;
|
||||
uint8_t tElement;
|
||||
|
||||
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
|
||||
constexpr uint32_t elementOp() const noexcept { return uint32_t(_elementOp) << 10; }
|
||||
constexpr uint32_t optionalQ() const noexcept { return _optionalQ; }
|
||||
};
|
||||
|
||||
struct FSimdPair {
|
||||
uint32_t _scalarOp;
|
||||
uint32_t _vectorOp;
|
||||
|
||||
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
|
||||
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
|
||||
};
|
||||
|
||||
struct ISimdVV {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t vecOpType : 6;
|
||||
};
|
||||
|
||||
struct ISimdVVx {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t op0Signature;
|
||||
uint32_t op1Signature;
|
||||
};
|
||||
|
||||
struct ISimdSV {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t vecOpType : 6;
|
||||
};
|
||||
|
||||
struct ISimdVVV {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t vecOpType : 6;
|
||||
};
|
||||
|
||||
struct ISimdVVVx {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t op0Signature;
|
||||
uint32_t op1Signature;
|
||||
uint32_t op2Signature;
|
||||
};
|
||||
|
||||
struct ISimdWWV {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t vecOpType : 6;
|
||||
};
|
||||
|
||||
struct ISimdVVVe {
|
||||
uint32_t regularOp : 26; // 22 bits used.
|
||||
uint32_t regularVecType : 6;
|
||||
uint32_t elementOp : 26; // 22 bits used.
|
||||
uint32_t elementVecType : 6;
|
||||
};
|
||||
|
||||
struct ISimdVVVI {
|
||||
M_OPCODE(opcode, 22)
|
||||
uint32_t vecOpType : 6;
|
||||
uint32_t immSize : 4;
|
||||
uint32_t immShift : 4;
|
||||
uint32_t imm64HasOneBitLess : 1;
|
||||
};
|
||||
|
||||
struct ISimdVVVV {
|
||||
uint32_t opcode : 22;
|
||||
uint32_t vecOpType : 6;
|
||||
};
|
||||
|
||||
struct ISimdVVVVx {
|
||||
uint32_t opcode;
|
||||
uint32_t op0Signature;
|
||||
uint32_t op1Signature;
|
||||
uint32_t op2Signature;
|
||||
uint32_t op3Signature;
|
||||
};
|
||||
|
||||
struct SimdBicOrr {
|
||||
uint32_t registerOp; // 22 bits used.
|
||||
uint32_t immediateOp; // 22 bits used.
|
||||
};
|
||||
|
||||
struct SimdCmp {
|
||||
uint32_t regOp;
|
||||
uint32_t zeroOp : 22;
|
||||
uint32_t vecOpType : 6;
|
||||
};
|
||||
|
||||
struct SimdDot {
|
||||
uint32_t vectorOp; // 22 bits used.
|
||||
uint32_t elementOp; // 22 bits used.
|
||||
uint8_t tA; // Element-type of the first operand.
|
||||
uint8_t tB; // Element-type of the second and third operands.
|
||||
uint8_t tElement; // Element-type of the element index[] operand.
|
||||
};
|
||||
|
||||
struct SimdMoviMvni {
|
||||
uint32_t opcode : 31;
|
||||
uint32_t inverted : 1;
|
||||
};
|
||||
|
||||
struct SimdLdSt {
|
||||
uint32_t uOffsetOp : 10;
|
||||
uint32_t prePostOp : 11;
|
||||
uint32_t registerOp : 11;
|
||||
uint32_t literalOp : 8;
|
||||
uint32_t uAltInstId : 16;
|
||||
};
|
||||
|
||||
struct SimdLdNStN {
|
||||
uint32_t singleOp;
|
||||
uint32_t multipleOp : 22;
|
||||
uint32_t n : 3;
|
||||
uint32_t replicate : 1;
|
||||
};
|
||||
|
||||
struct SimdLdpStp {
|
||||
uint32_t offsetOp : 10;
|
||||
uint32_t prePostOp : 10;
|
||||
};
|
||||
|
||||
struct SimdLdurStur {
|
||||
uint32_t opcode;
|
||||
};
|
||||
|
||||
struct ISimdPair {
|
||||
uint32_t opcode2; // 22 bits used.
|
||||
uint32_t opcode3 : 26; // 22 bits used.
|
||||
uint32_t opType3 : 6;
|
||||
};
|
||||
|
||||
struct SimdShift {
|
||||
uint32_t registerOp; // 22 bits used.
|
||||
uint32_t immediateOp : 22; // 22 bits used.
|
||||
uint32_t invertedImm : 1;
|
||||
uint32_t vecOpType : 6;
|
||||
};
|
||||
|
||||
struct SimdShiftES {
|
||||
uint32_t opcode : 22;
|
||||
uint32_t vecOpType : 6;
|
||||
};
|
||||
|
||||
struct SimdSm3tt {
|
||||
uint32_t opcode;
|
||||
};
|
||||
|
||||
struct SimdSmovUmov {
|
||||
uint32_t opcode : 22;
|
||||
uint32_t vecOpType : 6;
|
||||
uint32_t isSigned : 1;
|
||||
};
|
||||
|
||||
struct SimdSxtlUxtl {
|
||||
uint32_t opcode : 22;
|
||||
uint32_t vecOpType : 6;
|
||||
};
|
||||
|
||||
struct SimdTblTbx {
|
||||
uint32_t opcode;
|
||||
};
|
||||
|
||||
#undef M_OPCODE
|
||||
|
||||
// ${EncodingDataForward:Begin}
|
||||
// ------------------- Automatically generated, do not edit -------------------
|
||||
extern const BaseAddSub baseAddSub[4];
|
||||
extern const BaseAdr baseAdr[2];
|
||||
extern const BaseAtDcIcTlbi baseAtDcIcTlbi[4];
|
||||
extern const BaseAtomicCasp baseAtomicCasp[4];
|
||||
extern const BaseAtomicOp baseAtomicOp[123];
|
||||
extern const BaseAtomicSt baseAtomicSt[48];
|
||||
extern const BaseBfc baseBfc[1];
|
||||
extern const BaseBfi baseBfi[3];
|
||||
extern const BaseBfm baseBfm[3];
|
||||
extern const BaseBfx baseBfx[3];
|
||||
extern const BaseBranchCmp baseBranchCmp[2];
|
||||
extern const BaseBranchReg baseBranchReg[3];
|
||||
extern const BaseBranchRel baseBranchRel[2];
|
||||
extern const BaseBranchTst baseBranchTst[2];
|
||||
extern const BaseCCmp baseCCmp[2];
|
||||
extern const BaseCInc baseCInc[3];
|
||||
extern const BaseCSel baseCSel[4];
|
||||
extern const BaseCSet baseCSet[2];
|
||||
extern const BaseCmpCmn baseCmpCmn[2];
|
||||
extern const BaseExtend baseExtend[5];
|
||||
extern const BaseExtract baseExtract[1];
|
||||
extern const BaseLdSt baseLdSt[9];
|
||||
extern const BaseLdpStp baseLdpStp[6];
|
||||
extern const BaseLdxp baseLdxp[2];
|
||||
extern const BaseLogical baseLogical[8];
|
||||
extern const BaseMovKNZ baseMovKNZ[3];
|
||||
extern const BaseMvnNeg baseMvnNeg[3];
|
||||
extern const BaseOp baseOp[23];
|
||||
extern const BaseOpImm baseOpImm[14];
|
||||
extern const BaseR baseR[10];
|
||||
extern const BaseRM_NoImm baseRM_NoImm[21];
|
||||
extern const BaseRM_SImm10 baseRM_SImm10[2];
|
||||
extern const BaseRM_SImm9 baseRM_SImm9[23];
|
||||
extern const BaseRR baseRR[15];
|
||||
extern const BaseRRII baseRRII[2];
|
||||
extern const BaseRRR baseRRR[26];
|
||||
extern const BaseRRRR baseRRRR[6];
|
||||
extern const BaseShift baseShift[8];
|
||||
extern const BaseStx baseStx[3];
|
||||
extern const BaseStxp baseStxp[2];
|
||||
extern const BaseTst baseTst[1];
|
||||
extern const FSimdPair fSimdPair[5];
|
||||
extern const FSimdSV fSimdSV[4];
|
||||
extern const FSimdVV fSimdVV[17];
|
||||
extern const FSimdVVV fSimdVVV[13];
|
||||
extern const FSimdVVVV fSimdVVVV[4];
|
||||
extern const FSimdVVVe fSimdVVVe[4];
|
||||
extern const ISimdPair iSimdPair[1];
|
||||
extern const ISimdSV iSimdSV[7];
|
||||
extern const ISimdVV iSimdVV[29];
|
||||
extern const ISimdVVV iSimdVVV[65];
|
||||
extern const ISimdVVVI iSimdVVVI[2];
|
||||
extern const ISimdVVVV iSimdVVVV[2];
|
||||
extern const ISimdVVVVx iSimdVVVVx[1];
|
||||
extern const ISimdVVVe iSimdVVVe[25];
|
||||
extern const ISimdVVVx iSimdVVVx[17];
|
||||
extern const ISimdVVx iSimdVVx[13];
|
||||
extern const ISimdWWV iSimdWWV[8];
|
||||
extern const SimdBicOrr simdBicOrr[2];
|
||||
extern const SimdCmp simdCmp[7];
|
||||
extern const SimdDot simdDot[5];
|
||||
extern const SimdFcadd simdFcadd[1];
|
||||
extern const SimdFccmpFccmpe simdFccmpFccmpe[2];
|
||||
extern const SimdFcm simdFcm[5];
|
||||
extern const SimdFcmla simdFcmla[1];
|
||||
extern const SimdFcmpFcmpe simdFcmpFcmpe[2];
|
||||
extern const SimdFcvtLN simdFcvtLN[6];
|
||||
extern const SimdFcvtSV simdFcvtSV[12];
|
||||
extern const SimdFmlal simdFmlal[6];
|
||||
extern const SimdLdNStN simdLdNStN[12];
|
||||
extern const SimdLdSt simdLdSt[2];
|
||||
extern const SimdLdpStp simdLdpStp[4];
|
||||
extern const SimdLdurStur simdLdurStur[2];
|
||||
extern const SimdMoviMvni simdMoviMvni[2];
|
||||
extern const SimdShift simdShift[40];
|
||||
extern const SimdShiftES simdShiftES[2];
|
||||
extern const SimdSm3tt simdSm3tt[4];
|
||||
extern const SimdSmovUmov simdSmovUmov[2];
|
||||
extern const SimdSxtlUxtl simdSxtlUxtl[4];
|
||||
extern const SimdTblTbx simdTblTbx[2];
|
||||
// ----------------------------------------------------------------------------
|
||||
// ${EncodingDataForward:End}
|
||||
|
||||
} // {EncodingData}
|
||||
|
||||
// a64::InstDB - InstNameIndex
|
||||
// ===========================
|
||||
|
||||
// ${NameLimits:Begin}
|
||||
// ------------------- Automatically generated, do not edit -------------------
|
||||
enum : uint32_t { kMaxNameSize = 9 };
|
||||
// ----------------------------------------------------------------------------
|
||||
// ${NameLimits:End}
|
||||
|
||||
struct InstNameIndex {
|
||||
uint16_t start;
|
||||
uint16_t end;
|
||||
};
|
||||
|
||||
// a64::InstDB - Tables
|
||||
// ====================
|
||||
|
||||
#ifndef ASMJIT_NO_TEXT
|
||||
extern const char _nameData[];
|
||||
extern const InstNameIndex instNameIndex[26];
|
||||
#endif // !ASMJIT_NO_TEXT
|
||||
|
||||
} // {InstDB}
|
||||
|
||||
//! \}
|
||||
//! \endcond
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // ASMJIT_A64_ARMINSTDB_H_P_INCLUDED
|
||||
|
||||
85
src/asmjit/arm/a64operand.cpp
Normal file
85
src/asmjit/arm/a64operand.cpp
Normal file
@@ -0,0 +1,85 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include "../core/api-build_p.h"
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
|
||||
#include "../core/misc_p.h"
|
||||
#include "../arm/a64operand.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
// a64::Operand - Tests
|
||||
// ====================
|
||||
|
||||
#if defined(ASMJIT_TEST)
|
||||
UNIT(a64_operand) {
|
||||
INFO("Checking if a64::reg(...) matches built-in IDs");
|
||||
EXPECT(w(5) == w5);
|
||||
EXPECT(x(5) == x5);
|
||||
|
||||
INFO("Checking Gp register properties");
|
||||
EXPECT(Gp().isReg() == true);
|
||||
EXPECT(w0.isReg() == true);
|
||||
EXPECT(x0.isReg() == true);
|
||||
EXPECT(w0.id() == 0);
|
||||
EXPECT(x0.id() == 0);
|
||||
EXPECT(wzr.id() == Gp::kIdZr);
|
||||
EXPECT(xzr.id() == Gp::kIdZr);
|
||||
EXPECT(wsp.id() == Gp::kIdSp);
|
||||
EXPECT(sp.id() == Gp::kIdSp);
|
||||
EXPECT(w0.size() == 4);
|
||||
EXPECT(x0.size() == 8);
|
||||
EXPECT(w0.type() == RegType::kARM_GpW);
|
||||
EXPECT(x0.type() == RegType::kARM_GpX);
|
||||
EXPECT(w0.group() == RegGroup::kGp);
|
||||
EXPECT(x0.group() == RegGroup::kGp);
|
||||
|
||||
INFO("Checking Vec register properties");
|
||||
EXPECT(v0.type() == RegType::kARM_VecV);
|
||||
EXPECT(d0.type() == RegType::kARM_VecD);
|
||||
EXPECT(s0.type() == RegType::kARM_VecS);
|
||||
EXPECT(h0.type() == RegType::kARM_VecH);
|
||||
EXPECT(b0.type() == RegType::kARM_VecB);
|
||||
|
||||
EXPECT(v0.group() == RegGroup::kVec);
|
||||
EXPECT(d0.group() == RegGroup::kVec);
|
||||
EXPECT(s0.group() == RegGroup::kVec);
|
||||
EXPECT(h0.group() == RegGroup::kVec);
|
||||
EXPECT(b0.group() == RegGroup::kVec);
|
||||
|
||||
INFO("Checking Vec register element[] access");
|
||||
Vec vd_1 = v15.d(1);
|
||||
EXPECT(vd_1.type() == RegType::kARM_VecV);
|
||||
EXPECT(vd_1.group() == RegGroup::kVec);
|
||||
EXPECT(vd_1.id() == 15);
|
||||
EXPECT(vd_1.isVecD2());
|
||||
EXPECT(vd_1.elementType() == Vec::kElementTypeD);
|
||||
EXPECT(vd_1.hasElementIndex());
|
||||
EXPECT(vd_1.elementIndex() == 1);
|
||||
|
||||
Vec vs_3 = v15.s(3);
|
||||
EXPECT(vs_3.type() == RegType::kARM_VecV);
|
||||
EXPECT(vs_3.group() == RegGroup::kVec);
|
||||
EXPECT(vs_3.id() == 15);
|
||||
EXPECT(vs_3.isVecS4());
|
||||
EXPECT(vs_3.elementType() == Vec::kElementTypeS);
|
||||
EXPECT(vs_3.hasElementIndex());
|
||||
EXPECT(vs_3.elementIndex() == 3);
|
||||
|
||||
Vec vb_4 = v15.b4(3);
|
||||
EXPECT(vb_4.type() == RegType::kARM_VecV);
|
||||
EXPECT(vb_4.group() == RegGroup::kVec);
|
||||
EXPECT(vb_4.id() == 15);
|
||||
EXPECT(vb_4.isVecB4x4());
|
||||
EXPECT(vb_4.elementType() == Vec::kElementTypeB4);
|
||||
EXPECT(vb_4.hasElementIndex());
|
||||
EXPECT(vb_4.elementIndex() == 3);
|
||||
}
|
||||
#endif
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_AARCH64
|
||||
312
src/asmjit/arm/a64operand.h
Normal file
312
src/asmjit/arm/a64operand.h
Normal file
@@ -0,0 +1,312 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_A64OPERAND_H_INCLUDED
|
||||
#define ASMJIT_ARM_A64OPERAND_H_INCLUDED
|
||||
|
||||
#include "../arm/armoperand.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
using arm::Reg;
|
||||
using arm::Mem;
|
||||
using arm::Gp;
|
||||
using arm::GpW;
|
||||
using arm::GpX;
|
||||
|
||||
using arm::Vec;
|
||||
using arm::VecB;
|
||||
using arm::VecH;
|
||||
using arm::VecS;
|
||||
using arm::VecD;
|
||||
using arm::VecV;
|
||||
|
||||
#ifndef _DOXYGEN
|
||||
namespace regs {
|
||||
#endif
|
||||
|
||||
using namespace ::asmjit::arm::regs;
|
||||
|
||||
static constexpr GpW w0 = GpW(0);
|
||||
static constexpr GpW w1 = GpW(1);
|
||||
static constexpr GpW w2 = GpW(2);
|
||||
static constexpr GpW w3 = GpW(3);
|
||||
static constexpr GpW w4 = GpW(4);
|
||||
static constexpr GpW w5 = GpW(5);
|
||||
static constexpr GpW w6 = GpW(6);
|
||||
static constexpr GpW w7 = GpW(7);
|
||||
static constexpr GpW w8 = GpW(8);
|
||||
static constexpr GpW w9 = GpW(9);
|
||||
static constexpr GpW w10 = GpW(10);
|
||||
static constexpr GpW w11 = GpW(11);
|
||||
static constexpr GpW w12 = GpW(12);
|
||||
static constexpr GpW w13 = GpW(13);
|
||||
static constexpr GpW w14 = GpW(14);
|
||||
static constexpr GpW w15 = GpW(15);
|
||||
static constexpr GpW w16 = GpW(16);
|
||||
static constexpr GpW w17 = GpW(17);
|
||||
static constexpr GpW w18 = GpW(18);
|
||||
static constexpr GpW w19 = GpW(19);
|
||||
static constexpr GpW w20 = GpW(20);
|
||||
static constexpr GpW w21 = GpW(21);
|
||||
static constexpr GpW w22 = GpW(22);
|
||||
static constexpr GpW w23 = GpW(23);
|
||||
static constexpr GpW w24 = GpW(24);
|
||||
static constexpr GpW w25 = GpW(25);
|
||||
static constexpr GpW w26 = GpW(26);
|
||||
static constexpr GpW w27 = GpW(27);
|
||||
static constexpr GpW w28 = GpW(28);
|
||||
static constexpr GpW w29 = GpW(29);
|
||||
static constexpr GpW w30 = GpW(30);
|
||||
static constexpr GpW wzr = GpW(Gp::kIdZr);
|
||||
static constexpr GpW wsp = GpW(Gp::kIdSp);
|
||||
|
||||
static constexpr GpX x0 = GpX(0);
|
||||
static constexpr GpX x1 = GpX(1);
|
||||
static constexpr GpX x2 = GpX(2);
|
||||
static constexpr GpX x3 = GpX(3);
|
||||
static constexpr GpX x4 = GpX(4);
|
||||
static constexpr GpX x5 = GpX(5);
|
||||
static constexpr GpX x6 = GpX(6);
|
||||
static constexpr GpX x7 = GpX(7);
|
||||
static constexpr GpX x8 = GpX(8);
|
||||
static constexpr GpX x9 = GpX(9);
|
||||
static constexpr GpX x10 = GpX(10);
|
||||
static constexpr GpX x11 = GpX(11);
|
||||
static constexpr GpX x12 = GpX(12);
|
||||
static constexpr GpX x13 = GpX(13);
|
||||
static constexpr GpX x14 = GpX(14);
|
||||
static constexpr GpX x15 = GpX(15);
|
||||
static constexpr GpX x16 = GpX(16);
|
||||
static constexpr GpX x17 = GpX(17);
|
||||
static constexpr GpX x18 = GpX(18);
|
||||
static constexpr GpX x19 = GpX(19);
|
||||
static constexpr GpX x20 = GpX(20);
|
||||
static constexpr GpX x21 = GpX(21);
|
||||
static constexpr GpX x22 = GpX(22);
|
||||
static constexpr GpX x23 = GpX(23);
|
||||
static constexpr GpX x24 = GpX(24);
|
||||
static constexpr GpX x25 = GpX(25);
|
||||
static constexpr GpX x26 = GpX(26);
|
||||
static constexpr GpX x27 = GpX(27);
|
||||
static constexpr GpX x28 = GpX(28);
|
||||
static constexpr GpX x29 = GpX(29);
|
||||
static constexpr GpX x30 = GpX(30);
|
||||
static constexpr GpX xzr = GpX(Gp::kIdZr);
|
||||
static constexpr GpX sp = GpX(Gp::kIdSp);
|
||||
|
||||
static constexpr VecB b0 = VecB(0);
|
||||
static constexpr VecB b1 = VecB(1);
|
||||
static constexpr VecB b2 = VecB(2);
|
||||
static constexpr VecB b3 = VecB(3);
|
||||
static constexpr VecB b4 = VecB(4);
|
||||
static constexpr VecB b5 = VecB(5);
|
||||
static constexpr VecB b6 = VecB(6);
|
||||
static constexpr VecB b7 = VecB(7);
|
||||
static constexpr VecB b8 = VecB(8);
|
||||
static constexpr VecB b9 = VecB(9);
|
||||
static constexpr VecB b10 = VecB(10);
|
||||
static constexpr VecB b11 = VecB(11);
|
||||
static constexpr VecB b12 = VecB(12);
|
||||
static constexpr VecB b13 = VecB(13);
|
||||
static constexpr VecB b14 = VecB(14);
|
||||
static constexpr VecB b15 = VecB(15);
|
||||
static constexpr VecB b16 = VecB(16);
|
||||
static constexpr VecB b17 = VecB(17);
|
||||
static constexpr VecB b18 = VecB(18);
|
||||
static constexpr VecB b19 = VecB(19);
|
||||
static constexpr VecB b20 = VecB(20);
|
||||
static constexpr VecB b21 = VecB(21);
|
||||
static constexpr VecB b22 = VecB(22);
|
||||
static constexpr VecB b23 = VecB(23);
|
||||
static constexpr VecB b24 = VecB(24);
|
||||
static constexpr VecB b25 = VecB(25);
|
||||
static constexpr VecB b26 = VecB(26);
|
||||
static constexpr VecB b27 = VecB(27);
|
||||
static constexpr VecB b28 = VecB(28);
|
||||
static constexpr VecB b29 = VecB(29);
|
||||
static constexpr VecB b30 = VecB(30);
|
||||
static constexpr VecB b31 = VecB(31);
|
||||
|
||||
static constexpr VecH h0 = VecH(0);
|
||||
static constexpr VecH h1 = VecH(1);
|
||||
static constexpr VecH h2 = VecH(2);
|
||||
static constexpr VecH h3 = VecH(3);
|
||||
static constexpr VecH h4 = VecH(4);
|
||||
static constexpr VecH h5 = VecH(5);
|
||||
static constexpr VecH h6 = VecH(6);
|
||||
static constexpr VecH h7 = VecH(7);
|
||||
static constexpr VecH h8 = VecH(8);
|
||||
static constexpr VecH h9 = VecH(9);
|
||||
static constexpr VecH h10 = VecH(10);
|
||||
static constexpr VecH h11 = VecH(11);
|
||||
static constexpr VecH h12 = VecH(12);
|
||||
static constexpr VecH h13 = VecH(13);
|
||||
static constexpr VecH h14 = VecH(14);
|
||||
static constexpr VecH h15 = VecH(15);
|
||||
static constexpr VecH h16 = VecH(16);
|
||||
static constexpr VecH h17 = VecH(17);
|
||||
static constexpr VecH h18 = VecH(18);
|
||||
static constexpr VecH h19 = VecH(19);
|
||||
static constexpr VecH h20 = VecH(20);
|
||||
static constexpr VecH h21 = VecH(21);
|
||||
static constexpr VecH h22 = VecH(22);
|
||||
static constexpr VecH h23 = VecH(23);
|
||||
static constexpr VecH h24 = VecH(24);
|
||||
static constexpr VecH h25 = VecH(25);
|
||||
static constexpr VecH h26 = VecH(26);
|
||||
static constexpr VecH h27 = VecH(27);
|
||||
static constexpr VecH h28 = VecH(28);
|
||||
static constexpr VecH h29 = VecH(29);
|
||||
static constexpr VecH h30 = VecH(30);
|
||||
static constexpr VecH h31 = VecH(31);
|
||||
|
||||
static constexpr VecS s0 = VecS(0);
|
||||
static constexpr VecS s1 = VecS(1);
|
||||
static constexpr VecS s2 = VecS(2);
|
||||
static constexpr VecS s3 = VecS(3);
|
||||
static constexpr VecS s4 = VecS(4);
|
||||
static constexpr VecS s5 = VecS(5);
|
||||
static constexpr VecS s6 = VecS(6);
|
||||
static constexpr VecS s7 = VecS(7);
|
||||
static constexpr VecS s8 = VecS(8);
|
||||
static constexpr VecS s9 = VecS(9);
|
||||
static constexpr VecS s10 = VecS(10);
|
||||
static constexpr VecS s11 = VecS(11);
|
||||
static constexpr VecS s12 = VecS(12);
|
||||
static constexpr VecS s13 = VecS(13);
|
||||
static constexpr VecS s14 = VecS(14);
|
||||
static constexpr VecS s15 = VecS(15);
|
||||
static constexpr VecS s16 = VecS(16);
|
||||
static constexpr VecS s17 = VecS(17);
|
||||
static constexpr VecS s18 = VecS(18);
|
||||
static constexpr VecS s19 = VecS(19);
|
||||
static constexpr VecS s20 = VecS(20);
|
||||
static constexpr VecS s21 = VecS(21);
|
||||
static constexpr VecS s22 = VecS(22);
|
||||
static constexpr VecS s23 = VecS(23);
|
||||
static constexpr VecS s24 = VecS(24);
|
||||
static constexpr VecS s25 = VecS(25);
|
||||
static constexpr VecS s26 = VecS(26);
|
||||
static constexpr VecS s27 = VecS(27);
|
||||
static constexpr VecS s28 = VecS(28);
|
||||
static constexpr VecS s29 = VecS(29);
|
||||
static constexpr VecS s30 = VecS(30);
|
||||
static constexpr VecS s31 = VecS(31);
|
||||
|
||||
static constexpr VecD d0 = VecD(0);
|
||||
static constexpr VecD d1 = VecD(1);
|
||||
static constexpr VecD d2 = VecD(2);
|
||||
static constexpr VecD d3 = VecD(3);
|
||||
static constexpr VecD d4 = VecD(4);
|
||||
static constexpr VecD d5 = VecD(5);
|
||||
static constexpr VecD d6 = VecD(6);
|
||||
static constexpr VecD d7 = VecD(7);
|
||||
static constexpr VecD d8 = VecD(8);
|
||||
static constexpr VecD d9 = VecD(9);
|
||||
static constexpr VecD d10 = VecD(10);
|
||||
static constexpr VecD d11 = VecD(11);
|
||||
static constexpr VecD d12 = VecD(12);
|
||||
static constexpr VecD d13 = VecD(13);
|
||||
static constexpr VecD d14 = VecD(14);
|
||||
static constexpr VecD d15 = VecD(15);
|
||||
static constexpr VecD d16 = VecD(16);
|
||||
static constexpr VecD d17 = VecD(17);
|
||||
static constexpr VecD d18 = VecD(18);
|
||||
static constexpr VecD d19 = VecD(19);
|
||||
static constexpr VecD d20 = VecD(20);
|
||||
static constexpr VecD d21 = VecD(21);
|
||||
static constexpr VecD d22 = VecD(22);
|
||||
static constexpr VecD d23 = VecD(23);
|
||||
static constexpr VecD d24 = VecD(24);
|
||||
static constexpr VecD d25 = VecD(25);
|
||||
static constexpr VecD d26 = VecD(26);
|
||||
static constexpr VecD d27 = VecD(27);
|
||||
static constexpr VecD d28 = VecD(28);
|
||||
static constexpr VecD d29 = VecD(29);
|
||||
static constexpr VecD d30 = VecD(30);
|
||||
static constexpr VecD d31 = VecD(31);
|
||||
|
||||
static constexpr VecV q0 = VecV(0);
|
||||
static constexpr VecV q1 = VecV(1);
|
||||
static constexpr VecV q2 = VecV(2);
|
||||
static constexpr VecV q3 = VecV(3);
|
||||
static constexpr VecV q4 = VecV(4);
|
||||
static constexpr VecV q5 = VecV(5);
|
||||
static constexpr VecV q6 = VecV(6);
|
||||
static constexpr VecV q7 = VecV(7);
|
||||
static constexpr VecV q8 = VecV(8);
|
||||
static constexpr VecV q9 = VecV(9);
|
||||
static constexpr VecV q10 = VecV(10);
|
||||
static constexpr VecV q11 = VecV(11);
|
||||
static constexpr VecV q12 = VecV(12);
|
||||
static constexpr VecV q13 = VecV(13);
|
||||
static constexpr VecV q14 = VecV(14);
|
||||
static constexpr VecV q15 = VecV(15);
|
||||
static constexpr VecV q16 = VecV(16);
|
||||
static constexpr VecV q17 = VecV(17);
|
||||
static constexpr VecV q18 = VecV(18);
|
||||
static constexpr VecV q19 = VecV(19);
|
||||
static constexpr VecV q20 = VecV(20);
|
||||
static constexpr VecV q21 = VecV(21);
|
||||
static constexpr VecV q22 = VecV(22);
|
||||
static constexpr VecV q23 = VecV(23);
|
||||
static constexpr VecV q24 = VecV(24);
|
||||
static constexpr VecV q25 = VecV(25);
|
||||
static constexpr VecV q26 = VecV(26);
|
||||
static constexpr VecV q27 = VecV(27);
|
||||
static constexpr VecV q28 = VecV(28);
|
||||
static constexpr VecV q29 = VecV(29);
|
||||
static constexpr VecV q30 = VecV(30);
|
||||
static constexpr VecV q31 = VecV(31);
|
||||
|
||||
static constexpr VecV v0 = VecV(0);
|
||||
static constexpr VecV v1 = VecV(1);
|
||||
static constexpr VecV v2 = VecV(2);
|
||||
static constexpr VecV v3 = VecV(3);
|
||||
static constexpr VecV v4 = VecV(4);
|
||||
static constexpr VecV v5 = VecV(5);
|
||||
static constexpr VecV v6 = VecV(6);
|
||||
static constexpr VecV v7 = VecV(7);
|
||||
static constexpr VecV v8 = VecV(8);
|
||||
static constexpr VecV v9 = VecV(9);
|
||||
static constexpr VecV v10 = VecV(10);
|
||||
static constexpr VecV v11 = VecV(11);
|
||||
static constexpr VecV v12 = VecV(12);
|
||||
static constexpr VecV v13 = VecV(13);
|
||||
static constexpr VecV v14 = VecV(14);
|
||||
static constexpr VecV v15 = VecV(15);
|
||||
static constexpr VecV v16 = VecV(16);
|
||||
static constexpr VecV v17 = VecV(17);
|
||||
static constexpr VecV v18 = VecV(18);
|
||||
static constexpr VecV v19 = VecV(19);
|
||||
static constexpr VecV v20 = VecV(20);
|
||||
static constexpr VecV v21 = VecV(21);
|
||||
static constexpr VecV v22 = VecV(22);
|
||||
static constexpr VecV v23 = VecV(23);
|
||||
static constexpr VecV v24 = VecV(24);
|
||||
static constexpr VecV v25 = VecV(25);
|
||||
static constexpr VecV v26 = VecV(26);
|
||||
static constexpr VecV v27 = VecV(27);
|
||||
static constexpr VecV v28 = VecV(28);
|
||||
static constexpr VecV v29 = VecV(29);
|
||||
static constexpr VecV v30 = VecV(30);
|
||||
static constexpr VecV v31 = VecV(31);
|
||||
|
||||
#ifndef _DOXYGEN
|
||||
} // {regs}
|
||||
|
||||
// Make `a64::regs` accessible through `a64` namespace as well.
|
||||
using namespace regs;
|
||||
#endif
|
||||
|
||||
//! \}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // ASMJIT_ARM_A64OPERAND_H_INCLUDED
|
||||
806
src/asmjit/arm/a64rapass.cpp
Normal file
806
src/asmjit/arm/a64rapass.cpp
Normal file
@@ -0,0 +1,806 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include "../core/api-build_p.h"
|
||||
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_COMPILER)
|
||||
|
||||
#include "../core/cpuinfo.h"
|
||||
#include "../core/support.h"
|
||||
#include "../core/type.h"
|
||||
#include "../arm/a64assembler.h"
|
||||
#include "../arm/a64compiler.h"
|
||||
#include "../arm/a64emithelper_p.h"
|
||||
#include "../arm/a64instapi_p.h"
|
||||
#include "../arm/a64instdb_p.h"
|
||||
#include "../arm/a64rapass_p.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
// a64::ARMRAPass - Helpers
|
||||
// ========================
|
||||
|
||||
// TODO: [ARM] These should be shared with all backends.
|
||||
ASMJIT_MAYBE_UNUSED
|
||||
static inline uint64_t raImmMaskFromSize(uint32_t size) noexcept {
|
||||
ASMJIT_ASSERT(size > 0 && size < 256);
|
||||
static const uint64_t masks[] = {
|
||||
0x00000000000000FFu, // 1
|
||||
0x000000000000FFFFu, // 2
|
||||
0x00000000FFFFFFFFu, // 4
|
||||
0xFFFFFFFFFFFFFFFFu, // 8
|
||||
0x0000000000000000u, // 16
|
||||
0x0000000000000000u, // 32
|
||||
0x0000000000000000u, // 64
|
||||
0x0000000000000000u, // 128
|
||||
0x0000000000000000u // 256
|
||||
};
|
||||
return masks[Support::ctz(size)];
|
||||
}
|
||||
|
||||
static const RegMask raConsecutiveLeadCountToRegMaskFilter[5] = {
|
||||
0xFFFFFFFFu, // [0] No consecutive.
|
||||
0x00000000u, // [1] Invalid, never used.
|
||||
0x7FFFFFFFu, // [2] 2 consecutive registers.
|
||||
0x3FFFFFFFu, // [3] 3 consecutive registers.
|
||||
0x1FFFFFFFu // [4] 4 consecutive registers.
|
||||
};
|
||||
|
||||
static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept {
|
||||
static constexpr RATiedFlags map[] = {
|
||||
RATiedFlags::kNone,
|
||||
RATiedFlags::kRead | RATiedFlags::kUse, // kRead
|
||||
RATiedFlags::kWrite | RATiedFlags::kOut, // kWrite
|
||||
RATiedFlags::kRW | RATiedFlags::kUse, // kRW
|
||||
};
|
||||
|
||||
return map[uint32_t(rwFlags & OpRWFlags::kRW)];
|
||||
}
|
||||
|
||||
static inline RATiedFlags raRegRwFlags(OpRWFlags flags) noexcept {
|
||||
return raUseOutFlagsFromRWFlags(flags);
|
||||
}
|
||||
|
||||
static inline RATiedFlags raMemBaseRwFlags(OpRWFlags flags) noexcept {
|
||||
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemBaseRW)>::value;
|
||||
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
|
||||
}
|
||||
|
||||
static inline RATiedFlags raMemIndexRwFlags(OpRWFlags flags) noexcept {
|
||||
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemIndexRW)>::value;
|
||||
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
|
||||
}
|
||||
// a64::RACFGBuilder
|
||||
// =================
|
||||
|
||||
class RACFGBuilder : public RACFGBuilderT<RACFGBuilder> {
|
||||
public:
|
||||
Arch _arch;
|
||||
|
||||
inline RACFGBuilder(ARMRAPass* pass) noexcept
|
||||
: RACFGBuilderT<RACFGBuilder>(pass),
|
||||
_arch(pass->cc()->arch()) {}
|
||||
|
||||
inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cc); }
|
||||
|
||||
Error onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept;
|
||||
|
||||
Error onBeforeInvoke(InvokeNode* invokeNode) noexcept;
|
||||
Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept;
|
||||
|
||||
Error moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept;
|
||||
Error moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept;
|
||||
Error moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept;
|
||||
|
||||
Error onBeforeRet(FuncRetNode* funcRet) noexcept;
|
||||
Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept;
|
||||
};
|
||||
|
||||
// a64::RACFGBuilder - OnInst
|
||||
// ==========================
|
||||
|
||||
// TODO: [ARM] This is just a workaround...
// Classifies an instruction id into a control-flow category (jump / branch /
// call / return / regular) used by the CFG builder.
static InstControlFlow getControlFlowType(InstId instId) noexcept {
  switch (instId) {
    case Inst::kIdB:
    case Inst::kIdBr:
      // An unconditional B/BR transfers control unconditionally (jump); a
      // conditional one falls through when the condition fails (branch).
      // NOTE(review): the condition code is extracted from `instId` itself,
      // which presumably embeds it in its upper bits — confirm against the
      // instruction-id encoding, since the switch above matches the raw id.
      if (BaseInst::extractARMCondCode(instId) == CondCode::kAL)
        return InstControlFlow::kJump;
      else
        return InstControlFlow::kBranch;

    // BL/BLR link the return address — function calls.
    case Inst::kIdBl:
    case Inst::kIdBlr:
      return InstControlFlow::kCall;

    // Compare-and-branch / test-and-branch are always conditional.
    case Inst::kIdCbz:
    case Inst::kIdCbnz:
    case Inst::kIdTbz:
    case Inst::kIdTbnz:
      return InstControlFlow::kBranch;

    case Inst::kIdRet:
      return InstControlFlow::kReturn;

    default:
      return InstControlFlow::kRegular;
  }
}
|
||||
|
||||
// Collects register-allocation constraints of a single instruction into `ib`
// and classifies its control flow into `controlType`.
//
// For every register operand the USE/OUT tied flags, allocable masks, fixed
// physical ids, and rewrite masks are computed. Memory operands contribute
// their base/index registers. Consecutive-register sequences are linked
// through `consecutiveParent`.
Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept {
  InstRWInfo rwInfo;

  InstId instId = inst->id();
  if (Inst::isDefinedId(instId)) {
    uint32_t opCount = inst->opCount();
    const Operand* opArray = inst->operands();
    ASMJIT_PROPAGATE(InstInternal::queryRWInfo(_arch, inst->baseInst(), opArray, opCount, &rwInfo));

    const InstDB::InstInfo& instInfo = InstDB::infoById(instId);
    uint32_t singleRegOps = 0;

    if (opCount) {
      // 0xFFFFFFFF means "no consecutive-register sequence is active".
      uint32_t consecutiveOffset = 0xFFFFFFFFu;
      uint32_t consecutiveParent = Globals::kInvalidId;

      for (uint32_t i = 0; i < opCount; i++) {
        const Operand& op = opArray[i];
        const OpRWInfo& opRwInfo = rwInfo.operand(i);

        if (op.isReg()) {
          // Register Operand
          // ----------------
          const Reg& reg = op.as<Reg>();

          RATiedFlags flags = raRegRwFlags(opRwInfo.opFlags());
          uint32_t vIndex = Operand::virtIdToIndex(reg.id());

          if (vIndex < Operand::kVirtIdCount) {
            RAWorkReg* workReg;
            ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));

            // Use RW instead of Write in case that not the whole register is overwritten. This is important for
            // liveness as we cannot kill a register that will be used.
            if ((flags & RATiedFlags::kRW) == RATiedFlags::kWrite) {
              if (workReg->regByteMask() & ~(opRwInfo.writeByteMask() | opRwInfo.extendByteMask())) {
                // Not write-only operation.
                flags = (flags & ~RATiedFlags::kOut) | (RATiedFlags::kRead | RATiedFlags::kUse);
              }
            }

            RegGroup group = workReg->group();

            RegMask useRegs = _pass->_availableRegs[group];
            RegMask outRegs = useRegs;

            uint32_t useId = BaseReg::kIdBad;
            uint32_t outId = BaseReg::kIdBad;

            uint32_t useRewriteMask = 0;
            uint32_t outRewriteMask = 0;

            if (opRwInfo.consecutiveLeadCount()) {
              // There must be a single consecutive register lead, otherwise the RW data is invalid.
              if (consecutiveOffset != 0xFFFFFFFFu)
                return DebugUtils::errored(kErrorInvalidState);

              // A consecutive lead register cannot be used as a consecutive +1/+2/+3 register, the registers must be distinct.
              if (RATiedReg::consecutiveDataFromFlags(flags) != 0)
                return DebugUtils::errored(kErrorNotConsecutiveRegs);

              flags |= RATiedFlags::kLeadConsecutive | RATiedReg::consecutiveDataToFlags(opRwInfo.consecutiveLeadCount() - 1);
              consecutiveOffset = 0;

              // Restrict the allocable set so the lead plus its followers fit.
              RegMask filter = raConsecutiveLeadCountToRegMaskFilter[opRwInfo.consecutiveLeadCount()];
              if (Support::test(flags, RATiedFlags::kUse)) {
                flags |= RATiedFlags::kUseConsecutive;
                useRegs &= filter;
              }
              else {
                flags |= RATiedFlags::kOutConsecutive;
                outRegs &= filter;
              }
            }

            if (Support::test(flags, RATiedFlags::kUse)) {
              useRewriteMask = Support::bitMask(inst->getRewriteIndex(&reg._baseId));
              if (opRwInfo.hasOpFlag(OpRWFlags::kRegPhysId)) {
                useId = opRwInfo.physId();
                flags |= RATiedFlags::kUseFixed;
              }
              else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
                // A +N consecutive register requires an active lead.
                if (consecutiveOffset == 0xFFFFFFFFu)
                  return DebugUtils::errored(kErrorInvalidState);
                flags |= RATiedFlags::kUseConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
              }
            }
            else {
              outRewriteMask = Support::bitMask(inst->getRewriteIndex(&reg._baseId));
              if (opRwInfo.hasOpFlag(OpRWFlags::kRegPhysId)) {
                outId = opRwInfo.physId();
                flags |= RATiedFlags::kOutFixed;
              }
              else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
                if (consecutiveOffset == 0xFFFFFFFFu)
                  return DebugUtils::errored(kErrorInvalidState);
                flags |= RATiedFlags::kOutConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
              }
            }

            // Special cases regarding element access.
            if (reg.as<Vec>().hasElementIndex()) {
              // Only the first 0..15 registers can be used if the register uses
              // element accessor that accesses half-words (h[0..7] elements).
              if (instInfo.hasFlag(InstDB::kInstFlagVH0_15) && reg.as<Vec>().elementType() == Vec::kElementTypeH) {
                // BUG FIX: this previously masked `useId`/`outId` (physical
                // register ids, always < 256), which was a no-op. The intent
                // stated above is to restrict allocation to V0..V15, so the
                // allocable register masks must be filtered instead.
                if (Support::test(flags, RATiedFlags::kUse))
                  useRegs &= 0x0000FFFFu;
                else
                  outRegs &= 0x0000FFFFu;
              }
            }

            ASMJIT_PROPAGATE(ib.add(workReg, flags, useRegs, useId, useRewriteMask, outRegs, outId, outRewriteMask, opRwInfo.rmSize(), consecutiveParent));
            if (singleRegOps == i)
              singleRegOps++;

            if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive))
              consecutiveParent = workReg->workId();
          }
        }
        else if (op.isMem()) {
          // Memory Operand
          // --------------
          const Mem& mem = op.as<Mem>();

          if (mem.isRegHome()) {
            // A spilled virtual register - make sure it has a stack slot.
            RAWorkReg* workReg;
            ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg));
            _pass->getOrCreateStackSlot(workReg);
          }
          else if (mem.hasBaseReg()) {
            uint32_t vIndex = Operand::virtIdToIndex(mem.baseId());
            if (vIndex < Operand::kVirtIdCount) {
              RAWorkReg* workReg;
              ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));

              RATiedFlags flags = raMemBaseRwFlags(opRwInfo.opFlags());
              RegGroup group = workReg->group();
              RegMask allocable = _pass->_availableRegs[group];

              // Base registers have never fixed id on ARM.
              const uint32_t useId = BaseReg::kIdBad;
              const uint32_t outId = BaseReg::kIdBad;

              uint32_t useRewriteMask = 0;
              uint32_t outRewriteMask = 0;

              if (Support::test(flags, RATiedFlags::kUse))
                useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
              else
                outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));

              ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
            }
          }

          if (mem.hasIndexReg()) {
            uint32_t vIndex = Operand::virtIdToIndex(mem.indexId());
            if (vIndex < Operand::kVirtIdCount) {
              RAWorkReg* workReg;
              ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));

              RATiedFlags flags = raMemIndexRwFlags(opRwInfo.opFlags());
              RegGroup group = workReg->group();
              RegMask allocable = _pass->_availableRegs[group];

              // Index registers have never fixed id on ARM.
              const uint32_t useId = BaseReg::kIdBad;
              const uint32_t outId = BaseReg::kIdBad;

              uint32_t useRewriteMask = 0;
              uint32_t outRewriteMask = 0;

              if (Support::test(flags, RATiedFlags::kUse))
                useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
              else
                outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));

              ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
            }
          }
        }
      }
    }

    controlType = getControlFlowType(instId);
  }

  return kErrorOk;
}
|
||||
|
||||
// a64::RACFGBuilder - OnInvoke
|
||||
// ============================
|
||||
|
||||
// Prepares an invoke node before it is processed by the CFG builder.
//
// Immediate arguments are materialized into virtual registers (or stored to
// their stack slots), register arguments destined for the stack are stored,
// and incompatible register-group assignments are rejected. The helper moves
// are emitted BEFORE the invoke node (cursor is moved to its predecessor).
Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {
  const FuncDetail& fd = invokeNode->detail();
  uint32_t argCount = invokeNode->argCount();

  // Emit argument-preparation code before the call itself.
  cc()->_setCursor(invokeNode->prev());

  for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
    const FuncValuePack& argPack = fd.argPack(argIndex);
    for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
      // Values in a pack are contiguous - the first empty slot ends the pack.
      if (!argPack[valueIndex])
        break;

      const FuncValue& arg = argPack[valueIndex];
      const Operand& op = invokeNode->arg(argIndex, valueIndex);

      if (op.isNone())
        continue;

      if (op.isReg()) {
        const Reg& reg = op.as<Reg>();
        RAWorkReg* workReg;
        ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));

        if (arg.isReg()) {
          RegGroup regGroup = workReg->group();
          RegGroup argGroup = Reg::groupOf(arg.regType());

          // Cross-group assignment (e.g. GP value into a vector argument)
          // would need a conversion move that is not implemented yet.
          if (regGroup != argGroup) {
            // TODO: [ARM] Conversion is not supported.
            return DebugUtils::errored(kErrorInvalidAssignment);
          }
        }
        else {
          // Register value passed on the stack - store it now.
          ASMJIT_PROPAGATE(moveRegToStackArg(invokeNode, arg, reg));
        }
      }
      else if (op.isImm()) {
        if (arg.isReg()) {
          // Materialize the immediate and replace the argument operand with
          // the new virtual register so the RA can assign it.
          BaseReg reg;
          ASMJIT_PROPAGATE(moveImmToRegArg(invokeNode, arg, op.as<Imm>(), &reg));
          invokeNode->_args[argIndex][valueIndex] = reg;
        }
        else {
          ASMJIT_PROPAGATE(moveImmToStackArg(invokeNode, arg, op.as<Imm>()));
        }
      }
    }
  }

  // Anything emitted from here on goes after the invoke node.
  cc()->_setCursor(invokeNode);

  if (fd.hasRet()) {
    for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
      const FuncValue& ret = fd.ret(valueIndex);
      if (!ret)
        break;

      const Operand& op = invokeNode->ret(valueIndex);
      if (op.isReg()) {
        const Reg& reg = op.as<Reg>();
        RAWorkReg* workReg;
        ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));

        if (ret.isReg()) {
          RegGroup regGroup = workReg->group();
          RegGroup retGroup = Reg::groupOf(ret.regType());

          if (regGroup != retGroup) {
            // TODO: [ARM] Conversion is not supported.
            return DebugUtils::errored(kErrorInvalidAssignment);
          }
        }
      }
    }
  }

  // This block has function call(s).
  _curBlock->addFlags(RABlockFlags::kHasFuncCalls);
  _pass->func()->frame().addAttributes(FuncAttributes::kHasFuncCalls);
  _pass->func()->frame().updateCallStackSize(fd.argStackSize());

  return kErrorOk;
}
|
||||
|
||||
// Records register-allocation constraints of an invoke node: fixed physical
// ids of register arguments and return values, and the set of registers
// clobbered by the call (all non-preserved registers of each group).
Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept {
  uint32_t argCount = invokeNode->argCount();
  const FuncDetail& fd = invokeNode->detail();

  for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
    const FuncValuePack& argPack = fd.argPack(argIndex);
    for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
      // NOTE(review): this uses `continue` on an empty pack slot whereas
      // onBeforeInvoke() uses `break` - confirm whether packs can be sparse.
      if (!argPack[valueIndex])
        continue;

      const FuncValue& arg = argPack[valueIndex];
      const Operand& op = invokeNode->arg(argIndex, valueIndex);

      if (op.isNone())
        continue;

      if (op.isReg()) {
        const Reg& reg = op.as<Reg>();
        RAWorkReg* workReg;
        ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));

        if (arg.isIndirect()) {
          // Indirect arguments must be GP registers (they hold an address).
          RegGroup regGroup = workReg->group();
          if (regGroup != RegGroup::kGp)
            return DebugUtils::errored(kErrorInvalidState);
          ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
        }
        else if (arg.isReg()) {
          RegGroup regGroup = workReg->group();
          RegGroup argGroup = Reg::groupOf(arg.regType());

          // Mismatched groups were already rejected in onBeforeInvoke().
          if (regGroup == argGroup) {
            ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
          }
        }
      }
    }
  }

  for (uint32_t retIndex = 0; retIndex < Globals::kMaxValuePack; retIndex++) {
    const FuncValue& ret = fd.ret(retIndex);
    if (!ret)
      break;

    const Operand& op = invokeNode->ret(retIndex);
    if (op.isReg()) {
      const Reg& reg = op.as<Reg>();
      RAWorkReg* workReg;
      ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));

      if (ret.isReg()) {
        RegGroup regGroup = workReg->group();
        RegGroup retGroup = Reg::groupOf(ret.regType());

        if (regGroup == retGroup) {
          ASMJIT_PROPAGATE(ib.addCallRet(workReg, ret.regId()));
        }
      }
      else {
        // Non-register returns are not representable here.
        return DebugUtils::errored(kErrorInvalidAssignment);
      }
    }
  }

  // Setup clobbered registers - everything that the calling convention does
  // not preserve, per register group.
  ib._clobbered[0] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(0)]) & ~fd.preservedRegs(RegGroup(0));
  ib._clobbered[1] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(1)]) & ~fd.preservedRegs(RegGroup(1));
  ib._clobbered[2] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(2)]) & ~fd.preservedRegs(RegGroup(2));
  ib._clobbered[3] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(3)]) & ~fd.preservedRegs(RegGroup(3));

  return kErrorOk;
}
|
||||
|
||||
// a64::RACFGBuilder - MoveImmToRegArg
|
||||
// ===================================
|
||||
|
||||
// Materializes an immediate argument into a freshly created virtual register
// written to `out`.
//
// The immediate is sign- or zero-extended to 64 bits according to the
// argument's type; non-integer argument types are rejected with
// kErrorInvalidAssignment. The new register gets kCallArgWeight so the
// allocator prefers keeping it in its call-argument register.
Error RACFGBuilder::moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept {
  DebugUtils::unused(invokeNode);
  ASMJIT_ASSERT(arg.isReg());

  // Work on a copy - the extension helpers mutate the immediate in place.
  Imm imm(imm_);
  TypeId typeId = TypeId::kVoid;

  switch (arg.typeId()) {
    case TypeId::kInt8  : typeId = TypeId::kUInt64; imm.signExtend8Bits(); break;
    case TypeId::kUInt8 : typeId = TypeId::kUInt64; imm.zeroExtend8Bits(); break;
    case TypeId::kInt16 : typeId = TypeId::kUInt64; imm.signExtend16Bits(); break;
    case TypeId::kUInt16: typeId = TypeId::kUInt64; imm.zeroExtend16Bits(); break;
    case TypeId::kInt32 : typeId = TypeId::kUInt64; imm.signExtend32Bits(); break;
    case TypeId::kUInt32: typeId = TypeId::kUInt64; imm.zeroExtend32Bits(); break;
    case TypeId::kInt64 : typeId = TypeId::kUInt64; break;
    case TypeId::kUInt64: typeId = TypeId::kUInt64; break;

    default:
      return DebugUtils::errored(kErrorInvalidAssignment);
  }

  ASMJIT_PROPAGATE(cc()->_newReg(out, typeId, nullptr));
  cc()->virtRegById(out->id())->setWeight(BaseRAPass::kCallArgWeight);
  return cc()->mov(out->as<Gp>(), imm);
}
|
||||
|
||||
// a64::RACFGBuilder - MoveImmToStackArg
|
||||
// =====================================
|
||||
|
||||
// Materializes an immediate argument and stores it to its stack slot.
Error RACFGBuilder::moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept {
  // The immediate is first moved into a temporary virtual register and the
  // register is then stored to the argument's stack location.
  BaseReg tmpReg;
  ASMJIT_PROPAGATE(moveImmToRegArg(invokeNode, arg, imm_, &tmpReg));
  return moveRegToStackArg(invokeNode, arg, tmpReg);
}
|
||||
|
||||
// a64::RACFGBuilder - MoveRegToStackArg
|
||||
// =====================================
|
||||
|
||||
// Stores a register argument to its stack slot (SP-relative).
Error RACFGBuilder::moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept {
  DebugUtils::unused(invokeNode);

  // The destination is addressed relative to SP at the argument's offset.
  Mem dst = ptr(_pass->_sp.as<Gp>(), arg.stackOffset());

  if (reg.isGp()) {
    return cc()->str(reg.as<Gp>(), dst);
  }
  else if (reg.isVec()) {
    return cc()->str(reg.as<Vec>(), dst);
  }
  else {
    // Only GP and vector registers can be stored here.
    return DebugUtils::errored(kErrorInvalidState);
  }
}
|
||||
|
||||
// a64::RACFGBuilder - OnReg
|
||||
// =========================
|
||||
|
||||
// Called before a function return node is processed - nothing needs to be
// emitted for AArch64 returns, so this is a no-op.
Error RACFGBuilder::onBeforeRet(FuncRetNode* funcRet) noexcept {
  DebugUtils::unused(funcRet);
  return kErrorOk;
}
|
||||
|
||||
// Records register-allocation constraints of a function return: each returned
// virtual register is tied to its calling-convention return register.
Error RACFGBuilder::onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept {
  const FuncDetail& funcDetail = _pass->func()->detail();
  const Operand* opArray = funcRet->operands();
  uint32_t opCount = funcRet->opCount();

  for (uint32_t i = 0; i < opCount; i++) {
    const Operand& op = opArray[i];
    if (op.isNone()) continue;

    // Only register returns are supported by this backend.
    const FuncValue& ret = funcDetail.ret(i);
    if (ASMJIT_UNLIKELY(!ret.isReg()))
      return DebugUtils::errored(kErrorInvalidAssignment);

    if (op.isReg()) {
      // Register return value.
      const Reg& reg = op.as<Reg>();
      uint32_t vIndex = Operand::virtIdToIndex(reg.id());

      if (vIndex < Operand::kVirtIdCount) {
        RAWorkReg* workReg;
        ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));

        // Tie the register as a fixed USE at the convention's return id.
        RegGroup group = workReg->group();
        RegMask allocable = _pass->_availableRegs[group];
        ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, ret.regId(), 0, 0, BaseReg::kIdBad, 0));
      }
    }
    else {
      return DebugUtils::errored(kErrorInvalidAssignment);
    }
  }

  return kErrorOk;
}
|
||||
|
||||
// a64::ARMRAPass - Construction & Destruction
|
||||
// ===========================================
|
||||
|
||||
// Creates the pass and points the base class's emit-helper interface at the
// architecture-specific EmitHelper instance owned by this pass.
ARMRAPass::ARMRAPass() noexcept
  : BaseRAPass() { _iEmitHelper = &_emitHelper; }
ARMRAPass::~ARMRAPass() noexcept {}
|
||||
|
||||
// a64::ARMRAPass - OnInit / OnDone
|
||||
// ================================
|
||||
|
||||
void ARMRAPass::onInit() noexcept {
|
||||
Arch arch = cc()->arch();
|
||||
|
||||
_emitHelper._emitter = _cb;
|
||||
|
||||
_archTraits = &ArchTraits::byArch(arch);
|
||||
_physRegCount.set(RegGroup::kGp, 32);
|
||||
_physRegCount.set(RegGroup::kVec, 32);
|
||||
_physRegCount.set(RegGroup::kExtraVirt2, 0);
|
||||
_physRegCount.set(RegGroup::kExtraVirt3, 0);
|
||||
_buildPhysIndex();
|
||||
|
||||
_availableRegCount = _physRegCount;
|
||||
_availableRegs[RegGroup::kGp] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kGp));
|
||||
_availableRegs[RegGroup::kVec] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kVec));
|
||||
_availableRegs[RegGroup::kExtraVirt3] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kExtraVirt2));
|
||||
_availableRegs[RegGroup::kExtraVirt3] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kExtraVirt3));
|
||||
|
||||
_scratchRegIndexes[0] = uint8_t(27);
|
||||
_scratchRegIndexes[1] = uint8_t(28);
|
||||
|
||||
// The architecture specific setup makes implicitly all registers available. So
|
||||
// make unavailable all registers that are special and cannot be used in general.
|
||||
bool hasFP = _func->frame().hasPreservedFP();
|
||||
|
||||
if (hasFP)
|
||||
makeUnavailable(RegGroup::kGp, Gp::kIdFp);
|
||||
|
||||
makeUnavailable(RegGroup::kGp, Gp::kIdSp);
|
||||
makeUnavailable(RegGroup::kGp, Gp::kIdOs); // OS-specific use, usually TLS.
|
||||
|
||||
_sp = sp;
|
||||
_fp = x29;
|
||||
}
|
||||
|
||||
// No per-function cleanup is needed by this pass.
void ARMRAPass::onDone() noexcept {}
|
||||
|
||||
// a64::ARMRAPass - BuildCFG
|
||||
// =========================
|
||||
|
||||
// Builds the control-flow graph using the AArch64-specific CFG builder.
Error ARMRAPass::buildCFG() noexcept {
  RACFGBuilder builder(this);
  return builder.run();
}
|
||||
|
||||
// a64::ARMRAPass - Rewrite
|
||||
// ========================
|
||||
|
||||
// Rewrites the node range [first, stop): replaces virtual register ids with
// the physical ids assigned by the allocator, patches/removes FuncRet nodes,
// and rewrites reg-home memory operands into SP-relative stack-slot accesses.
ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) noexcept {
  uint32_t virtCount = cc()->_vRegArray.size();

  BaseNode* node = first;
  while (node != stop) {
    // Cache the successor - the current node may be removed below.
    BaseNode* next = node->next();
    if (node->isInst()) {
      InstNode* inst = node->as<InstNode>();
      RAInst* raInst = node->passData<RAInst>();

      Operand* operands = inst->operands();
      uint32_t opCount = inst->opCount();

      uint32_t i;

      // Rewrite virtual registers into physical registers.
      if (raInst) {
        // If the instruction contains pass data (raInst) then it was a subject
        // for register allocation and must be rewritten to use physical regs.
        RATiedReg* tiedRegs = raInst->tiedRegs();
        uint32_t tiedCount = raInst->tiedCount();

        for (i = 0; i < tiedCount; i++) {
          RATiedReg* tiedReg = &tiedRegs[i];

          // Each set bit in the rewrite mask is an operand-data index whose
          // id must be replaced with the assigned USE physical id.
          Support::BitWordIterator<uint32_t> useIt(tiedReg->useRewriteMask());
          uint32_t useId = tiedReg->useId();
          while (useIt.hasNext())
            inst->rewriteIdAtIndex(useIt.next(), useId);

          // Same for OUT positions with the assigned OUT physical id.
          Support::BitWordIterator<uint32_t> outIt(tiedReg->outRewriteMask());
          uint32_t outId = tiedReg->outId();
          while (outIt.hasNext())
            inst->rewriteIdAtIndex(outIt.next(), outId);
        }

        // This data is allocated by Zone passed to `runOnFunction()`, which
        // will be reset after the RA pass finishes. So reset this data to
        // prevent having a dead pointer after the RA pass is complete.
        node->resetPassData();

        if (ASMJIT_UNLIKELY(node->type() != NodeType::kInst)) {
          // FuncRet terminates the flow, it must either be removed if the exit
          // label is next to it (optimization) or patched to an architecture
          // dependent jump instruction that jumps to the function's exit before
          // the epilog.
          if (node->type() == NodeType::kFuncRet) {
            RABlock* block = raInst->block();
            if (!isNextTo(node, _func->exitNode())) {
              cc()->_setCursor(node->prev());
              ASMJIT_PROPAGATE(emitJump(_func->exitNode()->label()));
            }

            BaseNode* prev = node->prev();
            cc()->removeNode(node);
            block->setLast(prev);
          }
        }
      }

      // Rewrite stack slot addresses.
      for (i = 0; i < opCount; i++) {
        Operand& op = operands[i];
        if (op.isMem()) {
          BaseMem& mem = op.as<BaseMem>();
          if (mem.isRegHome()) {
            uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId());
            if (ASMJIT_UNLIKELY(virtIndex >= virtCount))
              return DebugUtils::errored(kErrorInvalidVirtId);

            VirtReg* virtReg = cc()->virtRegByIndex(virtIndex);
            RAWorkReg* workReg = virtReg->workReg();
            ASMJIT_ASSERT(workReg != nullptr);

            // Replace the virtual base register with the slot's base
            // register plus the slot's resolved frame offset.
            RAStackSlot* slot = workReg->stackSlot();
            int32_t offset = slot->offset();

            mem._setBase(_sp.type(), slot->baseRegId());
            mem.clearRegHome();
            mem.addOffsetLo32(offset);
          }
        }
      }
    }

    node = next;
  }

  return kErrorOk;
}
|
||||
|
||||
// a64::ARMRAPass - Prolog & Epilog
|
||||
// ================================
|
||||
|
||||
// Updates the function frame before prolog/epilog insertion.
Error ARMRAPass::updateStackFrame() noexcept {
  FuncFrame& frame = _func->frame();

  // A function that calls other functions clobbers the link register (LR),
  // so it must be marked dirty to be saved/restored by prolog/epilog.
  if (frame.hasFuncCalls())
    frame.addDirtyRegs(RegGroup::kGp, Support::bitMask(Gp::kIdLr));

  return BaseRAPass::updateStackFrame();
}
|
||||
|
||||
// a64::ARMRAPass - OnEmit
|
||||
// =======================
|
||||
|
||||
// Emits a register-to-register move of the work register `workId` from
// physical register `srcPhysId` to `dstPhysId`.
Error ARMRAPass::emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
  RAWorkReg* workReg = workRegById(workId);
  const char* comment = nullptr;

#ifndef ASMJIT_NO_LOGGING
  // Annotate the move in the assembly listing when RA annotations are on.
  if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
    _tmpString.assignFormat("<MOVE> %s", workRegById(workId)->name());
    comment = _tmpString.data();
  }
#endif

  BaseReg dst(workReg->signature(), dstPhysId);
  BaseReg src(workReg->signature(), srcPhysId);
  return _emitHelper.emitRegMove(dst, src, workReg->typeId(), comment);
}
|
||||
|
||||
// Register swaps are not implemented by this backend - always fails with
// kErrorInvalidState.
Error ARMRAPass::emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
  DebugUtils::unused(aWorkId, aPhysId, bWorkId, bPhysId);
  return DebugUtils::errored(kErrorInvalidState);
}
|
||||
|
||||
// Emits a load of the work register `workId` from its stack slot into the
// physical register `dstPhysId`.
Error ARMRAPass::emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept {
  RAWorkReg* workReg = workRegById(workId);
  const char* comment = nullptr;

#ifndef ASMJIT_NO_LOGGING
  // Annotate the load in the assembly listing when RA annotations are on.
  if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
    _tmpString.assignFormat("<LOAD> %s", workRegById(workId)->name());
    comment = _tmpString.data();
  }
#endif

  BaseReg dstReg(workReg->signature(), dstPhysId);
  BaseMem srcMem(workRegAsMem(workReg));
  return _emitHelper.emitRegMove(dstReg, srcMem, workReg->typeId(), comment);
}
|
||||
|
||||
// Emits a save of the work register `workId` from the physical register
// `srcPhysId` into its stack slot.
Error ARMRAPass::emitSave(uint32_t workId, uint32_t srcPhysId) noexcept {
  RAWorkReg* workReg = workRegById(workId);
  const char* comment = nullptr;

#ifndef ASMJIT_NO_LOGGING
  // Annotate the save in the assembly listing when RA annotations are on.
  if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
    _tmpString.assignFormat("<SAVE> %s", workRegById(workId)->name());
    comment = _tmpString.data();
  }
#endif

  BaseMem dstMem(workRegAsMem(workReg));
  BaseReg srcReg(workReg->signature(), srcPhysId);
  return _emitHelper.emitRegMove(dstMem, srcReg, workReg->typeId(), comment);
}
|
||||
|
||||
// Emits an unconditional branch to `label`.
Error ARMRAPass::emitJump(const Label& label) noexcept {
  return cc()->b(label);
}
|
||||
|
||||
// Nothing has to be emitted before a call in this backend - kept as a hook
// required by the base class interface.
Error ARMRAPass::emitPreCall(InvokeNode* invokeNode) noexcept {
  DebugUtils::unused(invokeNode);
  return kErrorOk;
}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_COMPILER
|
||||
105
src/asmjit/arm/a64rapass_p.h
Normal file
105
src/asmjit/arm/a64rapass_p.h
Normal file
@@ -0,0 +1,105 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_A64RAPASS_P_H_INCLUDED
|
||||
#define ASMJIT_ARM_A64RAPASS_P_H_INCLUDED
|
||||
|
||||
#include "../core/api-config.h"
|
||||
#ifndef ASMJIT_NO_COMPILER
|
||||
|
||||
#include "../core/compiler.h"
|
||||
#include "../core/rabuilders_p.h"
|
||||
#include "../core/rapass_p.h"
|
||||
#include "../arm/a64assembler.h"
|
||||
#include "../arm/a64compiler.h"
|
||||
#include "../arm/a64emithelper_p.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \cond INTERNAL
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
//! ARM register allocation pass.
|
||||
//!
|
||||
//! Takes care of generating function prologs and epilogs, and also performs
|
||||
//! register allocation.
|
||||
class ARMRAPass : public BaseRAPass {
public:
  ASMJIT_NONCOPYABLE(ARMRAPass)
  typedef BaseRAPass Base;

  //! Architecture-specific emit helper used by move/load/save emitters.
  EmitHelper _emitHelper;

  //! \name Construction & Destruction
  //! \{

  ARMRAPass() noexcept;
  virtual ~ARMRAPass() noexcept;

  //! \}

  //! \name Accessors
  //! \{

  //! Returns the compiler casted to `arm::Compiler`.
  inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cb); }

  //! Returns emit helper.
  inline EmitHelper* emitHelper() noexcept { return &_emitHelper; }

  //! \}

  //! \name Events
  //! \{

  //! Called when the pass starts processing a function - sets up register
  //! counts, availability masks, and SP/FP mapping.
  void onInit() noexcept override;
  //! Called when the pass finished processing a function.
  void onDone() noexcept override;

  //! \}

  //! \name CFG
  //! \{

  //! Builds the control-flow graph of the function being compiled.
  Error buildCFG() noexcept override;

  //! \}

  //! \name Rewrite
  //! \{

  //! Rewrites virtual registers to assigned physical registers in [first, stop).
  Error _rewrite(BaseNode* first, BaseNode* stop) noexcept override;

  //! \}

  //! \name Prolog & Epilog
  //! \{

  //! Updates the function frame (marks LR dirty when the function makes calls).
  Error updateStackFrame() noexcept override;

  //! \}

  //! \name Emit Helpers
  //! \{

  //! Emits a reg-to-reg move of a work register between physical registers.
  Error emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept override;
  //! Emits a swap of two work registers (not implemented - returns an error).
  Error emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept override;

  //! Emits a load of a work register from its stack slot.
  Error emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept override;
  //! Emits a save of a work register into its stack slot.
  Error emitSave(uint32_t workId, uint32_t srcPhysId) noexcept override;

  //! Emits an unconditional jump to `label`.
  Error emitJump(const Label& label) noexcept override;
  //! Called before a call is emitted (no-op in this backend).
  Error emitPreCall(InvokeNode* invokeNode) noexcept override;

  //! \}
};
|
||||
|
||||
//! \}
|
||||
//! \endcond
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_COMPILER
|
||||
#endif // ASMJIT_ARM_A64RAPASS_P_H_INCLUDED
|
||||
179
src/asmjit/arm/a64utils.h
Normal file
179
src/asmjit/arm/a64utils.h
Normal file
@@ -0,0 +1,179 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_A64UTILS_H_INCLUDED
|
||||
#define ASMJIT_ARM_A64UTILS_H_INCLUDED
|
||||
|
||||
#include "../arm/a64globals.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
|
||||
|
||||
//! \addtogroup asmjit_a64
|
||||
//! \{
|
||||
|
||||
//! Public utilities and helpers for targeting AArch64 architecture.
|
||||
namespace Utils {
|
||||
|
||||
//! Decomposed fields of a logical immediate value (AArch64).
|
||||
struct LogicalImm {
  //! N field - set to 1 when the repeating element width is 64 bits.
  uint32_t n;
  //! ImmS field - encodes element size together with the ones count.
  uint32_t s;
  //! ImmR field - encodes the rotate amount of the bit pattern.
  uint32_t r;
};
|
||||
|
||||
//! Encodes the given `imm` value of the given `width` to a logical immediate value represented as N, S, and R fields
|
||||
//! and writes these fields to `out`.
|
||||
//!
|
||||
//! Encoding Table:
|
||||
//!
|
||||
//! ```
|
||||
//! +---+--------+--------+------+
|
||||
//! | N | ImmS | ImmR | Size |
|
||||
//! +---+--------+--------+------+
|
||||
//! | 1 | ssssss | rrrrrr | 64 |
|
||||
//! | 0 | 0sssss | .rrrrr | 32 |
|
||||
//! | 0 | 10ssss | ..rrrr | 16 |
|
||||
//! | 0 | 110sss | ...rrr | 8 |
|
||||
//! | 0 | 1110ss | ....rr | 4 |
|
||||
//! | 0 | 11110s | .....r | 2 |
|
||||
//! +---+--------+--------+------+
|
||||
//! ```
|
||||
ASMJIT_MAYBE_UNUSED
static bool encodeLogicalImm(uint64_t imm, uint32_t width, a64::Utils::LogicalImm* out) noexcept {
  // Determine the element width, which must be 2, 4, 8, 16, 32, or 64 bits.
  // Halve the width while the lower and upper halves of the pattern are
  // identical - this finds the smallest repeating element.
  do {
    width /= 2;
    uint64_t mask = (uint64_t(1) << width) - 1u;
    if ((imm & mask) != ((imm >> width) & mask)) {
      width *= 2;
      break;
    }
  } while (width > 2);

  // Patterns of all zeros and all ones are not encodable.
  uint64_t lsbMask = Support::lsbMask<uint64_t>(width);
  imm &= lsbMask;

  if (imm == 0 || imm == lsbMask)
    return false;

  // Inspect the pattern and get the most important bit indexes.
  //
  // oIndex <-+ +-> zIndex
  //          | |
  // |..zeros..|oCount|zCount|..ones..|
  // |000000000|111111|000000|11111111|

  // Index of the first zero bit above the trailing ones, and the length of
  // the zero run that follows.
  uint32_t zIndex = Support::ctz(~imm);
  uint64_t zImm = imm ^ ((uint64_t(1) << zIndex) - 1);
  uint32_t zCount = (zImm ? Support::ctz(zImm) : width) - zIndex;

  // Index of the one run above the zero run, and its length.
  uint32_t oIndex = zIndex + zCount;
  uint64_t oImm = ~(zImm ^ Support::lsbMask<uint64_t>(oIndex));
  uint32_t oCount = (oImm ? Support::ctz(oImm) : width) - (oIndex);

  // Verify whether the bit-pattern is encodable - everything above the one
  // run must be zero (a single rotated run of ones per element).
  uint64_t mustBeZero = oImm ^ ~Support::lsbMask<uint64_t>(oIndex + oCount);
  if (mustBeZero != 0 || (zIndex > 0 && width - (oIndex + oCount) != 0))
    return false;

  // Emit the N:S:R fields (see the encoding table in the comment above).
  out->n = width == 64;
  out->s = (oCount + zIndex - 1) | (Support::neg(width * 2) & 0x3F);
  out->r = width - oIndex;
  return true;
}
|
||||
|
||||
//! Returns true if the given `imm` value is encodable as a logical immediate. The `width` argument describes the
|
||||
//! width of the operation, and must be either 32 or 64. This function can be used to test whether an immediate
|
||||
//! value can be used with AND, ANDS, BIC, BICS, EON, EOR, ORN, and ORR instruction.
|
||||
ASMJIT_MAYBE_UNUSED
|
||||
static inline bool isLogicalImm(uint64_t imm, uint32_t width) noexcept {
|
||||
LogicalImm dummy;
|
||||
return encodeLogicalImm(imm, width, &dummy);
|
||||
}
|
||||
|
||||
//! Returns true if the given `imm` value is a byte mask. Byte mask has each byte part of the value set to either
|
||||
//! 0x00 or 0xFF. Some ARM instructions accept immediates that form a byte-mask and this function can be used to
|
||||
//! verify that the immediate is encodable before using the value.
|
||||
template<typename T>
|
||||
static inline bool isByteMaskImm8(const T& imm) noexcept {
|
||||
constexpr T kMask = T(0x0101010101010101 & Support::allOnes<T>());
|
||||
return imm == (imm & kMask) * T(255);
|
||||
}
|
||||
|
||||
//! \cond
//! A generic implementation that checks whether a floating point value can be converted to ARM Imm8.
//!
//! The template parameters describe the bit layout of the source type: `kNumBBits` is the number of
//! exponent bits that must all match (the "B" pattern), `kNumCDEFGHBits` is the number of freely
//! choosable bits below them, and `kNumZeroBits` is the number of trailing bits that must be zero.
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static inline bool isFPImm8Generic(T val) noexcept {
  constexpr uint32_t kAllBsMask = Support::lsbMask<uint32_t>(kNumBBits);
  // The two acceptable "B" patterns: either only the top B bit is set, or all B bits except the top one.
  constexpr uint32_t kB0Pattern = Support::bitMask(kNumBBits - 1);
  constexpr uint32_t kB1Pattern = kAllBsMask ^ kB0Pattern;

  T immZ = val & Support::lsbMask<T>(kNumZeroBits);
  uint32_t immB = uint32_t(val >> (kNumZeroBits + kNumCDEFGHBits)) & kAllBsMask;

  // ImmZ must be all zeros and ImmB must either be B0 or B1 pattern.
  return immZ == 0 && (immB == kB0Pattern || immB == kB1Pattern);
}
//! \endcond
|
||||
|
||||
//! Returns true if the given half precision floating point `val` can be encoded as ARM IMM8 value, which represents
//! a limited set of floating point immediate values, which can be used with FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbcdef|gh000000]
//! ```
//!
//! \note `val` is the raw FP16 bit-pattern passed as `uint32_t` (there is no native 16-bit FP type to overload on).
static inline bool isFP16Imm8(uint32_t val) noexcept { return isFPImm8Generic<uint32_t, 3, 6, 6>(val); }

//! Returns true if the given single precision floating point `val` can be encoded as ARM IMM8 value, which represents
//! a limited set of floating point immediate values, which can be used with FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbbbbc|defgh000|00000000|00000000]
//! ```
static inline bool isFP32Imm8(uint32_t val) noexcept { return isFPImm8Generic<uint32_t, 6, 6, 19>(val); }
//! \overload
static inline bool isFP32Imm8(float val) noexcept { return isFP32Imm8(Support::bitCast<uint32_t>(val)); }

//! Returns true if the given double precision floating point `val` can be encoded as ARM IMM8 value, which represents
//! a limited set of floating point immediate values, which can be used with FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbbbbb|bbcdefgh|00000000|00000000|00000000|00000000|00000000|00000000]
//! ```
static inline bool isFP64Imm8(uint64_t val) noexcept { return isFPImm8Generic<uint64_t, 9, 6, 48>(val); }
//! \overload
static inline bool isFP64Imm8(double val) noexcept { return isFP64Imm8(Support::bitCast<uint64_t>(val)); }
|
||||
|
||||
//! \cond
//! Rearranges the significant bits of a floating point bit-pattern into the 8-bit Imm8 layout.
//!
//! The template parameters mirror `isFPImm8Generic`: `kNumZeroBits` trailing bits are discarded,
//! the low 7 of the remaining bits become Imm8[6:0], and the sign bit (located `kNumBBits +
//! kNumCDEFGHBits` above the discarded bits) becomes Imm8[7]. No validation is performed.
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static inline uint32_t encodeFPToImm8Generic(T val) noexcept {
  uint32_t significantBits = uint32_t(val >> kNumZeroBits);
  uint32_t signBit = (significantBits >> (kNumBBits + kNumCDEFGHBits - 7)) & 0x80u;
  uint32_t lowBits = significantBits & 0x7Fu;
  return signBit | lowBits;
}
//! \endcond
|
||||
|
||||
//! Encodes a double precision floating point value into IMM8 format.
//!
//! \note This function expects that `isFP64Imm8(val) == true` so it doesn't perform any checks of the value and just
//! rearranges some bits into Imm8 order.
//!
//! This overload takes the raw IEEE-754 bit-pattern of the double.
static inline uint32_t encodeFP64ToImm8(uint64_t val) noexcept { return encodeFPToImm8Generic<uint64_t, 9, 6, 48>(val); }
//! \overload
//! This overload takes a `double` and reinterprets its bits before encoding.
static inline uint32_t encodeFP64ToImm8(double val) noexcept { return encodeFP64ToImm8(Support::bitCast<uint64_t>(val)); }
|
||||
|
||||
} // {Utils}
|
||||
|
||||
//! \}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // ASMJIT_ARM_A64UTILS_H_INCLUDED
|
||||
|
||||
143
src/asmjit/arm/armformatter.cpp
Normal file
143
src/asmjit/arm/armformatter.cpp
Normal file
@@ -0,0 +1,143 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include "../core/api-build_p.h"
|
||||
#ifndef ASMJIT_NO_LOGGING
|
||||
|
||||
#include "../core/misc_p.h"
|
||||
#include "../core/support.h"
|
||||
#include "../arm/armformatter_p.h"
|
||||
#include "../arm/armoperand.h"
|
||||
#include "../arm/a64instapi_p.h"
|
||||
#include "../arm/a64instdb_p.h"
|
||||
|
||||
#ifndef ASMJIT_NO_COMPILER
|
||||
#include "../core/compiler.h"
|
||||
#endif
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
|
||||
|
||||
// arm::FormatterInternal - Format Feature
|
||||
// =======================================
|
||||
|
||||
// Appends the name of the CPU feature identified by `featureId` to `sb`. Ids greater than
// `CpuFeatures::ARM::kMaxValue` are clamped to the trailing "<Unknown>" entry. The string table and
// the index table below are generated (see the @EnumStringBegin/@EnumStringEnd markers) - do not
// edit them by hand; `sFeatureIndex[i]` is the byte offset of the i-th name inside `sFeatureString`.
Error FormatterInternal::formatFeature(String& sb, uint32_t featureId) noexcept {
  // @EnumStringBegin{"enum": "CpuFeatures::ARM", "output": "sFeature", "strip": "k"}@
  static const char sFeatureString[] =
    "None\0"
    "THUMB\0"
    "THUMBv2\0"
    "ARMv6\0"
    "ARMv7\0"
    "ARMv8a\0"
    "ARMv8_1a\0"
    "ARMv8_2a\0"
    "ARMv8_3a\0"
    "ARMv8_4a\0"
    "ARMv8_5a\0"
    "ARMv8_6a\0"
    "ARMv8_7a\0"
    "VFPv2\0"
    "VFPv3\0"
    "VFPv4\0"
    "VFP_D32\0"
    "AES\0"
    "ALTNZCV\0"
    "ASIMD\0"
    "BF16\0"
    "BTI\0"
    "CPUID\0"
    "CRC32\0"
    "DGH\0"
    "DIT\0"
    "DOTPROD\0"
    "EDSP\0"
    "FCMA\0"
    "FJCVTZS\0"
    "FLAGM\0"
    "FP16CONV\0"
    "FP16FML\0"
    "FP16FULL\0"
    "FRINT\0"
    "I8MM\0"
    "IDIVA\0"
    "IDIVT\0"
    "LSE\0"
    "MTE\0"
    "RCPC_IMMO\0"
    "RDM\0"
    "PMU\0"
    "PMULL\0"
    "RNG\0"
    "SB\0"
    "SHA1\0"
    "SHA2\0"
    "SHA3\0"
    "SHA512\0"
    "SM3\0"
    "SM4\0"
    "SSBS\0"
    "SVE\0"
    "SVE_BF16\0"
    "SVE_F32MM\0"
    "SVE_F64MM\0"
    "SVE_I8MM\0"
    "SVE_PMULL\0"
    "SVE2\0"
    "SVE2_AES\0"
    "SVE2_BITPERM\0"
    "SVE2_SHA3\0"
    "SVE2_SM4\0"
    "TME\0"
    "<Unknown>\0";

  static const uint16_t sFeatureIndex[] = {
    0, 5, 11, 19, 25, 31, 38, 47, 56, 65, 74, 83, 92, 101, 107, 113, 119, 127,
    131, 139, 145, 150, 154, 160, 166, 170, 174, 182, 187, 192, 200, 206, 215,
    223, 232, 238, 243, 249, 255, 259, 263, 273, 277, 281, 287, 291, 294, 299,
    304, 309, 316, 320, 324, 329, 333, 342, 352, 362, 371, 381, 386, 395, 408,
    418, 427, 431
  };
  // @EnumStringEnd@

  return sb.append(sFeatureString + sFeatureIndex[Support::min<uint32_t>(featureId, uint32_t(CpuFeatures::ARM::kMaxValue) + 1)]);
}
|
||||
|
||||
// arm::FormatterInternal - Format Constants
|
||||
// =========================================
|
||||
|
||||
// Appends the two-letter mnemonic of the condition code `cc` to `sb`. Every entry in the table
// occupies exactly 3 bytes ("xx\0"), so the table is indexed by `cc * 3`; values >= 16 are clamped
// so they land on the trailing "<Unknown>" entry.
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatCondCode(String& sb, CondCode cc) noexcept {
  static const char condCodeData[] =
    "al\0" "na\0"
    "eq\0" "ne\0"
    "cs\0" "cc\0" "mi\0" "pl\0" "vs\0" "vc\0"
    "hi\0" "ls\0" "ge\0" "lt\0" "gt\0" "le\0"
    "<Unknown>";
  return sb.append(condCodeData + Support::min<uint32_t>(uint32_t(cc), 16u) * 3);
}
|
||||
|
||||
// Appends the lowercase mnemonic of the given shift/extend operation `shiftOp` to `sb`.
// Unrecognized values are formatted as "<Unknown>".
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatShiftOp(String& sb, ShiftOp shiftOp) noexcept {
  switch (shiftOp) {
    case ShiftOp::kLSL: return sb.append("lsl");
    case ShiftOp::kLSR: return sb.append("lsr");
    case ShiftOp::kASR: return sb.append("asr");
    case ShiftOp::kROR: return sb.append("ror");
    case ShiftOp::kRRX: return sb.append("rrx");
    case ShiftOp::kMSL: return sb.append("msl");
    case ShiftOp::kUXTB: return sb.append("uxtb");
    case ShiftOp::kUXTH: return sb.append("uxth");
    case ShiftOp::kUXTW: return sb.append("uxtw");
    case ShiftOp::kUXTX: return sb.append("uxtx");
    case ShiftOp::kSXTB: return sb.append("sxtb");
    case ShiftOp::kSXTH: return sb.append("sxth");
    case ShiftOp::kSXTW: return sb.append("sxtw");
    case ShiftOp::kSXTX: return sb.append("sxtx");
    default: return sb.append("<Unknown>");
  }
}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_LOGGING
|
||||
44
src/asmjit/arm/armformatter_p.h
Normal file
44
src/asmjit/arm/armformatter_p.h
Normal file
@@ -0,0 +1,44 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED
|
||||
#define ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED
|
||||
|
||||
#include "../core/api-config.h"
|
||||
#ifndef ASMJIT_NO_LOGGING
|
||||
|
||||
#include "../core/formatter.h"
|
||||
#include "../core/string.h"
|
||||
#include "../arm/armglobals.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
|
||||
|
||||
//! \cond INTERNAL
|
||||
//! \addtogroup asmjit_arm
|
||||
//! \{
|
||||
|
||||
namespace FormatterInternal {

//! Appends the name of the CPU feature `featureId` to `sb`.
Error ASMJIT_CDECL formatFeature(
  String& sb,
  uint32_t featureId) noexcept;

//! Appends the mnemonic of the condition code `cc` to `sb`.
Error ASMJIT_CDECL formatCondCode(
  String& sb,
  CondCode cc) noexcept;

//! Appends the mnemonic of the shift/extend operation `shiftOp` to `sb`.
Error ASMJIT_CDECL formatShiftOp(
  String& sb,
  ShiftOp shiftOp) noexcept;

} // {FormatterInternal}
|
||||
|
||||
//! \}
|
||||
//! \endcond
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_LOGGING
|
||||
#endif // ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED
|
||||
21
src/asmjit/arm/armglobals.h
Normal file
21
src/asmjit/arm/armglobals.h
Normal file
@@ -0,0 +1,21 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_ARMGLOBALS_H_INCLUDED
|
||||
#define ASMJIT_ARM_ARMGLOBALS_H_INCLUDED
|
||||
|
||||
#include "../core/archcommons.h"
|
||||
#include "../core/inst.h"
|
||||
|
||||
//! \namespace asmjit::arm
|
||||
//! \ingroup asmjit_arm
|
||||
//!
|
||||
//! API shared between AArch32 & AArch64 backends.
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // ASMJIT_ARM_ARMGLOBALS_H_INCLUDED
|
||||
596
src/asmjit/arm/armoperand.h
Normal file
596
src/asmjit/arm/armoperand.h
Normal file
@@ -0,0 +1,596 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#ifndef ASMJIT_ARM_ARMOPERAND_H_INCLUDED
|
||||
#define ASMJIT_ARM_ARMOPERAND_H_INCLUDED
|
||||
|
||||
#include "../core/archtraits.h"
|
||||
#include "../core/operand.h"
|
||||
#include "../core/type.h"
|
||||
#include "../arm/armglobals.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
|
||||
|
||||
//! \addtogroup asmjit_arm
|
||||
//! \{
|
||||
|
||||
class Reg;
|
||||
class Mem;
|
||||
|
||||
class Gp;
|
||||
class GpW;
|
||||
class GpX;
|
||||
|
||||
class Vec;
|
||||
class VecB;
|
||||
class VecH;
|
||||
class VecS;
|
||||
class VecD;
|
||||
class VecV;
|
||||
|
||||
//! Register traits (ARM/AArch64).
//!
//! Register traits contains information about a particular register type. It's used by asmjit to setup register
//! information on-the-fly and to populate tables that contain register information (this way it's possible to
//! change register types and groups without having to reorder these tables).
template<RegType kRegType>
struct RegTraits : public BaseRegTraits {};

//! \cond
// One traits specialization per AArch64 register type (generated by the macro below).
// <--------------------+-----+-------------------------+------------------------+---+---+------------------+
//                      | Reg |        Reg-Type         |       Reg-Group        |Sz |Cnt|      TypeId      |
// <--------------------+-----+-------------------------+------------------------+---+---+------------------+
ASMJIT_DEFINE_REG_TRAITS(GpW  , RegType::kARM_GpW       , RegGroup::kGp          , 4 , 32, TypeId::kInt32   );
ASMJIT_DEFINE_REG_TRAITS(GpX  , RegType::kARM_GpX       , RegGroup::kGp          , 8 , 32, TypeId::kInt64   );
ASMJIT_DEFINE_REG_TRAITS(VecB , RegType::kARM_VecB      , RegGroup::kVec         , 1 , 32, TypeId::kVoid    );
ASMJIT_DEFINE_REG_TRAITS(VecH , RegType::kARM_VecH      , RegGroup::kVec         , 2 , 32, TypeId::kVoid    );
ASMJIT_DEFINE_REG_TRAITS(VecS , RegType::kARM_VecS      , RegGroup::kVec         , 4 , 32, TypeId::kInt32x1 );
ASMJIT_DEFINE_REG_TRAITS(VecD , RegType::kARM_VecD      , RegGroup::kVec         , 8 , 32, TypeId::kInt32x2 );
ASMJIT_DEFINE_REG_TRAITS(VecV , RegType::kARM_VecV      , RegGroup::kVec         , 16, 32, TypeId::kInt32x4 );
//! \endcond
|
||||
|
||||
//! Register (ARM).
class Reg : public BaseReg {
public:
  ASMJIT_DEFINE_ABSTRACT_REG(Reg, BaseReg)

  //! Gets whether the register is a `R|W` register (32-bit).
  inline constexpr bool isGpW() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; }
  //! Gets whether the register is an `X` register (64-bit).
  inline constexpr bool isGpX() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpX>::kSignature; }
  //! Gets whether the register is a VEC-B register (8-bit).
  inline constexpr bool isVecB() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; }
  //! Gets whether the register is a VEC-H register (16-bit).
  inline constexpr bool isVecH() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; }
  //! Gets whether the register is a VEC-S register (32-bit).
  inline constexpr bool isVecS() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; }
  //! Gets whether the register is a VEC-D register (64-bit).
  inline constexpr bool isVecD() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; }
  //! Gets whether the register is a VEC-Q register (128-bit).
  inline constexpr bool isVecQ() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }

  //! Gets whether the register is either VEC-D (64-bit) or VEC-Q (128-bit).
  inline constexpr bool isVecDOrQ() const noexcept { return uint32_t(type()) - uint32_t(RegType::kARM_VecD) <= 1u; }

  //! Gets whether the register is a VEC-V register (128-bit).
  inline constexpr bool isVecV() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }

  //! Sets this register's signature to the traits of `kRegType` and assigns the given `id` (compile-time type).
  template<RegType kRegType>
  inline void setRegT(uint32_t id) noexcept {
    setSignature(RegTraits<kRegType>::kSignature);
    setId(id);
  }

  //! Sets this register's signature from a runtime `type` and assigns the given `id`.
  inline void setTypeAndId(RegType type, uint32_t id) noexcept {
    setSignature(signatureOf(type));
    setId(id);
  }

  //! Translates a register `type` into a register group using AArch64 architecture traits.
  static inline RegGroup groupOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToGroup(type); }
  //! Translates a register `type` into a type-id using AArch64 architecture traits.
  static inline TypeId typeIdOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToTypeId(type); }
  //! Translates a register `type` into an operand signature using AArch64 architecture traits.
  static inline OperandSignature signatureOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToSignature(type); }

  //! Compile-time variant of \ref groupOf().
  template<RegType kRegType>
  static inline RegGroup groupOfT() noexcept { return RegTraits<kRegType>::kGroup; }

  //! Compile-time variant of \ref typeIdOf().
  template<RegType kRegType>
  static inline TypeId typeIdOfT() noexcept { return RegTraits<kRegType>::kTypeId; }

  //! Compile-time variant of \ref signatureOf().
  template<RegType kRegType>
  static inline OperandSignature signatureOfT() noexcept { return RegTraits<kRegType>::kSignature; }

  // Static predicates that can be applied to any `Operand_`.
  static inline bool isGpW(const Operand_& op) noexcept { return op.as<Reg>().isGpW(); }
  static inline bool isGpX(const Operand_& op) noexcept { return op.as<Reg>().isGpX(); }
  static inline bool isVecB(const Operand_& op) noexcept { return op.as<Reg>().isVecB(); }
  static inline bool isVecH(const Operand_& op) noexcept { return op.as<Reg>().isVecH(); }
  static inline bool isVecS(const Operand_& op) noexcept { return op.as<Reg>().isVecS(); }
  static inline bool isVecD(const Operand_& op) noexcept { return op.as<Reg>().isVecD(); }
  static inline bool isVecQ(const Operand_& op) noexcept { return op.as<Reg>().isVecQ(); }
  static inline bool isVecV(const Operand_& op) noexcept { return op.as<Reg>().isVecV(); }

  // Predicates that additionally match the register id (the two tests are combined with a
  // non-short-circuiting bitwise `&`).
  static inline bool isGpW(const Operand_& op, uint32_t id) noexcept { return isGpW(op) & (op.id() == id); }
  static inline bool isGpX(const Operand_& op, uint32_t id) noexcept { return isGpX(op) & (op.id() == id); }
  static inline bool isVecB(const Operand_& op, uint32_t id) noexcept { return isVecB(op) & (op.id() == id); }
  static inline bool isVecH(const Operand_& op, uint32_t id) noexcept { return isVecH(op) & (op.id() == id); }
  static inline bool isVecS(const Operand_& op, uint32_t id) noexcept { return isVecS(op) & (op.id() == id); }
  static inline bool isVecD(const Operand_& op, uint32_t id) noexcept { return isVecD(op) & (op.id() == id); }
  static inline bool isVecQ(const Operand_& op, uint32_t id) noexcept { return isVecQ(op) & (op.id() == id); }
  static inline bool isVecV(const Operand_& op, uint32_t id) noexcept { return isVecV(op) & (op.id() == id); }
};
|
||||
|
||||
//! General purpose register (ARM).
class Gp : public Reg {
public:
  ASMJIT_DEFINE_ABSTRACT_REG(Gp, Reg)

  //! Special register id.
  enum Id : uint32_t {
    //! Register that depends on OS, could be used as TLS offset.
    kIdOs = 18,
    //! Frame pointer.
    kIdFp = 29,
    //! Link register.
    kIdLr = 30,
    //! Stack register id.
    kIdSp = 31,
    //! Zero register id.
    //!
    //! Although zero register has the same id as stack register it has a special treatment, because we need to be
    //! able to distinguish between these two at API level. Some instructions were designed to be used with SP and
    //! some other with ZR - so we need a way to distinguish these two to make sure we emit the right thing.
    //!
    //! The number 63 is not random, when you perform `id & 31` you would always get 31 for both SP and ZR inputs,
    //! which is the identifier used by AArch64 ISA to encode either SP or ZR depending on the instruction.
    kIdZr = 63
  };

  //! Tests whether this register has the zero-register id (\ref kIdZr).
  inline constexpr bool isZR() const noexcept { return id() == kIdZr; }
  //! Tests whether this register has the stack-pointer id (\ref kIdSp).
  inline constexpr bool isSP() const noexcept { return id() == kIdSp; }

  //! Cast this register to a 32-bit R|W.
  inline GpW w() const noexcept;
  //! Cast this register to a 64-bit X.
  inline GpX x() const noexcept;
};
|
||||
|
||||
//! Vector register (ARM).
class Vec : public Reg {
public:
  ASMJIT_DEFINE_ABSTRACT_REG(Vec, Reg)

  //! Additional signature bits used by arm::Vec.
  enum AdditionalBits : uint32_t {
    // Register element type (3 bits).
    // |........|........|.XXX....|........|
    kSignatureRegElementTypeShift = 12,
    kSignatureRegElementTypeMask = 0x07 << kSignatureRegElementTypeShift,

    // Register has element index (1 bit).
    // |........|........|X.......|........|
    kSignatureRegElementFlagShift = 15,
    kSignatureRegElementFlagMask = 0x01 << kSignatureRegElementFlagShift,

    // Register element index (4 bits).
    // |........|....XXXX|........|........|
    kSignatureRegElementIndexShift = 16,
    kSignatureRegElementIndexMask = 0x0F << kSignatureRegElementIndexShift
  };

  //! Element type.
  enum ElementType : uint32_t {
    //! No element type specified.
    kElementTypeNone = 0,
    //! Byte elements (B8 or B16).
    kElementTypeB,
    //! Halfword elements (H4 or H8).
    kElementTypeH,
    //! Singleword elements (S2 or S4).
    kElementTypeS,
    //! Doubleword elements (D2).
    kElementTypeD,
    //! Byte elements grouped by 4 bytes (B4).
    //!
    //! \note This element-type is only used by few instructions.
    kElementTypeB4,
    //! Halfword elements grouped by 2 halfwords (H2).
    //!
    //! \note This element-type is only used by few instructions.
    kElementTypeH2,

    //! Count of element types.
    kElementTypeCount
  };

  //! \cond
  //! Shortcuts - each element type pre-shifted into its signature-field position.
  enum SignatureReg : uint32_t {
    kSignatureElementB = kElementTypeB << kSignatureRegElementTypeShift,
    kSignatureElementH = kElementTypeH << kSignatureRegElementTypeShift,
    kSignatureElementS = kElementTypeS << kSignatureRegElementTypeShift,
    kSignatureElementD = kElementTypeD << kSignatureRegElementTypeShift,
    kSignatureElementB4 = kElementTypeB4 << kSignatureRegElementTypeShift,
    kSignatureElementH2 = kElementTypeH2 << kSignatureRegElementTypeShift
  };
  //! \endcond

  //! Returns whether the register has associated an element type.
  inline constexpr bool hasElementType() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask>(); }
  //! Returns whether the register has element index (it's an element index access).
  inline constexpr bool hasElementIndex() const noexcept { return _signature.hasField<kSignatureRegElementFlagMask>(); }
  //! Returns whether the register has element type or element index (or both).
  inline constexpr bool hasElementTypeOrIndex() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask | kSignatureRegElementFlagMask>(); }

  //! Returns element type of the register.
  inline constexpr uint32_t elementType() const noexcept { return _signature.getField<kSignatureRegElementTypeMask>(); }
  //! Sets element type of the register to `elementType`.
  inline void setElementType(uint32_t elementType) noexcept { _signature.setField<kSignatureRegElementTypeMask>(elementType); }
  //! Resets element type to none.
  inline void resetElementType() noexcept { _signature.setField<kSignatureRegElementTypeMask>(0); }

  //! Returns element index of the register.
  inline constexpr uint32_t elementIndex() const noexcept { return _signature.getField<kSignatureRegElementIndexMask>(); }
  //! Sets element index of the register to `elementIndex` (also sets the element-index flag).
  inline void setElementIndex(uint32_t elementIndex) noexcept {
    _signature |= kSignatureRegElementFlagMask;
    _signature.setField<kSignatureRegElementIndexMask>(elementIndex);
  }
  //! Resets element index of the register (clears both the flag and the index field).
  inline void resetElementIndex() noexcept {
    _signature &= ~(kSignatureRegElementFlagMask | kSignatureRegElementIndexMask);
  }

  // Predicates matching a 64-bit (VecD) base register combined with a particular element type.
  inline constexpr bool isVecB8() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementB); }
  inline constexpr bool isVecH4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementH); }
  inline constexpr bool isVecS2() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementS); }
  inline constexpr bool isVecD1() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature); }

  // Predicates matching a 128-bit (VecV) base register combined with a particular element type.
  inline constexpr bool isVecB16() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB); }
  inline constexpr bool isVecH8() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH); }
  inline constexpr bool isVecS4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementS); }
  inline constexpr bool isVecD2() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementD); }
  inline constexpr bool isVecB4x4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB4); }
  inline constexpr bool isVecH2x4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH2); }

  //! Creates a cloned register with element access (same id, element index replaced by `elementIndex`).
  inline Vec at(uint32_t elementIndex) const noexcept {
    return Vec((signature() & ~kSignatureRegElementIndexMask) | (elementIndex << kSignatureRegElementIndexShift) | kSignatureRegElementFlagMask, id());
  }

  //! Cast this register to an 8-bit B register (scalar).
  inline VecB b() const noexcept;
  //! Cast this register to a 16-bit H register (scalar).
  inline VecH h() const noexcept;
  //! Cast this register to a 32-bit S register (scalar).
  inline VecS s() const noexcept;
  //! Cast this register to a 64-bit D register (scalar).
  inline VecD d() const noexcept;
  //! Cast this register to a 128-bit Q register (scalar).
  inline VecV q() const noexcept;
  //! Cast this register to a 128-bit V register.
  inline VecV v() const noexcept;

  //! Cast this register to a 128-bit V.B[elementIndex] register.
  inline VecV b(uint32_t elementIndex) const noexcept;
  //! Cast this register to a 128-bit V.H[elementIndex] register.
  inline VecV h(uint32_t elementIndex) const noexcept;
  //! Cast this register to a 128-bit V.S[elementIndex] register.
  inline VecV s(uint32_t elementIndex) const noexcept;
  //! Cast this register to a 128-bit V.D[elementIndex] register.
  inline VecV d(uint32_t elementIndex) const noexcept;
  //! Cast this register to a 128-bit V.H2[elementIndex] register.
  inline VecV h2(uint32_t elementIndex) const noexcept;
  //! Cast this register to a 128-bit V.B4[elementIndex] register.
  inline VecV b4(uint32_t elementIndex) const noexcept;

  //! Cast this register to V.8B.
  inline VecD b8() const noexcept;
  //! Cast this register to V.16B.
  inline VecV b16() const noexcept;
  //! Cast this register to V.2H.
  inline VecS h2() const noexcept;
  //! Cast this register to V.4H.
  inline VecD h4() const noexcept;
  //! Cast this register to V.8H.
  inline VecV h8() const noexcept;
  //! Cast this register to V.2S.
  inline VecD s2() const noexcept;
  //! Cast this register to V.4S.
  inline VecV s4() const noexcept;
  //! Cast this register to V.2D.
  inline VecV d2() const noexcept;

  //! Composes a 128-bit vector signature that carries the given `elementType` and `elementIndex`.
  static inline constexpr OperandSignature _makeElementAccessSignature(uint32_t elementType, uint32_t elementIndex) noexcept {
    return OperandSignature{
      uint32_t(RegTraits<RegType::kARM_VecV>::kSignature) |
      uint32_t(kSignatureRegElementFlagMask) |
      uint32_t(elementType << kSignatureRegElementTypeShift) |
      uint32_t(elementIndex << kSignatureRegElementIndexShift)};
  }
};
|
||||
|
||||
//! 32-bit GPW (AArch64) and/or GPR (ARM/AArch32) register.
class GpW : public Gp { ASMJIT_DEFINE_FINAL_REG(GpW, Gp, RegTraits<RegType::kARM_GpW>) };
//! 64-bit GPX (AArch64) register.
class GpX : public Gp { ASMJIT_DEFINE_FINAL_REG(GpX, Gp, RegTraits<RegType::kARM_GpX>) };

//! 8-bit view (B) of VFP/SIMD register.
class VecB : public Vec { ASMJIT_DEFINE_FINAL_REG(VecB, Vec, RegTraits<RegType::kARM_VecB>) };
//! 16-bit view (H) of VFP/SIMD register.
class VecH : public Vec { ASMJIT_DEFINE_FINAL_REG(VecH, Vec, RegTraits<RegType::kARM_VecH>) };
//! 32-bit view (S) of VFP/SIMD register.
class VecS : public Vec { ASMJIT_DEFINE_FINAL_REG(VecS, Vec, RegTraits<RegType::kARM_VecS>) };
//! 64-bit view (D) of VFP/SIMD register.
class VecD : public Vec { ASMJIT_DEFINE_FINAL_REG(VecD, Vec, RegTraits<RegType::kARM_VecD>) };
//! 128-bit vector register (Q or V).
class VecV : public Vec { ASMJIT_DEFINE_FINAL_REG(VecV, Vec, RegTraits<RegType::kARM_VecV>) };
|
||||
|
||||
// Gp casts - reinterpret the same register id under a different GP register width.
inline GpW Gp::w() const noexcept { return GpW(id()); }
inline GpX Gp::x() const noexcept { return GpX(id()); }

// Vec casts to scalar views (B/H/S/D/Q/V) - only the signature changes, the id is kept.
inline VecB Vec::b() const noexcept { return VecB(id()); }
inline VecH Vec::h() const noexcept { return VecH(id()); }
inline VecS Vec::s() const noexcept { return VecS(id()); }
inline VecD Vec::d() const noexcept { return VecD(id()); }
inline VecV Vec::q() const noexcept { return VecV(id()); }
inline VecV Vec::v() const noexcept { return VecV(id()); }

// Vec casts to element accesses, e.g. `v.b(4)` corresponds to V.B[4].
inline VecV Vec::b(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeB, elementIndex), id()); }
inline VecV Vec::h(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeH, elementIndex), id()); }
inline VecV Vec::s(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeS, elementIndex), id()); }
inline VecV Vec::d(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeD, elementIndex), id()); }
inline VecV Vec::h2(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeH2, elementIndex), id()); }
inline VecV Vec::b4(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeB4, elementIndex), id()); }

// Vec casts to vector-arrangement views, e.g. `v.b8()` corresponds to V.8B, `v.s4()` to V.4S.
inline VecD Vec::b8() const noexcept { return VecD(OperandSignature{VecD::kSignature | kSignatureElementB}, id()); }
inline VecS Vec::h2() const noexcept { return VecS(OperandSignature{VecS::kSignature | kSignatureElementH}, id()); }
inline VecD Vec::h4() const noexcept { return VecD(OperandSignature{VecD::kSignature | kSignatureElementH}, id()); }
inline VecD Vec::s2() const noexcept { return VecD(OperandSignature{VecD::kSignature | kSignatureElementS}, id()); }
inline VecV Vec::b16() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementB}, id()); }
inline VecV Vec::h8() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementH}, id()); }
inline VecV Vec::s4() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementS}, id()); }
inline VecV Vec::d2() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementD}, id()); }
|
||||
|
||||
#ifndef _DOXYGEN
|
||||
namespace regs {
|
||||
#endif
|
||||
|
||||
//! Creates a 32-bit W register operand (ARM/AArch64).
|
||||
static inline constexpr GpW w(uint32_t id) noexcept { return GpW(id); }
|
||||
//! Creates a 64-bit X register operand (AArch64).
|
||||
static inline constexpr GpX x(uint32_t id) noexcept { return GpX(id); }
|
||||
//! Creates a 32-bit S register operand (ARM/AArch64).
|
||||
static inline constexpr VecS s(uint32_t id) noexcept { return VecS(id); }
|
||||
//! Creates a 64-bit D register operand (ARM/AArch64).
|
||||
static inline constexpr VecD d(uint32_t id) noexcept { return VecD(id); }
|
||||
//! Creates a 1282-bit V register operand (ARM/AArch64).
|
||||
static inline constexpr VecV v(uint32_t id) noexcept { return VecV(id); }
|
||||
|
||||
#ifndef _DOXYGEN
|
||||
} // {regs}
|
||||
|
||||
// Make `arm::regs` accessible through `arm` namespace as well.
|
||||
using namespace regs;
|
||||
#endif
|
||||
|
||||
//! Memory operand (ARM).
|
||||
class Mem : public BaseMem {
|
||||
public:
|
||||
//! \cond INTERNAL
|
||||
//! Additional bits of operand's signature used by `arm::Mem`.
|
||||
enum AdditionalBits : uint32_t {
|
||||
// Index shift value (5 bits).
|
||||
// |........|.....XXX|XX......|........|
|
||||
kSignatureMemShiftValueShift = 14,
|
||||
kSignatureMemShiftValueMask = 0x1Fu << kSignatureMemShiftValueShift,
|
||||
|
||||
// Shift operation type (4 bits).
|
||||
// |........|XXXX....|........|........|
|
||||
kSignatureMemPredicateShift = 20,
|
||||
kSignatureMemPredicateMask = 0x0Fu << kSignatureMemPredicateShift
|
||||
};
|
||||
//! \endcond
|
||||
|
||||
//! Memory offset mode.
|
||||
//!
|
||||
//! Additional constants that can be used with the `predicate`.
|
||||
enum OffsetMode : uint32_t {
|
||||
//! Pre-index "[BASE, #Offset {, <shift>}]!" with write-back.
|
||||
kOffsetPreIndex = 0xE,
|
||||
//! Post-index "[BASE], #Offset {, <shift>}" with write-back.
|
||||
kOffsetPostIndex = 0xF
|
||||
};
|
||||
|
||||
//! \name Construction & Destruction
|
||||
//! \{
|
||||
|
||||
//! Construct a default `Mem` operand, that points to [0].
|
||||
inline constexpr Mem() noexcept
|
||||
: BaseMem() {}
|
||||
|
||||
inline constexpr Mem(const Mem& other) noexcept
|
||||
: BaseMem(other) {}
|
||||
|
||||
inline explicit Mem(Globals::NoInit_) noexcept
|
||||
: BaseMem(Globals::NoInit) {}
|
||||
|
||||
inline constexpr Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept
|
||||
: BaseMem(signature, baseId, indexId, offset) {}
|
||||
|
||||
inline constexpr explicit Mem(const Label& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
|
||||
: BaseMem(Signature::fromOpType(OperandType::kMem) |
|
||||
Signature::fromMemBaseType(RegType::kLabelTag) |
|
||||
signature, base.id(), 0, off) {}
|
||||
|
||||
inline constexpr explicit Mem(const BaseReg& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
|
||||
: BaseMem(Signature::fromOpType(OperandType::kMem) |
|
||||
Signature::fromMemBaseType(base.type()) |
|
||||
signature, base.id(), 0, off) {}
|
||||
|
||||
inline constexpr Mem(const BaseReg& base, const BaseReg& index, Signature signature = Signature{0}) noexcept
|
||||
: BaseMem(Signature::fromOpType(OperandType::kMem) |
|
||||
Signature::fromMemBaseType(base.type()) |
|
||||
Signature::fromMemIndexType(index.type()) |
|
||||
signature, base.id(), index.id(), 0) {}
|
||||
|
||||
inline constexpr Mem(const BaseReg& base, const BaseReg& index, const Shift& shift, Signature signature = Signature{0}) noexcept
|
||||
: BaseMem(Signature::fromOpType(OperandType::kMem) |
|
||||
Signature::fromMemBaseType(base.type()) |
|
||||
Signature::fromMemIndexType(index.type()) |
|
||||
Signature::fromValue<kSignatureMemPredicateMask>(uint32_t(shift.op())) |
|
||||
Signature::fromValue<kSignatureMemShiftValueMask>(shift.value()) |
|
||||
signature, base.id(), index.id(), 0) {}
|
||||
|
||||
inline constexpr Mem(uint64_t base, Signature signature = Signature{0}) noexcept
|
||||
: BaseMem(Signature::fromOpType(OperandType::kMem) |
|
||||
signature, uint32_t(base >> 32), 0, int32_t(uint32_t(base & 0xFFFFFFFFu))) {}
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Overloaded Operators
|
||||
//! \{
|
||||
|
||||
inline Mem& operator=(const Mem& other) noexcept = default;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name ARM Specific Features
|
||||
//! \{
|
||||
|
||||
//! Clones the memory operand.
|
||||
inline constexpr Mem clone() const noexcept { return Mem(*this); }
|
||||
//! Gets new memory operand adjusted by `off`.
|
||||
inline Mem cloneAdjusted(int64_t off) const noexcept {
|
||||
Mem result(*this);
|
||||
result.addOffset(off);
|
||||
return result;
|
||||
}
|
||||
|
||||
using BaseMem::setIndex;
|
||||
|
||||
inline void setIndex(const BaseReg& index, uint32_t shift) noexcept {
|
||||
setIndex(index);
|
||||
setShift(shift);
|
||||
}
|
||||
|
||||
//! Gets whether the memory operand has shift (aka scale) constant.
|
||||
inline constexpr bool hasShift() const noexcept { return _signature.hasField<kSignatureMemShiftValueMask>(); }
|
||||
//! Gets the memory operand's shift (aka scale) constant.
|
||||
inline constexpr uint32_t shift() const noexcept { return _signature.getField<kSignatureMemShiftValueMask>(); }
|
||||
//! Sets the memory operand's shift (aka scale) constant.
|
||||
inline void setShift(uint32_t shift) noexcept { _signature.setField<kSignatureMemShiftValueMask>(shift); }
|
||||
//! Resets the memory operand's shift (aka scale) constant to zero.
|
||||
inline void resetShift() noexcept { _signature.setField<kSignatureMemShiftValueMask>(0); }
|
||||
|
||||
//! Gets memory predicate (shift mode or offset mode), see \ref ShiftOp and \ref OffsetMode.
|
||||
inline constexpr uint32_t predicate() const noexcept { return _signature.getField<kSignatureMemPredicateMask>(); }
|
||||
//! Sets memory predicate to `predicate`, see `Mem::ShiftOp`.
|
||||
inline void setPredicate(uint32_t predicate) noexcept { _signature.setField<kSignatureMemPredicateMask>(predicate); }
|
||||
//! Resets shift mode to LSL (default).
|
||||
inline void resetPredicate() noexcept { _signature.setField<kSignatureMemPredicateMask>(0); }
|
||||
|
||||
inline constexpr bool isFixedOffset() const noexcept { return predicate() < kOffsetPreIndex; }
|
||||
inline constexpr bool isPreOrPost() const noexcept { return predicate() >= kOffsetPreIndex; }
|
||||
inline constexpr bool isPreIndex() const noexcept { return predicate() == kOffsetPreIndex; }
|
||||
inline constexpr bool isPostIndex() const noexcept { return predicate() == kOffsetPostIndex; }
|
||||
|
||||
inline void resetToFixedOffset() noexcept { resetPredicate(); }
|
||||
inline void makePreIndex() noexcept { setPredicate(kOffsetPreIndex); }
|
||||
inline void makePostIndex() noexcept { setPredicate(kOffsetPostIndex); }
|
||||
|
||||
inline Mem pre() const noexcept {
|
||||
Mem result(*this);
|
||||
result.setPredicate(kOffsetPreIndex);
|
||||
return result;
|
||||
}
|
||||
|
||||
inline Mem pre(int64_t off) const noexcept {
|
||||
Mem result(*this);
|
||||
result.setPredicate(kOffsetPreIndex);
|
||||
result.addOffset(off);
|
||||
return result;
|
||||
}
|
||||
|
||||
inline Mem post() const noexcept {
|
||||
Mem result(*this);
|
||||
result.setPredicate(kOffsetPreIndex);
|
||||
return result;
|
||||
}
|
||||
|
||||
inline Mem post(int64_t off) const noexcept {
|
||||
Mem result(*this);
|
||||
result.setPredicate(kOffsetPostIndex);
|
||||
result.addOffset(off);
|
||||
return result;
|
||||
}
|
||||
|
||||
//! \}
|
||||
};
|
||||
|
||||
//! Creates `[base.reg, offset]` memory operand (offset mode).
|
||||
static inline constexpr Mem ptr(const Gp& base, int32_t offset = 0) noexcept {
|
||||
return Mem(base, offset);
|
||||
}
|
||||
|
||||
//! Creates `[base.reg, offset]!` memory operand (pre-index mode).
|
||||
static inline constexpr Mem ptr_pre(const Gp& base, int32_t offset = 0) noexcept {
|
||||
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemPredicateMask>(Mem::kOffsetPreIndex));
|
||||
}
|
||||
|
||||
//! Creates `[base.reg], offset` memory operand (post-index mode).
|
||||
static inline constexpr Mem ptr_post(const Gp& base, int32_t offset = 0) noexcept {
|
||||
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemPredicateMask>(Mem::kOffsetPostIndex));
|
||||
}
|
||||
|
||||
//! Creates `[base.reg, index]` memory operand.
|
||||
static inline constexpr Mem ptr(const Gp& base, const Gp& index) noexcept {
|
||||
return Mem(base, index);
|
||||
}
|
||||
|
||||
//! Creates `[base.reg], index` memory operand (post-index mode).
|
||||
static inline constexpr Mem ptr_post(const Gp& base, const Gp& index) noexcept {
|
||||
return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemPredicateMask>(Mem::kOffsetPostIndex));
|
||||
}
|
||||
|
||||
//! Creates `[base.reg, index, SHIFT_OP #shift]` memory operand.
|
||||
static inline constexpr Mem ptr(const Gp& base, const Gp& index, const Shift& shift) noexcept {
|
||||
return Mem(base, index, shift);
|
||||
}
|
||||
|
||||
//! Creates `[base + offset]` memory operand.
|
||||
static inline constexpr Mem ptr(const Label& base, int32_t offset = 0) noexcept {
|
||||
return Mem(base, offset);
|
||||
}
|
||||
|
||||
// TODO: [ARM] PC + offset address.
|
||||
#if 0
|
||||
//! Creates `[PC + offset]` (relative) memory operand.
|
||||
static inline constexpr Mem ptr(const PC& pc, int32_t offset = 0) noexcept {
|
||||
return Mem(pc, offset);
|
||||
}
|
||||
#endif
|
||||
|
||||
//! Creates `[base]` absolute memory operand.
|
||||
//!
|
||||
//! \note The concept of absolute memory operands doesn't exist on ARM, the ISA only provides PC relative addressing.
|
||||
//! Absolute memory operands can only be used if it's known that the PC relative offset is encodable and that it
|
||||
//! would be within the limits. Absolute address is also often output from disassemblers, so AsmJit support it so it
|
||||
//! can assemble it back.
|
||||
static inline constexpr Mem ptr(uint64_t base) noexcept { return Mem(base); }
|
||||
|
||||
//! \}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
//! \cond INTERNAL
|
||||
ASMJIT_BEGIN_NAMESPACE
|
||||
ASMJIT_DEFINE_TYPE_ID(arm::GpW, TypeId::kInt32);
|
||||
ASMJIT_DEFINE_TYPE_ID(arm::GpX, TypeId::kInt64);
|
||||
ASMJIT_DEFINE_TYPE_ID(arm::VecS, TypeId::kFloat32x1);
|
||||
ASMJIT_DEFINE_TYPE_ID(arm::VecD, TypeId::kFloat64x1);
|
||||
ASMJIT_DEFINE_TYPE_ID(arm::VecV, TypeId::kInt32x4);
|
||||
ASMJIT_END_NAMESPACE
|
||||
//! \endcond
|
||||
|
||||
#endif // ASMJIT_ARM_ARMOPERAND_H_INCLUDED
|
||||
@@ -26,9 +26,9 @@ namespace asmjit {
|
||||
//!
|
||||
//! \note It's important to understand that in order to learn AsmJit all groups are important. Some groups can be
|
||||
//! omitted if a particular tool is out of interest - for example \ref asmjit_assembler users don't need to know
|
||||
//! about \ref asmjit_builder, but it's not the opposite. \ref asmjit_builder users must know about \ref
|
||||
//! about \ref asmjit_builder, but it's not the opposite. \ref asmjit_builder users should know about \ref
|
||||
//! asmjit_assembler as it also uses operands, labels, and other concepts. Similarly \ref asmjit_compiler users
|
||||
//! must know how both \ref asmjit_assembler and \ref asmjit_builder tools work.
|
||||
//! should know how both \ref asmjit_assembler and \ref asmjit_builder tools work.
|
||||
//!
|
||||
//! \section where_to_start Where To Start
|
||||
//!
|
||||
@@ -70,13 +70,13 @@ namespace asmjit {
|
||||
//!
|
||||
//! - Make sure you put a breakpoint into \ref DebugUtils::errored() function if you have a problem with AsmJit
|
||||
//! returning errors during instruction encoding or register allocation. Having an active breakpoint there can
|
||||
//! help to reveal the origin of the error, to inspect variables and other conditions that caused to it.
|
||||
//! help to reveal the origin of the error, to inspect variables and other conditions that caused it.
|
||||
//!
|
||||
//! The reason for using \ref Logger and \ref ErrorHandler is that they provide a very useful information about what's
|
||||
//! happening inside emitters. In many cases the information provided by these two is crucial to quickly fix issues
|
||||
//! that happen during development (for example wrong instruction, address, or register used). In addition, output from
|
||||
//! \ref Logger is always necessary when filling bug reports. In other words, using logging and proper error handling
|
||||
//! can save a lot of time during the development.
|
||||
//! happening inside emitters. In many cases the information provided by these two is crucial to quickly identify and
|
||||
//! fix issues that happen during development (for example wrong instruction, address, or register used). In addition,
|
||||
//! output from \ref Logger is always necessary when filling bug reports. In other words, using logging and proper error
|
||||
//! handling can save a lot of time during the development and can also save users from submitting issues.
|
||||
//!
|
||||
//! \section main_other Other Pages
|
||||
//!
|
||||
@@ -147,7 +147,9 @@ namespace asmjit {
|
||||
//!
|
||||
//! ### Supported Backends / Architectures
|
||||
//!
|
||||
//! - **X86** - Both 32-bit and 64-bit backends tested on our CI.
|
||||
//! - **X86** and **X86_64** - Both 32-bit and 64-bit backends tested on CI.
|
||||
//! - **AArch64** - AArch64 backend is currently only partially tested (there is no native AArch64 runner to test
|
||||
//! AsmJit Builder/Compiler)
|
||||
//!
|
||||
//! ### Static Builds and Embedding
|
||||
//!
|
||||
@@ -1807,6 +1809,14 @@ namespace asmjit {
|
||||
//! \brief X86/X64 backend.
|
||||
|
||||
|
||||
//! \defgroup asmjit_arm ARM Commons
|
||||
//! \brief ARM commons shared between AArch32 and AArch64.
|
||||
|
||||
|
||||
//! \defgroup asmjit_a64 AArch64 Backend
|
||||
//! \brief AArch64 backend.
|
||||
|
||||
|
||||
//! \cond INTERNAL
|
||||
//! \defgroup asmjit_ra RA
|
||||
//! \brief Register allocator internals.
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
//! \{
|
||||
|
||||
//! AsmJit library version in `(Major << 16) | (Minor << 8) | (Patch)` format.
|
||||
#define ASMJIT_LIBRARY_VERSION 0x010800 /* 1.8.0 */
|
||||
#define ASMJIT_LIBRARY_VERSION 0x010900 /* 1.9.0 */
|
||||
|
||||
//! \def ASMJIT_ABI_NAMESPACE
|
||||
//!
|
||||
@@ -24,7 +24,7 @@
|
||||
//! default, which makes it possible to use use multiple AsmJit libraries within a single project, totally controlled
|
||||
//! by the users. This is useful especially in cases in which some of such library comes from a third party.
|
||||
#ifndef ASMJIT_ABI_NAMESPACE
|
||||
#define ASMJIT_ABI_NAMESPACE _abi_1_8
|
||||
#define ASMJIT_ABI_NAMESPACE _abi_1_9
|
||||
#endif
|
||||
|
||||
//! \}
|
||||
@@ -79,6 +79,12 @@ namespace asmjit {
|
||||
//! Disables X86/X64 backends.
|
||||
#define ASMJIT_NO_X86
|
||||
|
||||
//! Disables AArch32 backends (both ARM and Thumb).
|
||||
#define ASMJIT_NO_AARCH32
|
||||
|
||||
//! Disables AArch64 backend.
|
||||
#define ASMJIT_NO_AARCH64
|
||||
|
||||
//! Disables non-host backends entirely (useful for JIT compilers to minimize the library size).
|
||||
#define ASMJIT_NO_FOREIGN
|
||||
|
||||
@@ -206,6 +212,10 @@ namespace asmjit {
|
||||
#if !ASMJIT_ARCH_X86 && !defined(ASMJIT_NO_X86)
|
||||
#define ASMJIT_NO_X86
|
||||
#endif
|
||||
|
||||
#if !ASMJIT_ARCH_ARM && !defined(ASMJIT_NO_AARCH64)
|
||||
#define ASMJIT_NO_AARCH64
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
@@ -344,7 +354,6 @@ namespace asmjit {
|
||||
#define ASMJIT_VECTORCALL
|
||||
#endif
|
||||
|
||||
|
||||
// Type alignment (not allowed by C++11 'alignas' keyword).
|
||||
#if defined(__GNUC__)
|
||||
#define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE
|
||||
@@ -383,6 +392,15 @@ namespace asmjit {
|
||||
#define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT
|
||||
#endif
|
||||
|
||||
//! \def ASMJIT_NOEXCEPT_TYPE
|
||||
//!
|
||||
//! Defined to `noexcept` in C++17 mode or nothing otherwise. Used by function typedefs.
|
||||
#if __cplusplus >= 201703L
|
||||
#define ASMJIT_NOEXCEPT_TYPE noexcept
|
||||
#else
|
||||
#define ASMJIT_NOEXCEPT_TYPE
|
||||
#endif
|
||||
|
||||
//! \def ASMJIT_ASSUME(...)
|
||||
//!
|
||||
//! Macro that tells the C/C++ compiler that the expression `...` evaluates to true.
|
||||
|
||||
@@ -17,12 +17,105 @@ ASMJIT_BEGIN_SUB_NAMESPACE(arm)
|
||||
//! \addtogroup asmjit_arm
|
||||
//! \{
|
||||
|
||||
//! Condition code (both AArch32 & AArch64).
|
||||
//!
|
||||
//! \note This enumeration doesn't match condition code that is used in AArch32/AArch64 opcodes. In general this
|
||||
//! condition code is encoded as `(cc - 2) & 0xF` so that `kAL` condition code is zero and encoded as 0xE in opcode.
|
||||
//! This makes it easier to use a condition code as an instruction modifier that defaults to 'al'.
|
||||
enum class CondCode : uint8_t {
|
||||
kAL = 0x00u, //!< (no condition code) (always)
|
||||
kNA = 0x01u, //!< (not available) (special)
|
||||
kEQ = 0x02u, //!< Z==1 (any_sign ==)
|
||||
kNE = 0x03u, //!< Z==0 (any_sign !=)
|
||||
kCS = 0x04u, //!< C==1 (unsigned >=)
|
||||
kHS = 0x04u, //!< C==1 (unsigned >=)
|
||||
kCC = 0x05u, //!< C==0 (unsigned < )
|
||||
kLO = 0x05u, //!< C==0 (unsigned < )
|
||||
kMI = 0x06u, //!< N==1 (is negative)
|
||||
kPL = 0x07u, //!< N==0 (is positive or zero)
|
||||
kVS = 0x08u, //!< V==1 (is overflow)
|
||||
kVC = 0x09u, //!< V==0 (no overflow)
|
||||
kHI = 0x0Au, //!< C==1 & Z==0 (unsigned > )
|
||||
kLS = 0x0Bu, //!< C==0 | Z==1 (unsigned <=)
|
||||
kGE = 0x0Cu, //!< N==V (signed >=)
|
||||
kLT = 0x0Du, //!< N!=V (signed < )
|
||||
kGT = 0x0Eu, //!< Z==0 & N==V (signed > )
|
||||
kLE = 0x0Fu, //!< Z==1 | N!=V (signed <=)
|
||||
|
||||
kSign = kMI, //!< Sign.
|
||||
kNotSign = kPL, //!< Not sign.
|
||||
|
||||
kOverflow = kVS, //!< Signed overflow.
|
||||
kNotOverflow = kVC, //!< Not signed overflow.
|
||||
|
||||
kEqual = kEQ, //!< Equal `a == b`.
|
||||
kNotEqual = kNE, //!< Not Equal `a != b`.
|
||||
|
||||
kZero = kEQ, //!< Zero (alias to equal).
|
||||
kNotZero = kNE, //!< Not Zero (alias to Not Equal).
|
||||
|
||||
kNegative = kMI, //!< Negative.
|
||||
kPositive = kPL, //!< Positive or zero.
|
||||
|
||||
kSignedLT = kLT, //!< Signed `a < b`.
|
||||
kSignedLE = kLE, //!< Signed `a <= b`.
|
||||
kSignedGT = kGT, //!< Signed `a > b`.
|
||||
kSignedGE = kGE, //!< Signed `a >= b`.
|
||||
|
||||
kUnsignedLT = kLO, //!< Unsigned `a < b`.
|
||||
kUnsignedLE = kLS, //!< Unsigned `a <= b`.
|
||||
kUnsignedGT = kHI, //!< Unsigned `a > b`.
|
||||
kUnsignedGE = kHS, //!< Unsigned `a >= b`.
|
||||
|
||||
kAlways = kAL, //!< No condition code (always).
|
||||
|
||||
kMaxValue = 0x0Fu //!< Maximum value of `CondCode`.
|
||||
};
|
||||
|
||||
//! Negates a condition code.
|
||||
static inline constexpr CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); }
|
||||
|
||||
//! Data type that can be encoded with the instruction (AArch32 only).
|
||||
enum class DataType : uint32_t {
|
||||
//! No data type specified (default for all general purpose instructions).
|
||||
kNone = 0,
|
||||
//! 8-bit signed integer, specified as `.s8` in assembly.
|
||||
kS8 = 1,
|
||||
//! 16-bit signed integer, specified as `.s16` in assembly.
|
||||
kS16 = 2,
|
||||
//! 32-bit signed integer, specified as `.s32` in assembly.
|
||||
kS32 = 3,
|
||||
//! 64-bit signed integer, specified as `.s64` in assembly.
|
||||
kS64 = 4,
|
||||
//! 8-bit unsigned integer, specified as `.u8` in assembly.
|
||||
kU8 = 5,
|
||||
//! 16-bit unsigned integer, specified as `.u16` in assembly.
|
||||
kU16 = 6,
|
||||
//! 32-bit unsigned integer, specified as `.u32` in assembly.
|
||||
kU32 = 7,
|
||||
//! 64-bit unsigned integer, specified as `.u64` in assembly.
|
||||
kU64 = 8,
|
||||
//! 16-bit floating point (half precision), specified as `.f16` in assembly.
|
||||
kF16 = 10,
|
||||
//! 32-bit floating point (single precision), specified as `.f32` in assembly.
|
||||
kF32 = 11,
|
||||
//! 64-bit floating point (double precision), specified as `.f64` in assembly.
|
||||
kF64 = 12,
|
||||
//! 8-bit polynomial.
|
||||
kP8 = 13,
|
||||
//! 64-bit polynomial.
|
||||
kP64 = 15,
|
||||
|
||||
//! Maximum value of `DataType`.
|
||||
kMaxValue = 15
|
||||
};
|
||||
|
||||
//! Shift operation predicate (ARM) describes either SHIFT or EXTEND operation.
|
||||
//!
|
||||
//! \note The constants are AsmJit specific. The first 5 values describe real constants on ARM32 and AArch64 hardware,
|
||||
//! however, the addition constants that describe extend modes are specific to AsmJit and would be translated to the
|
||||
//! AArch64 specific constants by the assembler.
|
||||
enum class ShiftOp {
|
||||
enum class ShiftOp : uint32_t {
|
||||
//! Shift left logical operation (default).
|
||||
//!
|
||||
//! Available to all ARM architectures.
|
||||
@@ -38,14 +131,10 @@ enum class ShiftOp {
|
||||
//! Available to all ARM architectures.
|
||||
kASR = 0x02u,
|
||||
|
||||
//! Rotate right operation.
|
||||
//!
|
||||
//! \note Not available in AArch64 mode.
|
||||
//! Rotate right operation (AArch32 only).
|
||||
kROR = 0x03u,
|
||||
|
||||
//! Rotate right with carry operation (encoded as `kShiftROR` with zero).
|
||||
//!
|
||||
//! \note Not available in AArch64 mode.
|
||||
//! Rotate right with carry operation (encoded as `ShiftOp::kROR` with zero) (AArch32 only).
|
||||
kRRX = 0x04u,
|
||||
|
||||
//! Shift left by filling low order bits with ones.
|
||||
|
||||
@@ -11,8 +11,8 @@
|
||||
#include "../x86/x86archtraits_p.h"
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
#include "../arm/armarchtraits_p.h"
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
#include "../arm/a64archtraits_p.h"
|
||||
#endif
|
||||
|
||||
ASMJIT_BEGIN_NAMESPACE
|
||||
@@ -83,8 +83,8 @@ ASMJIT_VARAPI const ArchTraits _archTraits[uint32_t(Arch::kMaxValue) + 1] = {
|
||||
noArchTraits,
|
||||
|
||||
// AArch64 architecture.
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
arm::a64ArchTraits,
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
a64::a64ArchTraits,
|
||||
#else
|
||||
noArchTraits,
|
||||
#endif
|
||||
|
||||
@@ -271,7 +271,7 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
|
||||
|
||||
re->_sourceSectionId = _section->id();
|
||||
re->_sourceOffset = offset();
|
||||
re->_format.resetToDataValue(uint32_t(dataSize));
|
||||
re->_format.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize);
|
||||
|
||||
if (le->isBound()) {
|
||||
re->_targetSectionId = le->section()->id();
|
||||
@@ -279,7 +279,7 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
|
||||
}
|
||||
else {
|
||||
OffsetFormat of;
|
||||
of.resetToDataValue(uint32_t(dataSize));
|
||||
of.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize);
|
||||
|
||||
LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0, of);
|
||||
if (ASMJIT_UNLIKELY(!link))
|
||||
@@ -348,7 +348,7 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size
|
||||
exp->setValueAsLabel(0, labelEntry);
|
||||
exp->setValueAsLabel(1, baseEntry);
|
||||
|
||||
re->_format.resetToDataValue(dataSize);
|
||||
re->_format.resetToSimpleValue(OffsetType::kSignedOffset, dataSize);
|
||||
re->_sourceSectionId = _section->id();
|
||||
re->_sourceOffset = offset();
|
||||
re->_payload = (uint64_t)(uintptr_t)exp;
|
||||
|
||||
@@ -590,7 +590,9 @@ Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1,
|
||||
Operand_ opArray[Globals::kMaxOpCount];
|
||||
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
|
||||
|
||||
Error err = InstAPI::validate(arch(), BaseInst(instId, options, _extraReg), opArray, opCount);
|
||||
ValidationFlags validationFlags = isCompiler() ? ValidationFlags::kEnableVirtRegs : ValidationFlags::kNone;
|
||||
Error err = _funcs.validate(arch(), BaseInst(instId, options, _extraReg), opArray, opCount, validationFlags);
|
||||
|
||||
if (ASMJIT_UNLIKELY(err)) {
|
||||
resetInstOptions();
|
||||
resetExtraReg();
|
||||
|
||||
@@ -746,6 +746,9 @@ public:
|
||||
|
||||
//! Returns the instruction id, see `BaseInst::Id`.
|
||||
inline InstId id() const noexcept { return _baseInst.id(); }
|
||||
//! Returns the instruction real id, see `BaseInst::Id`.
|
||||
inline InstId realId() const noexcept { return _baseInst.realId(); }
|
||||
|
||||
//! Sets the instruction id to `id`, see `BaseInst::Id`.
|
||||
inline void setId(InstId id) noexcept { _baseInst.setId(id); }
|
||||
|
||||
|
||||
@@ -194,6 +194,10 @@ Error CodeHolder::attach(BaseEmitter* emitter) noexcept {
|
||||
if (ASMJIT_UNLIKELY(type == EmitterType::kNone || uint32_t(type) > uint32_t(EmitterType::kMaxValue)))
|
||||
return DebugUtils::errored(kErrorInvalidState);
|
||||
|
||||
uint64_t archMask = emitter->_archMask;
|
||||
if (ASMJIT_UNLIKELY(!(archMask & (uint64_t(1) << uint32_t(arch())))))
|
||||
return DebugUtils::errored(kErrorInvalidArch);
|
||||
|
||||
// This is suspicious, but don't fail if `emitter` is already attached
|
||||
// to this code holder. This is not error, but it's not recommended.
|
||||
if (emitter->_code != nullptr) {
|
||||
@@ -944,7 +948,6 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
|
||||
return DebugUtils::errored(kErrorInvalidRelocEntry);
|
||||
|
||||
uint8_t* buffer = sourceSection->data();
|
||||
size_t valueOffset = size_t(re->sourceOffset()) + re->format().valueOffset();
|
||||
|
||||
switch (re->relocType()) {
|
||||
case RelocType::kExpression: {
|
||||
@@ -970,12 +973,18 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
|
||||
|
||||
case RelocType::kAbsToRel: {
|
||||
value -= baseAddress + sectionOffset + sourceOffset + regionSize;
|
||||
if (addressSize > 4 && !Support::isInt32(int64_t(value)))
|
||||
|
||||
// Sign extend as we are not interested in the high 32-bit word in a 32-bit address space.
|
||||
if (addressSize <= 4)
|
||||
value = uint64_t(int64_t(int32_t(value & 0xFFFFFFFFu)));
|
||||
else if (!Support::isInt32(int64_t(value)))
|
||||
return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case RelocType::kX64AddressEntry: {
|
||||
size_t valueOffset = size_t(re->sourceOffset()) + re->format().valueOffset();
|
||||
if (re->format().valueSize() != 4 || valueOffset < 2)
|
||||
return DebugUtils::errored(kErrorInvalidRelocEntry);
|
||||
|
||||
@@ -1030,25 +1039,8 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
|
||||
return DebugUtils::errored(kErrorInvalidRelocEntry);
|
||||
}
|
||||
|
||||
switch (re->format().valueSize()) {
|
||||
case 1:
|
||||
Support::writeU8(buffer + valueOffset, uint8_t(value & 0xFFu));
|
||||
break;
|
||||
|
||||
case 2:
|
||||
Support::writeU16uLE(buffer + valueOffset, uint16_t(value & 0xFFFFu));
|
||||
break;
|
||||
|
||||
case 4:
|
||||
Support::writeU32uLE(buffer + valueOffset, uint32_t(value & 0xFFFFFFFFu));
|
||||
break;
|
||||
|
||||
case 8:
|
||||
Support::writeU64uLE(buffer + valueOffset, value);
|
||||
break;
|
||||
|
||||
default:
|
||||
return DebugUtils::errored(kErrorInvalidRelocEntry);
|
||||
if (!CodeWriterUtils::writeOffset(buffer + re->sourceOffset(), int64_t(value), re->format())) {
|
||||
return DebugUtils::errored(kErrorInvalidRelocEntry);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -276,7 +276,10 @@ enum class OffsetType : uint8_t {
|
||||
//!
|
||||
//! This offset type is sufficient for many targets that store offset as a continuous set bits within an
|
||||
//! instruction word / sequence of bytes.
|
||||
kCommon = 0,
|
||||
kSignedOffset,
|
||||
|
||||
//! An unsigned value having `_immBitCount` bits and shifted by `_immBitShift`.
|
||||
kUnsignedOffset,
|
||||
|
||||
// AArch64 Specific Offset Formats
|
||||
// -------------------------------
|
||||
@@ -370,15 +373,15 @@ struct OffsetFormat {
|
||||
//!
|
||||
//! The region will be the same size as data and immediate bits would correspond to `dataSize * 8`. There will be
|
||||
//! no immediate bit shift or discarded bits.
|
||||
inline void resetToDataValue(size_t dataSize) noexcept {
|
||||
ASMJIT_ASSERT(dataSize <= 8u);
|
||||
inline void resetToSimpleValue(OffsetType type, size_t valueSize) noexcept {
|
||||
ASMJIT_ASSERT(valueSize <= 8u);
|
||||
|
||||
_type = OffsetType::kCommon;
|
||||
_type = type;
|
||||
_flags = uint8_t(0);
|
||||
_regionSize = uint8_t(dataSize);
|
||||
_valueSize = uint8_t(dataSize);
|
||||
_regionSize = uint8_t(valueSize);
|
||||
_valueSize = uint8_t(valueSize);
|
||||
_valueOffset = uint8_t(0);
|
||||
_immBitCount = uint8_t(dataSize * 8u);
|
||||
_immBitCount = uint8_t(valueSize * 8u);
|
||||
_immBitShift = uint8_t(0);
|
||||
_immDiscardLsb = uint8_t(0);
|
||||
}
|
||||
|
||||
@@ -14,26 +14,46 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
|
||||
uint32_t bitShift = format.immBitShift();
|
||||
uint32_t discardLsb = format.immDiscardLsb();
|
||||
|
||||
// Invalid offset (should not happen).
|
||||
if (!bitCount || bitCount > format.valueSize() * 8u)
|
||||
return false;
|
||||
|
||||
if (discardLsb) {
|
||||
ASMJIT_ASSERT(discardLsb <= 32);
|
||||
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
|
||||
uint32_t value;
|
||||
|
||||
// First handle all unsigned offset types.
|
||||
if (format.type() == OffsetType::kUnsignedOffset) {
|
||||
if (discardLsb) {
|
||||
ASMJIT_ASSERT(discardLsb <= 32);
|
||||
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
|
||||
return false;
|
||||
offset64 = int64_t(uint64_t(offset64) >> discardLsb);
|
||||
}
|
||||
|
||||
value = uint32_t(offset64 & Support::lsbMask<uint32_t>(bitCount));
|
||||
if (value != offset64)
|
||||
return false;
|
||||
}
|
||||
else {
|
||||
// The rest of OffsetType options are all signed.
|
||||
if (discardLsb) {
|
||||
ASMJIT_ASSERT(discardLsb <= 32);
|
||||
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
|
||||
return false;
|
||||
offset64 >>= discardLsb;
|
||||
}
|
||||
|
||||
if (!Support::isInt32(offset64))
|
||||
return false;
|
||||
|
||||
value = uint32_t(int32_t(offset64));
|
||||
if (!Support::isEncodableOffset32(int32_t(value), bitCount))
|
||||
return false;
|
||||
offset64 >>= discardLsb;
|
||||
}
|
||||
|
||||
if (!Support::isInt32(offset64))
|
||||
return false;
|
||||
|
||||
int32_t offset32 = int32_t(offset64);
|
||||
if (!Support::isEncodableOffset32(offset32, bitCount))
|
||||
return false;
|
||||
|
||||
switch (format.type()) {
|
||||
case OffsetType::kCommon: {
|
||||
*dst = (uint32_t(offset32) & Support::lsbMask<uint32_t>(bitCount)) << bitShift;
|
||||
case OffsetType::kSignedOffset:
|
||||
case OffsetType::kUnsignedOffset: {
|
||||
*dst = (value & Support::lsbMask<uint32_t>(bitCount)) << bitShift;
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -43,8 +63,8 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
|
||||
if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5)
|
||||
return false;
|
||||
|
||||
uint32_t immLo = uint32_t(offset32) & 0x3u;
|
||||
uint32_t immHi = uint32_t(offset32 >> 2) & Support::lsbMask<uint32_t>(19);
|
||||
uint32_t immLo = value & 0x3u;
|
||||
uint32_t immHi = (value >> 2) & Support::lsbMask<uint32_t>(19);
|
||||
|
||||
*dst = (immLo << 29) | (immHi << 5);
|
||||
return true;
|
||||
@@ -62,19 +82,40 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs
|
||||
if (!bitCount || bitCount > format.valueSize() * 8u)
|
||||
return false;
|
||||
|
||||
if (discardLsb) {
|
||||
ASMJIT_ASSERT(discardLsb <= 32);
|
||||
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
|
||||
uint64_t value;
|
||||
|
||||
// First handle all unsigned offset types.
|
||||
if (format.type() == OffsetType::kUnsignedOffset) {
|
||||
if (discardLsb) {
|
||||
ASMJIT_ASSERT(discardLsb <= 32);
|
||||
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
|
||||
return false;
|
||||
offset64 = int64_t(uint64_t(offset64) >> discardLsb);
|
||||
}
|
||||
|
||||
value = uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount);
|
||||
if (value != uint64_t(offset64))
|
||||
return false;
|
||||
offset64 >>= discardLsb;
|
||||
}
|
||||
else {
|
||||
// The rest of OffsetType options are all signed.
|
||||
if (discardLsb) {
|
||||
ASMJIT_ASSERT(discardLsb <= 32);
|
||||
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
|
||||
return false;
|
||||
offset64 >>= discardLsb;
|
||||
}
|
||||
|
||||
if (!Support::isEncodableOffset64(offset64, bitCount))
|
||||
return false;
|
||||
|
||||
value = uint64_t(offset64);
|
||||
}
|
||||
|
||||
if (!Support::isEncodableOffset64(offset64, bitCount))
|
||||
return false;
|
||||
|
||||
switch (format.type()) {
|
||||
case OffsetType::kCommon: {
|
||||
*dst = (uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount)) << format.immBitShift();
|
||||
case OffsetType::kSignedOffset:
|
||||
case OffsetType::kUnsignedOffset: {
|
||||
*dst = (value & Support::lsbMask<uint64_t>(bitCount)) << format.immBitShift();
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -109,8 +150,9 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
|
||||
|
||||
case 4: {
|
||||
uint32_t mask;
|
||||
if (!encodeOffset32(&mask, offset64, format))
|
||||
if (!encodeOffset32(&mask, offset64, format)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Support::writeU32uLE(dst, Support::readU32uLE(dst) | mask);
|
||||
return true;
|
||||
|
||||
@@ -980,7 +980,7 @@ namespace AppleHWId {
|
||||
|
||||
static ASMJIT_FAVOR_SIZE uint32_t queryARMCpuFamilyId() noexcept {
|
||||
uint32_t result = 0;
|
||||
size_t size = sizeof(cpuFamily);
|
||||
size_t size = sizeof(result);
|
||||
|
||||
int res = sysctlbyname("hw.cpufamily", &result, &size, nullptr, 0);
|
||||
if (res != 0)
|
||||
|
||||
@@ -429,7 +429,7 @@ public:
|
||||
kARMv8_4a, //!< ARMv8.4-A ISA.
|
||||
kARMv8_5a, //!< ARMv8.5-A ISA.
|
||||
kARMv8_6a, //!< ARMv8.6-A ISA.
|
||||
kARMv8_7a, //!< ARMv8.6-A ISA.
|
||||
kARMv8_7a, //!< ARMv8.7-A ISA.
|
||||
|
||||
kVFPv2, //!< CPU has VFPv2 instruction set.
|
||||
kVFPv3, //!< CPU has VFPv3 instruction set.
|
||||
|
||||
@@ -9,16 +9,6 @@
|
||||
#include "../core/logger.h"
|
||||
#include "../core/support.h"
|
||||
|
||||
#if !defined(ASMJIT_NO_X86)
|
||||
#include "../x86/x86emithelper_p.h"
|
||||
#include "../x86/x86instdb_p.h"
|
||||
#endif // !ASMJIT_NO_X86
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
#include "../arm/a64emithelper_p.h"
|
||||
#include "../arm/a64instdb.h"
|
||||
#endif // ASMJIT_BUILD_ARM
|
||||
|
||||
ASMJIT_BEGIN_NAMESPACE
|
||||
|
||||
// BaseEmitter - Construction & Destruction
|
||||
@@ -219,70 +209,28 @@ Error BaseEmitter::_emitOpArray(InstId instId, const Operand_* operands, size_t
|
||||
}
|
||||
}
|
||||
|
||||
// BaseEmitter - Emit (High-Level)
|
||||
// ===============================
|
||||
// BaseEmitter - Emit Utilities
|
||||
// ============================
|
||||
|
||||
ASMJIT_FAVOR_SIZE Error BaseEmitter::emitProlog(const FuncFrame& frame) {
|
||||
Error BaseEmitter::emitProlog(const FuncFrame& frame) {
|
||||
if (ASMJIT_UNLIKELY(!_code))
|
||||
return DebugUtils::errored(kErrorNotInitialized);
|
||||
|
||||
#if !defined(ASMJIT_NO_X86)
|
||||
if (environment().isFamilyX86()) {
|
||||
x86::EmitHelper emitHelper(this, frame.isAvxEnabled(), frame.isAvx512Enabled());
|
||||
return emitHelper.emitProlog(frame);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (environment().isArchAArch64()) {
|
||||
a64::EmitHelper emitHelper(this);
|
||||
return emitHelper.emitProlog(frame);
|
||||
}
|
||||
#endif
|
||||
|
||||
return DebugUtils::errored(kErrorInvalidArch);
|
||||
return _funcs.emitProlog(this, frame);
|
||||
}
|
||||
|
||||
ASMJIT_FAVOR_SIZE Error BaseEmitter::emitEpilog(const FuncFrame& frame) {
|
||||
Error BaseEmitter::emitEpilog(const FuncFrame& frame) {
|
||||
if (ASMJIT_UNLIKELY(!_code))
|
||||
return DebugUtils::errored(kErrorNotInitialized);
|
||||
|
||||
#if !defined(ASMJIT_NO_X86)
|
||||
if (environment().isFamilyX86()) {
|
||||
x86::EmitHelper emitHelper(this, frame.isAvxEnabled(), frame.isAvx512Enabled());
|
||||
return emitHelper.emitEpilog(frame);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (environment().isArchAArch64()) {
|
||||
a64::EmitHelper emitHelper(this);
|
||||
return emitHelper.emitEpilog(frame);
|
||||
}
|
||||
#endif
|
||||
|
||||
return DebugUtils::errored(kErrorInvalidArch);
|
||||
return _funcs.emitEpilog(this, frame);
|
||||
}
|
||||
|
||||
ASMJIT_FAVOR_SIZE Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
|
||||
Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
|
||||
if (ASMJIT_UNLIKELY(!_code))
|
||||
return DebugUtils::errored(kErrorNotInitialized);
|
||||
|
||||
#if !defined(ASMJIT_NO_X86)
|
||||
if (environment().isFamilyX86()) {
|
||||
x86::EmitHelper emitHelper(this, frame.isAvxEnabled(), frame.isAvx512Enabled());
|
||||
return emitHelper.emitArgsAssignment(frame, args);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (environment().isArchAArch64()) {
|
||||
a64::EmitHelper emitHelper(this);
|
||||
return emitHelper.emitArgsAssignment(frame, args);
|
||||
}
|
||||
#endif
|
||||
|
||||
return DebugUtils::errored(kErrorInvalidArch);
|
||||
return _funcs.emitArgsAssignment(this, frame, args);
|
||||
}
|
||||
|
||||
// BaseEmitter - Comment
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#include "../core/archtraits.h"
|
||||
#include "../core/codeholder.h"
|
||||
#include "../core/formatter.h"
|
||||
#include "../core/inst.h"
|
||||
#include "../core/operand.h"
|
||||
#include "../core/type.h"
|
||||
@@ -209,6 +210,9 @@ public:
|
||||
//! Validation options.
|
||||
DiagnosticOptions _diagnosticOptions = DiagnosticOptions::kNone;
|
||||
|
||||
//! All supported architectures in a bit-mask, where LSB is the bit with a zero index.
|
||||
uint64_t _archMask = 0;
|
||||
|
||||
//! Encoding options.
|
||||
EncodingOptions _encodingOptions = EncodingOptions::kNone;
|
||||
|
||||
@@ -236,6 +240,45 @@ public:
|
||||
//! Inline comment of the next instruction (affects the next instruction).
|
||||
const char* _inlineComment = nullptr;
|
||||
|
||||
//! Function callbacks used by emitter implementation.
|
||||
//!
|
||||
//! These are typically shared between Assembler/Builder/Compiler of a single backend.
|
||||
struct Funcs {
|
||||
typedef Error (ASMJIT_CDECL* EmitProlog)(BaseEmitter* emitter, const FuncFrame& frame);
|
||||
typedef Error (ASMJIT_CDECL* EmitEpilog)(BaseEmitter* emitter, const FuncFrame& frame);
|
||||
typedef Error (ASMJIT_CDECL* EmitArgsAssignment)(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args);
|
||||
|
||||
typedef Error (ASMJIT_CDECL* FormatInstruction)(
|
||||
String& sb,
|
||||
FormatFlags formatFlags,
|
||||
const BaseEmitter* emitter,
|
||||
Arch arch,
|
||||
const BaseInst& inst, const Operand_* operands, size_t opCount) ASMJIT_NOEXCEPT_TYPE;
|
||||
|
||||
typedef Error (ASMJIT_CDECL* ValidateFunc)(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) ASMJIT_NOEXCEPT_TYPE;
|
||||
|
||||
//! Emit prolog implementation.
|
||||
EmitProlog emitProlog;
|
||||
//! Emit epilog implementation.
|
||||
EmitEpilog emitEpilog;
|
||||
//! Emit arguments assignment implementation.
|
||||
EmitArgsAssignment emitArgsAssignment;
|
||||
//! Instruction formatter implementation.
|
||||
FormatInstruction formatInstruction;
|
||||
//! Instruction validation implementation.
|
||||
ValidateFunc validate;
|
||||
|
||||
//! Resets all functions to nullptr.
|
||||
inline void reset() noexcept {
|
||||
emitProlog = nullptr;
|
||||
emitEpilog = nullptr;
|
||||
emitArgsAssignment = nullptr;
|
||||
validate = nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
Funcs _funcs {};
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Construction & Destruction
|
||||
|
||||
@@ -86,7 +86,7 @@ void logInstructionEmitted(
|
||||
opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
|
||||
|
||||
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kCode));
|
||||
Formatter::formatInstruction(sb, formatFlags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
|
||||
self->_funcs.formatInstruction(sb, formatFlags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
|
||||
|
||||
if (Support::test(formatFlags, FormatFlags::kMachineCode))
|
||||
finishFormattedLine(sb, logger->options(), self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
|
||||
@@ -109,7 +109,7 @@ Error logInstructionFailed(
|
||||
Operand_ opArray[Globals::kMaxOpCount];
|
||||
opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
|
||||
|
||||
Formatter::formatInstruction(sb, FormatFlags::kNone, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
|
||||
self->_funcs.formatInstruction(sb, FormatFlags::kNone, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
|
||||
|
||||
if (self->inlineComment()) {
|
||||
sb.append(" ; ");
|
||||
|
||||
@@ -339,8 +339,12 @@ public:
|
||||
|
||||
//! Tests whether this architecture is of X86 family.
|
||||
inline bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
|
||||
//! Tests whether this architecture family is ARM, Thumb, or AArch64.
|
||||
//! Tests whether this architecture family is ARM, THUMB, or AArch64.
|
||||
inline bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
|
||||
//! Tests whether this architecture family is AArch32 (ARM or THUMB).
|
||||
inline bool isFamilyAArch32() const noexcept { return isFamilyAArch32(_arch); }
|
||||
//! Tests whether this architecture family is AArch64.
|
||||
inline bool isFamilyAArch64() const noexcept { return isFamilyAArch64(_arch); }
|
||||
//! Tests whether this architecture family is MISP or MIPS64.
|
||||
inline bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
|
||||
//! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
|
||||
@@ -431,16 +435,16 @@ public:
|
||||
return uint32_t(arch) >= uint32_t(Arch::kBigEndian);
|
||||
}
|
||||
|
||||
//! Tests whether the given architecture is ARM or ARM_BE.
|
||||
static inline bool isArchARM(Arch arch) noexcept {
|
||||
return arch == Arch::kARM || arch == Arch::kARM_BE;
|
||||
}
|
||||
|
||||
//! Tests whether the given architecture is Thumb or Thumb_BE.
|
||||
static inline bool isArchThumb(Arch arch) noexcept {
|
||||
return arch == Arch::kThumb || arch == Arch::kThumb_BE;
|
||||
}
|
||||
|
||||
//! Tests whether the given architecture is ARM or ARM_BE.
|
||||
static inline bool isArchARM(Arch arch) noexcept {
|
||||
return arch == Arch::kARM || arch == Arch::kARM_BE;
|
||||
}
|
||||
|
||||
//! Tests whether the given architecture is AArch64 or AArch64_BE.
|
||||
static inline bool isArchAArch64(Arch arch) noexcept {
|
||||
return arch == Arch::kAArch64 || arch == Arch::kAArch64_BE;
|
||||
@@ -461,11 +465,21 @@ public:
|
||||
return arch == Arch::kX86 || arch == Arch::kX64;
|
||||
}
|
||||
|
||||
//! Tests whether the given architecture family is ARM, Thumb, or AArch64.
|
||||
//! Tests whether the given architecture family is ARM, THUMB, or AArch64.
|
||||
static inline bool isFamilyARM(Arch arch) noexcept {
|
||||
return isArchARM(arch) || isArchAArch64(arch) || isArchThumb(arch);
|
||||
}
|
||||
|
||||
//! Tests whether the given architecture family is AArch32 (ARM or THUMB).
|
||||
static inline bool isFamilyAArch32(Arch arch) noexcept {
|
||||
return isArchARM(arch) || isArchThumb(arch);
|
||||
}
|
||||
|
||||
//! Tests whether the given architecture family is AArch64.
|
||||
static inline bool isFamilyAArch64(Arch arch) noexcept {
|
||||
return isArchAArch64(arch);
|
||||
}
|
||||
|
||||
//! Tests whether the given architecture family is MISP or MIPS64.
|
||||
static inline bool isFamilyMIPS(Arch arch) noexcept {
|
||||
return isArchMIPS32(arch) || isArchMIPS64(arch);
|
||||
|
||||
@@ -20,8 +20,8 @@
|
||||
#include "../x86/x86formatter_p.h"
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
#include "../arm/armformatter_p.h"
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
#include "../arm/a64formatter_p.h"
|
||||
#endif
|
||||
|
||||
ASMJIT_BEGIN_NAMESPACE
|
||||
@@ -107,7 +107,7 @@ Error formatFeature(
|
||||
return x86::FormatterInternal::formatFeature(sb, featureId);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
#if !defined(ASMJIT_NO_AARCH32) && !defined(ASMJIT_NO_AARCH64)
|
||||
if (Environment::isFamilyARM(arch))
|
||||
return arm::FormatterInternal::formatFeature(sb, featureId);
|
||||
#endif
|
||||
@@ -164,9 +164,9 @@ Error formatRegister(
|
||||
return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (Environment::isFamilyARM(arch))
|
||||
return arm::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if (Environment::isFamilyAArch64(arch))
|
||||
return a64::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
|
||||
#endif
|
||||
|
||||
return kErrorInvalidArch;
|
||||
@@ -184,9 +184,9 @@ Error formatOperand(
|
||||
return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (Environment::isFamilyARM(arch))
|
||||
return arm::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if (Environment::isFamilyAArch64(arch))
|
||||
return a64::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
|
||||
#endif
|
||||
|
||||
return kErrorInvalidArch;
|
||||
@@ -282,9 +282,9 @@ Error formatInstruction(
|
||||
return x86::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if (Environment::isFamilyARM(arch))
|
||||
return arm::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
|
||||
return a64::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
|
||||
#endif
|
||||
|
||||
return kErrorInvalidArch;
|
||||
@@ -408,7 +408,7 @@ Error formatNode(
|
||||
case NodeType::kInst:
|
||||
case NodeType::kJump: {
|
||||
const InstNode* instNode = node->as<InstNode>();
|
||||
ASMJIT_PROPAGATE(formatInstruction(sb, formatOptions.flags(), builder,
|
||||
ASMJIT_PROPAGATE(builder->_funcs.formatInstruction(sb, formatOptions.flags(), builder,
|
||||
builder->arch(),
|
||||
instNode->baseInst(), instNode->operands(), instNode->opCount()));
|
||||
break;
|
||||
@@ -525,7 +525,7 @@ Error formatNode(
|
||||
|
||||
case NodeType::kInvoke: {
|
||||
const InvokeNode* invokeNode = node->as<InvokeNode>();
|
||||
ASMJIT_PROPAGATE(formatInstruction(sb, formatOptions.flags(), builder,
|
||||
ASMJIT_PROPAGATE(builder->_funcs.formatInstruction(sb, formatOptions.flags(), builder,
|
||||
builder->arch(),
|
||||
invokeNode->baseInst(), invokeNode->operands(), invokeNode->opCount()));
|
||||
break;
|
||||
|
||||
@@ -14,8 +14,8 @@
|
||||
#include "../x86/x86func_p.h"
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
#include "../arm/armfunc_p.h"
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
#include "../arm/a64func_p.h"
|
||||
#endif
|
||||
|
||||
ASMJIT_BEGIN_NAMESPACE
|
||||
@@ -31,9 +31,9 @@ ASMJIT_FAVOR_SIZE Error CallConv::init(CallConvId ccId, const Environment& envir
|
||||
return x86::FuncInternal::initCallConv(*this, ccId, environment);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (environment.isFamilyARM())
|
||||
return arm::FuncInternal::initCallConv(*this, ccId, environment);
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if (environment.isFamilyAArch64())
|
||||
return a64::FuncInternal::initCallConv(*this, ccId, environment);
|
||||
#endif
|
||||
|
||||
return DebugUtils::errored(kErrorInvalidArgument);
|
||||
@@ -73,9 +73,9 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E
|
||||
return x86::FuncInternal::initFuncDetail(*this, signature, registerSize);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (environment.isFamilyARM())
|
||||
return arm::FuncInternal::initFuncDetail(*this, signature, registerSize);
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if (environment.isFamilyAArch64())
|
||||
return a64::FuncInternal::initFuncDetail(*this, signature, registerSize);
|
||||
#endif
|
||||
|
||||
// We should never bubble here as if `cc.init()` succeeded then there has to be an implementation for the current
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
#include "../x86/x86instapi_p.h"
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
#include "../arm/a64instapi_p.h"
|
||||
#endif
|
||||
|
||||
@@ -27,8 +27,8 @@ Error InstAPI::instIdToString(Arch arch, InstId instId, String& output) noexcept
|
||||
return x86::InstInternal::instIdToString(arch, instId, output);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (Environment::isArchAArch64(arch))
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if (Environment::isFamilyAArch64(arch))
|
||||
return a64::InstInternal::instIdToString(arch, instId, output);
|
||||
#endif
|
||||
|
||||
@@ -41,8 +41,8 @@ InstId InstAPI::stringToInstId(Arch arch, const char* s, size_t len) noexcept {
|
||||
return x86::InstInternal::stringToInstId(arch, s, len);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (Environment::isArchAArch64(arch))
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if (Environment::isFamilyAArch64(arch))
|
||||
return a64::InstInternal::stringToInstId(arch, s, len);
|
||||
#endif
|
||||
|
||||
@@ -60,8 +60,8 @@ Error InstAPI::validate(Arch arch, const BaseInst& inst, const Operand_* operand
|
||||
return x86::InstInternal::validate(arch, inst, operands, opCount, validationFlags);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (Environment::isArchAArch64(arch))
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if (Environment::isFamilyAArch64(arch))
|
||||
return a64::InstInternal::validate(arch, inst, operands, opCount, validationFlags);
|
||||
#endif
|
||||
|
||||
@@ -82,8 +82,8 @@ Error InstAPI::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* oper
|
||||
return x86::InstInternal::queryRWInfo(arch, inst, operands, opCount, out);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (Environment::isArchAArch64(arch))
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if (Environment::isFamilyAArch64(arch))
|
||||
return a64::InstInternal::queryRWInfo(arch, inst, operands, opCount, out);
|
||||
#endif
|
||||
|
||||
@@ -101,8 +101,8 @@ Error InstAPI::queryFeatures(Arch arch, const BaseInst& inst, const Operand_* op
|
||||
return x86::InstInternal::queryFeatures(arch, inst, operands, opCount, out);
|
||||
#endif
|
||||
|
||||
#ifdef ASMJIT_BUILD_ARM
|
||||
if (Environment::isArchAArch64(arch))
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if (Environment::isFamilyAArch64(arch))
|
||||
return a64::InstInternal::queryFeatures(arch, inst, operands, opCount, out);
|
||||
#endif
|
||||
|
||||
|
||||
@@ -16,17 +16,41 @@ ASMJIT_BEGIN_NAMESPACE
|
||||
//! \addtogroup asmjit_instruction_db
|
||||
//! \{
|
||||
|
||||
//! Describes an instruction.
|
||||
//! Describes an instruction id and modifiers used together with the id.
|
||||
//!
|
||||
//! Each architecture has a set of valid instructions indexed from 0. Instruction with 0 id is, however, a special
|
||||
//! instruction that describes an invalid instruction. Different architectures can share the same instruction id,
|
||||
//! which would describe a different instruction per architecture.
|
||||
//! instruction that describes a "no instruction" or "invalid instruction". Different architectures can assign a.
|
||||
//! different instruction to the same id, each architecture typicall has its own instructions indexed from 1.
|
||||
//!
|
||||
//! Instruction identifiers listed by architecture:
|
||||
//!
|
||||
//! - \ref x86::Inst (X86 and X86_64)
|
||||
//! - \ref a64::Inst (AArch64)
|
||||
typedef uint32_t InstId;
|
||||
|
||||
//! Instruction id parts.
|
||||
//!
|
||||
//! A mask that specifies a bit-layout of \ref InstId.
|
||||
enum class InstIdParts : uint32_t {
|
||||
// Common Masks
|
||||
// ------------
|
||||
|
||||
//! Real id without any modifiers (always 16 least significant bits).
|
||||
kRealId = 0x0000FFFFu,
|
||||
//! Instruction is abstract (or virtual, IR, etc...).
|
||||
kAbstract = 0x80000000u,
|
||||
|
||||
// ARM Specific
|
||||
// ------------
|
||||
|
||||
//! AArch32 first data type, used by ASIMD instructions (`inst.dt.dt2`).
|
||||
kA32_DT = 0x000F0000u,
|
||||
//! AArch32 second data type, used by ASIMD instructions (`inst.dt.dt2`).
|
||||
kA32_DT2 = 0x00F00000u,
|
||||
//! AArch32/AArch64 condition code.
|
||||
kARM_Cond = 0x78000000u
|
||||
};
|
||||
|
||||
//! Instruction options.
|
||||
//!
|
||||
//! Instruction options complement instruction identifier and attributes.
|
||||
@@ -184,7 +208,7 @@ public:
|
||||
//! \name Members
|
||||
//! \{
|
||||
|
||||
//! Instruction id.
|
||||
//! Instruction id with modifiers.
|
||||
InstId _id;
|
||||
//! Instruction options.
|
||||
InstOptions _options;
|
||||
@@ -224,16 +248,29 @@ public:
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Instruction Id
|
||||
//! \name Instruction id and modifiers
|
||||
//! \{
|
||||
|
||||
//! Returns the instruction id.
|
||||
//! Returns the instruction id with modifiers.
|
||||
inline InstId id() const noexcept { return _id; }
|
||||
//! Sets the instruction id to the given `id`.
|
||||
//! Sets the instruction id and modiiers from `id`.
|
||||
inline void setId(InstId id) noexcept { _id = id; }
|
||||
//! Resets the instruction id to zero, see \ref kIdNone.
|
||||
//! Resets the instruction id and modifiers to zero, see \ref kIdNone.
|
||||
inline void resetId() noexcept { _id = 0; }
|
||||
|
||||
//! Returns a real instruction id that doesn't contain any modifiers.
|
||||
inline InstId realId() const noexcept { return _id & uint32_t(InstIdParts::kRealId); }
|
||||
|
||||
template<InstIdParts kPart>
|
||||
inline uint32_t getInstIdPart() const noexcept {
|
||||
return (uint32_t(_id) & uint32_t(kPart)) >> Support::ConstCTZ<uint32_t(kPart)>::value;
|
||||
}
|
||||
|
||||
template<InstIdParts kPart>
|
||||
inline void setInstIdPart(uint32_t value) noexcept {
|
||||
_id = (_id & ~uint32_t(kPart)) | (value << Support::ConstCTZ<uint32_t(kPart)>::value);
|
||||
}
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Instruction Options
|
||||
@@ -259,6 +296,27 @@ public:
|
||||
inline void resetExtraReg() noexcept { _extraReg.reset(); }
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name ARM Specific
|
||||
//! \{
|
||||
|
||||
inline arm::CondCode armCondCode() const noexcept { return (arm::CondCode)getInstIdPart<InstIdParts::kARM_Cond>(); }
|
||||
inline void setArmCondCode(arm::CondCode cc) noexcept { setInstIdPart<InstIdParts::kARM_Cond>(uint32_t(cc)); }
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Statics
|
||||
//! \{
|
||||
|
||||
static inline constexpr InstId composeARMInstId(uint32_t id, arm::CondCode cc) noexcept {
|
||||
return id | (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
|
||||
}
|
||||
|
||||
static inline constexpr arm::CondCode extractARMCondCode(uint32_t id) noexcept {
|
||||
return (arm::CondCode)((uint32_t(id) & uint32_t(InstIdParts::kARM_Cond)) >> Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
|
||||
}
|
||||
|
||||
//! \}
|
||||
};
|
||||
|
||||
//! CPU read/write flags used by \ref InstRWInfo.
|
||||
|
||||
@@ -43,6 +43,8 @@ Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
|
||||
// Recalculate the final code size and shrink the memory we allocated for it
|
||||
// in case that some relocations didn't require records in an address table.
|
||||
size_t codeSize = code->codeSize();
|
||||
if (codeSize < estimatedCodeSize)
|
||||
_allocator.shrink(rx, codeSize);
|
||||
|
||||
if (codeSize < estimatedCodeSize)
|
||||
_allocator.shrink(rx, codeSize);
|
||||
|
||||
@@ -98,6 +98,9 @@ enum class RegType : uint8_t {
|
||||
// X86 Specific Register Types
|
||||
// ---------------------------
|
||||
|
||||
// X86 Specific Register Types
|
||||
// ===========================
|
||||
|
||||
//! Instruction pointer (RIP), only addressable in \ref x86::Mem in 64-bit targets.
|
||||
kX86_Rip = kPC,
|
||||
//! Low GPB register (AL, BL, CL, DL, ...).
|
||||
@@ -133,6 +136,26 @@ enum class RegType : uint8_t {
|
||||
//! TMM register (AMX_TILE)
|
||||
kX86_Tmm = kExtra + 6,
|
||||
|
||||
// ARM Specific Register Types
|
||||
// ===========================
|
||||
|
||||
//! Program pointer (PC) register (AArch64).
|
||||
kARM_PC = kPC,
|
||||
//! 32-bit general purpose register (R or W).
|
||||
kARM_GpW = kGp32,
|
||||
//! 64-bit general purpose register (X).
|
||||
kARM_GpX = kGp64,
|
||||
//! 8-bit view of VFP/ASIMD register (B).
|
||||
kARM_VecB = kVec8,
|
||||
//! 16-bit view of VFP/ASIMD register (H).
|
||||
kARM_VecH = kVec16,
|
||||
//! 32-bit view of VFP/ASIMD register (S).
|
||||
kARM_VecS = kVec32,
|
||||
//! 64-bit view of VFP/ASIMD register (D).
|
||||
kARM_VecD = kVec64,
|
||||
//! 128-bit view of VFP/ASIMD register (Q|V).
|
||||
kARM_VecV = kVec128,
|
||||
|
||||
//! Maximum value of `RegType`.
|
||||
kMaxValue = 31
|
||||
};
|
||||
|
||||
@@ -176,6 +176,12 @@ static constexpr X sar(const X& x, const Y& y) noexcept {
|
||||
return X(S(x) >> y);
|
||||
}
|
||||
|
||||
template<typename X, typename Y>
|
||||
static constexpr X ror(const X& x, const Y& y) noexcept {
|
||||
typedef typename std::make_unsigned<X>::type U;
|
||||
return X((U(x) >> y) | (U(x) << (bitSizeOf<U>() - y)));
|
||||
}
|
||||
|
||||
//! Returns `x | (x >> y)` - helper used by some bit manipulation helpers.
|
||||
template<typename X, typename Y>
|
||||
static constexpr X or_shr(const X& x, const Y& y) noexcept { return X(x | shr(x, y)); }
|
||||
@@ -187,7 +193,33 @@ static constexpr T blsi(T x) noexcept {
|
||||
return T(U(x) & neg(U(x)));
|
||||
}
|
||||
|
||||
//! Generate a trailing bit-mask that has `n` least significant (trailing) bits set.
|
||||
//! Tests whether the given value `x` has `n`th bit set.
|
||||
template<typename T, typename IndexT>
|
||||
static constexpr bool bitTest(T x, IndexT n) noexcept {
|
||||
typedef typename std::make_unsigned<T>::type U;
|
||||
return (U(x) & (U(1) << asStdInt(n))) != 0;
|
||||
}
|
||||
|
||||
// Tests whether the given `value` is a consecutive mask of bits that starts at
|
||||
// the least significant bit.
|
||||
template<typename T>
|
||||
static inline constexpr bool isLsbMask(const T& value) {
|
||||
typedef typename std::make_unsigned<T>::type U;
|
||||
return value && ((U(value) + 1u) & U(value)) == 0;
|
||||
}
|
||||
|
||||
// Tests whether the given value contains at least one bit or whether it's a
|
||||
// bit-mask of consecutive bits.
|
||||
//
|
||||
// This function is similar to \ref isLsbMask(), but the mask doesn't have to
|
||||
// start at a least significant bit.
|
||||
template<typename T>
|
||||
static inline constexpr bool isConsecutiveMask(const T& value) {
|
||||
typedef typename std::make_unsigned<T>::type U;
|
||||
return value && isLsbMask((U(value) - 1u) | U(value));
|
||||
}
|
||||
|
||||
//! Generates a trailing bit-mask that has `n` least significant (trailing) bits set.
|
||||
template<typename T, typename CountT>
|
||||
static constexpr T lsbMask(const CountT& n) noexcept {
|
||||
typedef typename std::make_unsigned<T>::type U;
|
||||
@@ -198,11 +230,15 @@ static constexpr T lsbMask(const CountT& n) noexcept {
|
||||
: n ? T(shr(allOnes<T>(), bitSizeOf<T>() - size_t(n))) : T(0);
|
||||
}
|
||||
|
||||
//! Tests whether the given value `x` has `n`th bit set.
|
||||
template<typename T, typename IndexT>
|
||||
static constexpr bool bitTest(T x, IndexT n) noexcept {
|
||||
//! Generats a leading bit-mask that has `n` most significant (leading) bits set.
|
||||
template<typename T, typename CountT>
|
||||
static constexpr T msbMask(const CountT& n) noexcept {
|
||||
typedef typename std::make_unsigned<T>::type U;
|
||||
return (U(x) & (U(1) << asUInt(n))) != 0;
|
||||
return (sizeof(U) < sizeof(uintptr_t))
|
||||
// Prevent undefined behavior by using a larger type than T.
|
||||
? T(allOnes<uintptr_t>() >> (bitSizeOf<uintptr_t>() - n))
|
||||
// Prevent undefined behavior by performing `n & (nBits - 1)` so it's always within the range.
|
||||
: T(sar(U(n != 0) << (bitSizeOf<U>() - 1), n ? uint32_t(n - 1) : uint32_t(0)));
|
||||
}
|
||||
|
||||
//! Returns a bit-mask that has `x` bit set.
|
||||
|
||||
@@ -211,6 +211,7 @@ Error releaseDualMapping(DualMapping* dm, size_t size) noexcept {
|
||||
dm->rw = nullptr;
|
||||
return kErrorOk;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
// Virtual Memory [Posix]
|
||||
|
||||
@@ -364,6 +364,7 @@ public:
|
||||
static inline ZoneTreeNode* _doubleRotate(ZoneTreeNode* ASMJIT_NONNULL(root), size_t dir) noexcept {
|
||||
ZoneTreeNode* child = root->_getChild(!dir);
|
||||
ASMJIT_ASSUME(child != nullptr);
|
||||
|
||||
root->_setChild(!dir, _singleRotate(child, !dir));
|
||||
return _singleRotate(root, dir);
|
||||
}
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
#include "../core/misc_p.h"
|
||||
#include "../core/support.h"
|
||||
#include "../x86/x86assembler.h"
|
||||
#include "../x86/x86emithelper_p.h"
|
||||
#include "../x86/x86instapi_p.h"
|
||||
#include "../x86/x86instdb_p.h"
|
||||
#include "../x86/x86formatter_p.h"
|
||||
#include "../x86/x86opcode_p.h"
|
||||
@@ -525,6 +527,10 @@ static ASMJIT_FORCE_INLINE bool x86ShouldUseMovabs(Assembler* self, X86BufferWri
|
||||
// ===========================================
|
||||
|
||||
Assembler::Assembler(CodeHolder* code) noexcept : BaseAssembler() {
|
||||
_archMask = (uint64_t(1) << uint32_t(Arch::kX86)) |
|
||||
(uint64_t(1) << uint32_t(Arch::kX64)) ;
|
||||
assignEmitterFuncs(this);
|
||||
|
||||
if (code)
|
||||
code->attach(this);
|
||||
}
|
||||
@@ -604,7 +610,7 @@ ASMJIT_FAVOR_SPEED Error Assembler::_emit(InstId instId, const Operand_& o0, con
|
||||
Operand_ opArray[Globals::kMaxOpCount];
|
||||
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
|
||||
|
||||
err = InstAPI::validate(arch(), BaseInst(instId, options, _extraReg), opArray, Globals::kMaxOpCount);
|
||||
err = _funcs.validate(arch(), BaseInst(instId, options, _extraReg), opArray, Globals::kMaxOpCount, ValidationFlags::kNone);
|
||||
if (ASMJIT_UNLIKELY(err))
|
||||
goto Failed;
|
||||
}
|
||||
@@ -4159,7 +4165,7 @@ EmitModSib:
|
||||
|
||||
re->_sourceSectionId = _section->id();
|
||||
re->_sourceOffset = offset();
|
||||
re->_format.resetToDataValue(4);
|
||||
re->_format.resetToSimpleValue(OffsetType::kSignedOffset, 4);
|
||||
re->_format.setLeadingAndTrailingSize(writer.offsetFrom(_bufferPtr), immSize);
|
||||
re->_payload = uint64_t(rmRel->as<Mem>().offset());
|
||||
|
||||
@@ -4256,7 +4262,7 @@ EmitModSib_LabelRip_X86:
|
||||
|
||||
re->_sourceSectionId = _section->id();
|
||||
re->_sourceOffset = offset();
|
||||
re->_format.resetToDataValue(4);
|
||||
re->_format.resetToSimpleValue(OffsetType::kUnsignedOffset, 4);
|
||||
re->_format.setLeadingAndTrailingSize(writer.offsetFrom(_bufferPtr), immSize);
|
||||
re->_payload = uint64_t(int64_t(relOffset));
|
||||
|
||||
@@ -4281,7 +4287,7 @@ EmitModSib_LabelRip_X86:
|
||||
|
||||
re->_sourceSectionId = _section->id();
|
||||
re->_targetSectionId = _section->id();
|
||||
re->_format.resetToDataValue(4);
|
||||
re->_format.resetToSimpleValue(OffsetType::kUnsignedOffset, 4);
|
||||
re->_format.setLeadingAndTrailingSize(writer.offsetFrom(_bufferPtr), immSize);
|
||||
re->_sourceOffset = offset();
|
||||
re->_payload = re->_sourceOffset + re->_format.regionSize() + uint64_t(int64_t(relOffset));
|
||||
@@ -4871,13 +4877,13 @@ EmitJmpCall:
|
||||
writer.emit8If(0x0F, (opcode & Opcode::kMM_Mask) != 0); // Emit 0F prefix.
|
||||
writer.emit8(opcode.v); // Emit opcode.
|
||||
writer.emit8If(x86EncodeMod(3, opReg, 0), opReg != 0); // Emit MOD.
|
||||
re->_format.resetToDataValue(4);
|
||||
re->_format.resetToSimpleValue(OffsetType::kSignedOffset, 4);
|
||||
re->_format.setLeadingAndTrailingSize(writer.offsetFrom(_bufferPtr), immSize);
|
||||
writer.emit32uLE(0); // Emit DISP32.
|
||||
}
|
||||
else {
|
||||
writer.emit8(opCode8); // Emit opcode.
|
||||
re->_format.resetToDataValue(4);
|
||||
re->_format.resetToSimpleValue(OffsetType::kSignedOffset, 1);
|
||||
re->_format.setLeadingAndTrailingSize(writer.offsetFrom(_bufferPtr), immSize);
|
||||
writer.emit8(0); // Emit DISP8 (zero).
|
||||
}
|
||||
@@ -4919,7 +4925,7 @@ EmitRel:
|
||||
// Chain with label.
|
||||
size_t offset = size_t(writer.offsetFrom(_bufferData));
|
||||
OffsetFormat of;
|
||||
of.resetToDataValue(relSize);
|
||||
of.resetToSimpleValue(OffsetType::kSignedOffset, relSize);
|
||||
|
||||
LabelLink* link = _code->newLabelLink(label, _section->id(), offset, relOffset, of);
|
||||
if (ASMJIT_UNLIKELY(!link))
|
||||
@@ -5077,9 +5083,6 @@ Error Assembler::align(AlignMode alignMode, uint32_t alignment) {
|
||||
|
||||
Error Assembler::onAttach(CodeHolder* code) noexcept {
|
||||
Arch arch = code->arch();
|
||||
if (!Environment::isFamilyX86(arch))
|
||||
return DebugUtils::errored(kErrorInvalidArch);
|
||||
|
||||
ASMJIT_PROPAGATE(Base::onAttach(code));
|
||||
|
||||
if (Environment::is32Bit(arch)) {
|
||||
@@ -5099,7 +5102,6 @@ Error Assembler::onAttach(CodeHolder* code) noexcept {
|
||||
Error Assembler::onDetach(CodeHolder* code) noexcept {
|
||||
_forcedInstOptions &= ~InstOptions::kX86_InvalidRex;
|
||||
_setAddressOverrideMask(0);
|
||||
|
||||
return Base::onDetach(code);
|
||||
}
|
||||
|
||||
|
||||
@@ -8,15 +8,37 @@
|
||||
|
||||
#include "../x86/x86assembler.h"
|
||||
#include "../x86/x86builder.h"
|
||||
#include "../x86/x86emithelper_p.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(x86)
|
||||
|
||||
// x86::Builder - Construction & Destruction
|
||||
// =========================================
|
||||
|
||||
Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() {
|
||||
_archMask = (uint64_t(1) << uint32_t(Arch::kX86)) |
|
||||
(uint64_t(1) << uint32_t(Arch::kX64)) ;
|
||||
assignEmitterFuncs(this);
|
||||
|
||||
if (code)
|
||||
code->attach(this);
|
||||
}
|
||||
Builder::~Builder() noexcept {}
|
||||
|
||||
// x86::Builder - Events
|
||||
// =====================
|
||||
|
||||
Error Builder::onAttach(CodeHolder* code) noexcept {
|
||||
return Base::onAttach(code);
|
||||
}
|
||||
|
||||
Error Builder::onDetach(CodeHolder* code) noexcept {
|
||||
return Base::onDetach(code);
|
||||
}
|
||||
|
||||
// x86::Builder - Finalize
|
||||
// =======================
|
||||
|
||||
Error Builder::finalize() {
|
||||
ASMJIT_PROPAGATE(runPasses());
|
||||
Assembler a(_code);
|
||||
@@ -25,14 +47,6 @@ Error Builder::finalize() {
|
||||
return serializeTo(&a);
|
||||
}
|
||||
|
||||
Error Builder::onAttach(CodeHolder* code) noexcept {
|
||||
Arch arch = code->arch();
|
||||
if (!Environment::isFamilyX86(arch))
|
||||
return DebugUtils::errored(kErrorInvalidArch);
|
||||
|
||||
return Base::onAttach(code);
|
||||
}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_X86 && !ASMJIT_NO_BUILDER
|
||||
|
||||
@@ -327,17 +327,18 @@ public:
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Finalize
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error finalize() override;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Events
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
|
||||
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Finalize
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error finalize() override;
|
||||
|
||||
//! \}
|
||||
};
|
||||
|
||||
@@ -8,29 +8,28 @@
|
||||
|
||||
#include "../x86/x86assembler.h"
|
||||
#include "../x86/x86compiler.h"
|
||||
#include "../x86/x86instapi_p.h"
|
||||
#include "../x86/x86rapass_p.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(x86)
|
||||
|
||||
// x86::Compiler - Construction & Destruction
|
||||
// ==========================================
|
||||
|
||||
Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() {
|
||||
_archMask = (uint64_t(1) << uint32_t(Arch::kX86)) |
|
||||
(uint64_t(1) << uint32_t(Arch::kX64)) ;
|
||||
assignEmitterFuncs(this);
|
||||
|
||||
if (code)
|
||||
code->attach(this);
|
||||
}
|
||||
Compiler::~Compiler() noexcept {}
|
||||
|
||||
Error Compiler::finalize() {
|
||||
ASMJIT_PROPAGATE(runPasses());
|
||||
Assembler a(_code);
|
||||
a.addEncodingOptions(encodingOptions());
|
||||
a.addDiagnosticOptions(diagnosticOptions());
|
||||
return serializeTo(&a);
|
||||
}
|
||||
// x86::Compiler - Events
|
||||
// ======================
|
||||
|
||||
Error Compiler::onAttach(CodeHolder* code) noexcept {
|
||||
Arch arch = code->arch();
|
||||
if (!Environment::isFamilyX86(arch))
|
||||
return DebugUtils::errored(kErrorInvalidArch);
|
||||
|
||||
ASMJIT_PROPAGATE(Base::onAttach(code));
|
||||
Error err = addPassT<X86RAPass>();
|
||||
|
||||
@@ -42,6 +41,21 @@ Error Compiler::onAttach(CodeHolder* code) noexcept {
|
||||
return kErrorOk;
|
||||
}
|
||||
|
||||
Error Compiler::onDetach(CodeHolder* code) noexcept {
|
||||
return Base::onDetach(code);
|
||||
}
|
||||
|
||||
// x86::Compiler - Finalize
|
||||
// ========================
|
||||
|
||||
Error Compiler::finalize() {
|
||||
ASMJIT_PROPAGATE(runPasses());
|
||||
Assembler a(_code);
|
||||
a.addEncodingOptions(encodingOptions());
|
||||
a.addDiagnosticOptions(diagnosticOptions());
|
||||
return serializeTo(&a);
|
||||
}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_X86 && !ASMJIT_NO_COMPILER
|
||||
|
||||
@@ -697,17 +697,18 @@ public:
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Finalize
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error finalize() override;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Events
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
|
||||
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
|
||||
|
||||
//! \}
|
||||
|
||||
//! \name Finalize
|
||||
//! \{
|
||||
|
||||
ASMJIT_API Error finalize() override;
|
||||
|
||||
//! \}
|
||||
};
|
||||
|
||||
@@ -14,6 +14,8 @@
|
||||
#include "../core/radefs_p.h"
|
||||
#include "../x86/x86emithelper_p.h"
|
||||
#include "../x86/x86emitter.h"
|
||||
#include "../x86/x86formatter_p.h"
|
||||
#include "../x86/x86instapi_p.h"
|
||||
|
||||
ASMJIT_BEGIN_SUB_NAMESPACE(x86)
|
||||
|
||||
@@ -583,6 +585,35 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) {
|
||||
return kErrorOk;
|
||||
}
|
||||
|
||||
static Error ASMJIT_CDECL Emitter_emitProlog(BaseEmitter* emitter, const FuncFrame& frame) {
|
||||
EmitHelper emitHelper(emitter, frame.isAvxEnabled(), frame.isAvx512Enabled());
|
||||
return emitHelper.emitProlog(frame);
|
||||
}
|
||||
|
||||
static Error ASMJIT_CDECL Emitter_emitEpilog(BaseEmitter* emitter, const FuncFrame& frame) {
|
||||
EmitHelper emitHelper(emitter, frame.isAvxEnabled(), frame.isAvx512Enabled());
|
||||
return emitHelper.emitEpilog(frame);
|
||||
}
|
||||
|
||||
static Error ASMJIT_CDECL Emitter_emitArgsAssignment(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args) {
|
||||
EmitHelper emitHelper(emitter, frame.isAvxEnabled(), frame.isAvx512Enabled());
|
||||
return emitHelper.emitArgsAssignment(frame, args);
|
||||
}
|
||||
|
||||
void assignEmitterFuncs(BaseEmitter* emitter) {
|
||||
emitter->_funcs.emitProlog = Emitter_emitProlog;
|
||||
emitter->_funcs.emitEpilog = Emitter_emitEpilog;
|
||||
emitter->_funcs.emitArgsAssignment = Emitter_emitArgsAssignment;
|
||||
|
||||
#ifndef ASMJIT_NO_LOGGING
|
||||
emitter->_funcs.formatInstruction = FormatterInternal::formatInstruction;
|
||||
#endif
|
||||
|
||||
#ifndef ASMJIT_NO_VALIDATION
|
||||
emitter->_funcs.validate = InstInternal::validate;
|
||||
#endif
|
||||
}
|
||||
|
||||
ASMJIT_END_SUB_NAMESPACE
|
||||
|
||||
#endif // !ASMJIT_NO_X86
|
||||
|
||||
@@ -50,6 +50,8 @@ public:
|
||||
Error emitEpilog(const FuncFrame& frame);
|
||||
};
|
||||
|
||||
void assignEmitterFuncs(BaseEmitter* emitter);
|
||||
|
||||
//! \}
|
||||
//! \endcond
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
#include "../core/misc_p.h"
|
||||
#include "../core/support.h"
|
||||
#include "../x86/x86formatter_p.h"
|
||||
#include "../x86/x86instapi_p.h"
|
||||
#include "../x86/x86instdb_p.h"
|
||||
#include "../x86/x86operand.h"
|
||||
|
||||
@@ -880,7 +881,7 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction(
|
||||
}
|
||||
}
|
||||
|
||||
ASMJIT_PROPAGATE(InstAPI::instIdToString(arch, instId, sb));
|
||||
ASMJIT_PROPAGATE(InstInternal::instIdToString(arch, instId, sb));
|
||||
}
|
||||
else {
|
||||
ASMJIT_PROPAGATE(sb.appendFormat("[InstId=#%u]", unsigned(instId)));
|
||||
|
||||
@@ -21,11 +21,11 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86)
|
||||
|
||||
namespace FormatterInternal {
|
||||
|
||||
Error formatFeature(
|
||||
Error ASMJIT_CDECL formatFeature(
|
||||
String& sb,
|
||||
uint32_t featureId) noexcept;
|
||||
|
||||
Error formatRegister(
|
||||
Error ASMJIT_CDECL formatRegister(
|
||||
String& sb,
|
||||
FormatFlags flags,
|
||||
const BaseEmitter* emitter,
|
||||
@@ -33,14 +33,14 @@ Error formatRegister(
|
||||
RegType regType,
|
||||
uint32_t regId) noexcept;
|
||||
|
||||
Error formatOperand(
|
||||
Error ASMJIT_CDECL formatOperand(
|
||||
String& sb,
|
||||
FormatFlags flags,
|
||||
const BaseEmitter* emitter,
|
||||
Arch arch,
|
||||
const Operand_& op) noexcept;
|
||||
|
||||
Error formatInstruction(
|
||||
Error ASMJIT_CDECL formatInstruction(
|
||||
String& sb,
|
||||
FormatFlags flags,
|
||||
const BaseEmitter* emitter,
|
||||
|
||||
@@ -18,17 +18,17 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86)
|
||||
namespace InstInternal {
|
||||
|
||||
#ifndef ASMJIT_NO_TEXT
|
||||
Error instIdToString(Arch arch, InstId instId, String& output) noexcept;
|
||||
InstId stringToInstId(Arch arch, const char* s, size_t len) noexcept;
|
||||
Error ASMJIT_CDECL instIdToString(Arch arch, InstId instId, String& output) noexcept;
|
||||
InstId ASMJIT_CDECL stringToInstId(Arch arch, const char* s, size_t len) noexcept;
|
||||
#endif // !ASMJIT_NO_TEXT
|
||||
|
||||
#ifndef ASMJIT_NO_VALIDATION
|
||||
Error validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
|
||||
Error ASMJIT_CDECL validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
|
||||
#endif // !ASMJIT_NO_VALIDATION
|
||||
|
||||
#ifndef ASMJIT_NO_INTROSPECTION
|
||||
Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
|
||||
Error queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
|
||||
Error ASMJIT_CDECL queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
|
||||
Error ASMJIT_CDECL queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
|
||||
#endif // !ASMJIT_NO_INTROSPECTION
|
||||
|
||||
} // {InstInternal}
|
||||
|
||||
@@ -18,6 +18,10 @@ bool testX86Assembler(const TestSettings& settings) noexcept;
|
||||
bool testX64Assembler(const TestSettings& settings) noexcept;
|
||||
#endif
|
||||
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
bool testA64Assembler(const TestSettings& settings) noexcept;
|
||||
#endif
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
CmdLine cmdLine(argc, argv);
|
||||
|
||||
@@ -43,6 +47,7 @@ int main(int argc, char* argv[]) {
|
||||
const char* arch = cmdLine.valueOf("--arch", "all");
|
||||
bool x86Failed = false;
|
||||
bool x64Failed = false;
|
||||
bool aarch64Failed = false;
|
||||
|
||||
#if !defined(ASMJIT_NO_X86)
|
||||
if ((strcmp(arch, "all") == 0 || strcmp(arch, "x86") == 0))
|
||||
@@ -52,11 +57,23 @@ int main(int argc, char* argv[]) {
|
||||
x64Failed = !testX64Assembler(settings);
|
||||
#endif
|
||||
|
||||
bool failed = x86Failed || x64Failed;
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
if ((strcmp(arch, "all") == 0 || strcmp(arch, "aarch64") == 0))
|
||||
aarch64Failed = !testA64Assembler(settings);
|
||||
#endif
|
||||
|
||||
bool failed = x86Failed || x64Failed || aarch64Failed;
|
||||
|
||||
if (failed) {
|
||||
if (x86Failed) printf("** X86 test suite failed **\n");
|
||||
if (x64Failed) printf("** X64 test suite failed **\n");
|
||||
if (x86Failed)
|
||||
printf("** X86 test suite failed **\n");
|
||||
|
||||
if (x64Failed)
|
||||
printf("** X64 test suite failed **\n");
|
||||
|
||||
if (aarch64Failed)
|
||||
printf("** AArch64 test suite failed **\n");
|
||||
|
||||
printf("** FAILURE **\n");
|
||||
}
|
||||
else {
|
||||
|
||||
@@ -20,6 +20,7 @@ public:
|
||||
asmjit::Environment env {};
|
||||
asmjit::CodeHolder code {};
|
||||
AssemblerType assembler {};
|
||||
asmjit::Label L0 {};
|
||||
const TestSettings& settings;
|
||||
|
||||
size_t passed {};
|
||||
@@ -45,6 +46,7 @@ public:
|
||||
code.reset();
|
||||
code.init(env, 0);
|
||||
code.attach(&assembler);
|
||||
L0 = assembler.newLabel();
|
||||
|
||||
if (settings.validate)
|
||||
assembler.addDiagnosticOptions(asmjit::DiagnosticOptions::kValidateAssembler);
|
||||
|
||||
4006
test/asmjit_test_assembler_a64.cpp
Normal file
4006
test/asmjit_test_assembler_a64.cpp
Normal file
File diff suppressed because it is too large
Load Diff
@@ -24,7 +24,7 @@
|
||||
void compiler_add_x86_tests(TestApp& app);
|
||||
#endif
|
||||
|
||||
#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM == 64
|
||||
#if !defined(ASMJIT_NO_AARCH64) && ASMJIT_ARCH_ARM == 64
|
||||
#include <asmjit/a64.h>
|
||||
void compiler_add_a64_tests(TestApp& app);
|
||||
#endif
|
||||
@@ -33,7 +33,7 @@ void compiler_add_a64_tests(TestApp& app);
|
||||
#define ASMJIT_HAVE_WORKING_JIT
|
||||
#endif
|
||||
|
||||
#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM == 64
|
||||
#if !defined(ASMJIT_NO_AARCH64) && ASMJIT_ARCH_ARM == 64
|
||||
#define ASMJIT_HAVE_WORKING_JIT
|
||||
#endif
|
||||
|
||||
@@ -118,8 +118,8 @@ int TestApp::run() {
|
||||
x86::Compiler cc(&code);
|
||||
#endif
|
||||
|
||||
#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM == 64
|
||||
arm::Compiler cc(&code);
|
||||
#if !defined(ASMJIT_NO_AARCH64) && ASMJIT_ARCH_ARM == 64
|
||||
a64::Compiler cc(&code);
|
||||
#endif
|
||||
|
||||
#ifndef ASMJIT_NO_LOGGING
|
||||
@@ -245,7 +245,7 @@ int main(int argc, char* argv[]) {
|
||||
compiler_add_x86_tests(app);
|
||||
#endif
|
||||
|
||||
#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM == 64
|
||||
#if !defined(ASMJIT_NO_AARCH64) && ASMJIT_ARCH_ARM == 64
|
||||
compiler_add_a64_tests(app);
|
||||
#endif
|
||||
|
||||
|
||||
690
test/asmjit_test_compiler_a64.cpp
Normal file
690
test/asmjit_test_compiler_a64.cpp
Normal file
@@ -0,0 +1,690 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include <asmjit/core.h>
|
||||
#if !defined(ASMJIT_NO_AARCH64) && ASMJIT_ARCH_ARM == 64
|
||||
|
||||
#include <asmjit/a64.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "./asmjit_test_compiler.h"
|
||||
|
||||
using namespace asmjit;
|
||||
|
||||
// a64::Compiler - A64TestCase
|
||||
// ===========================
|
||||
|
||||
class A64TestCase : public TestCase {
|
||||
public:
|
||||
A64TestCase(const char* name = nullptr)
|
||||
: TestCase(name) {}
|
||||
|
||||
virtual void compile(BaseCompiler& cc) override {
|
||||
compile(static_cast<a64::Compiler&>(cc));
|
||||
}
|
||||
|
||||
virtual void compile(a64::Compiler& cc) = 0;
|
||||
};
|
||||
|
||||
// a64::Compiler - A64Test_GpArgs
|
||||
// ==============================
|
||||
|
||||
class A64Test_GpArgs : public A64TestCase {
|
||||
public:
|
||||
uint32_t _argCount;
|
||||
bool _preserveFP;
|
||||
|
||||
A64Test_GpArgs(uint32_t argCount, bool preserveFP)
|
||||
: _argCount(argCount),
|
||||
_preserveFP(preserveFP) {
|
||||
_name.assignFormat("GpArgs {NumArgs=%u PreserveFP=%c}", argCount, preserveFP ? 'Y' : 'N');
|
||||
}
|
||||
|
||||
static void add(TestApp& app) {
|
||||
for (uint32_t i = 0; i <= 16; i++) {
|
||||
app.add(new A64Test_GpArgs(i, true));
|
||||
app.add(new A64Test_GpArgs(i, false));
|
||||
}
|
||||
}
|
||||
|
||||
virtual void compile(a64::Compiler& cc) {
|
||||
uint32_t i;
|
||||
uint32_t argCount = _argCount;
|
||||
|
||||
FuncSignatureBuilder signature;
|
||||
signature.setRetT<int>();
|
||||
for (i = 0; i < argCount; i++)
|
||||
signature.addArgT<int>();
|
||||
|
||||
FuncNode* funcNode = cc.addFunc(signature);
|
||||
if (_preserveFP)
|
||||
funcNode->frame().setPreservedFP();
|
||||
|
||||
arm::Gp sum;
|
||||
|
||||
if (argCount) {
|
||||
for (i = 0; i < argCount; i++) {
|
||||
arm::Gp iReg = cc.newInt32("i%u", i);
|
||||
funcNode->setArg(i, iReg);
|
||||
|
||||
if (i == 0)
|
||||
sum = iReg;
|
||||
else
|
||||
cc.add(sum, sum, iReg);
|
||||
}
|
||||
}
|
||||
else {
|
||||
sum = cc.newInt32("i");
|
||||
cc.mov(sum, 0);
|
||||
}
|
||||
|
||||
cc.ret(sum);
|
||||
cc.endFunc();
|
||||
}
|
||||
|
||||
virtual bool run(void* _func, String& result, String& expect) {
|
||||
typedef unsigned int U;
|
||||
|
||||
typedef U (*Func0)();
|
||||
typedef U (*Func1)(U);
|
||||
typedef U (*Func2)(U, U);
|
||||
typedef U (*Func3)(U, U, U);
|
||||
typedef U (*Func4)(U, U, U, U);
|
||||
typedef U (*Func5)(U, U, U, U, U);
|
||||
typedef U (*Func6)(U, U, U, U, U, U);
|
||||
typedef U (*Func7)(U, U, U, U, U, U, U);
|
||||
typedef U (*Func8)(U, U, U, U, U, U, U, U);
|
||||
typedef U (*Func9)(U, U, U, U, U, U, U, U, U);
|
||||
typedef U (*Func10)(U, U, U, U, U, U, U, U, U, U);
|
||||
typedef U (*Func11)(U, U, U, U, U, U, U, U, U, U, U);
|
||||
typedef U (*Func12)(U, U, U, U, U, U, U, U, U, U, U, U);
|
||||
typedef U (*Func13)(U, U, U, U, U, U, U, U, U, U, U, U, U);
|
||||
typedef U (*Func14)(U, U, U, U, U, U, U, U, U, U, U, U, U, U);
|
||||
typedef U (*Func15)(U, U, U, U, U, U, U, U, U, U, U, U, U, U, U);
|
||||
typedef U (*Func16)(U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U);
|
||||
|
||||
unsigned int resultRet = 0;
|
||||
unsigned int expectRet = 0;
|
||||
|
||||
switch (_argCount) {
|
||||
case 0:
|
||||
resultRet = ptr_as_func<Func0>(_func)();
|
||||
expectRet = 0;
|
||||
break;
|
||||
case 1:
|
||||
resultRet = ptr_as_func<Func1>(_func)(1);
|
||||
expectRet = 1;
|
||||
break;
|
||||
case 2:
|
||||
resultRet = ptr_as_func<Func2>(_func)(1, 2);
|
||||
expectRet = 1 + 2;
|
||||
break;
|
||||
case 3:
|
||||
resultRet = ptr_as_func<Func3>(_func)(1, 2, 3);
|
||||
expectRet = 1 + 2 + 3;
|
||||
break;
|
||||
case 4:
|
||||
resultRet = ptr_as_func<Func4>(_func)(1, 2, 3, 4);
|
||||
expectRet = 1 + 2 + 3 + 4;
|
||||
break;
|
||||
case 5:
|
||||
resultRet = ptr_as_func<Func5>(_func)(1, 2, 3, 4, 5);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5;
|
||||
break;
|
||||
case 6:
|
||||
resultRet = ptr_as_func<Func6>(_func)(1, 2, 3, 4, 5, 6);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6;
|
||||
break;
|
||||
case 7:
|
||||
resultRet = ptr_as_func<Func7>(_func)(1, 2, 3, 4, 5, 6, 7);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6 + 7;
|
||||
break;
|
||||
case 8:
|
||||
resultRet = ptr_as_func<Func8>(_func)(1, 2, 3, 4, 5, 6, 7, 8);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8;
|
||||
break;
|
||||
case 9:
|
||||
resultRet = ptr_as_func<Func9>(_func)(1, 2, 3, 4, 5, 6, 7, 8, 9);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9;
|
||||
break;
|
||||
case 10:
|
||||
resultRet = ptr_as_func<Func10>(_func)(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10;
|
||||
break;
|
||||
case 11:
|
||||
resultRet = ptr_as_func<Func11>(_func)(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11;
|
||||
break;
|
||||
case 12:
|
||||
resultRet = ptr_as_func<Func12>(_func)(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12;
|
||||
break;
|
||||
case 13:
|
||||
resultRet = ptr_as_func<Func13>(_func)(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13;
|
||||
break;
|
||||
case 14:
|
||||
resultRet = ptr_as_func<Func14>(_func)(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14;
|
||||
break;
|
||||
case 15:
|
||||
resultRet = ptr_as_func<Func15>(_func)(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15;
|
||||
break;
|
||||
case 16:
|
||||
resultRet = ptr_as_func<Func16>(_func)(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
|
||||
expectRet = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + 16;
|
||||
break;
|
||||
}
|
||||
|
||||
result.assignFormat("ret={%u, %u}", resultRet >> 28, resultRet & 0x0FFFFFFFu);
|
||||
expect.assignFormat("ret={%u, %u}", expectRet >> 28, expectRet & 0x0FFFFFFFu);
|
||||
|
||||
return resultRet == expectRet;
|
||||
}
|
||||
};
|
||||
|
||||
// a64::Compiler - A64Test_Simd1
|
||||
// =============================
|
||||
|
||||
class A64Test_Simd1 : public A64TestCase {
|
||||
public:
|
||||
A64Test_Simd1()
|
||||
: A64TestCase("Simd1") {}
|
||||
|
||||
static void add(TestApp& app) {
|
||||
app.add(new A64Test_Simd1());
|
||||
}
|
||||
|
||||
virtual void compile(a64::Compiler& cc) {
|
||||
FuncNode* funcNode = cc.addFunc(FuncSignatureT<void, void*, const void*, const void*>());
|
||||
|
||||
arm::Gp dst = cc.newUIntPtr("dst");
|
||||
arm::Gp src1 = cc.newUIntPtr("src1");
|
||||
arm::Gp src2 = cc.newUIntPtr("src2");
|
||||
|
||||
funcNode->setArg(0, dst);
|
||||
funcNode->setArg(1, src1);
|
||||
funcNode->setArg(2, src2);
|
||||
|
||||
arm::Vec v1 = cc.newVecQ("vec1");
|
||||
arm::Vec v2 = cc.newVecQ("vec2");
|
||||
arm::Vec v3 = cc.newVecQ("vec3");
|
||||
|
||||
cc.ldr(v2, arm::ptr(src1));
|
||||
cc.ldr(v3, arm::ptr(src2));
|
||||
cc.add(v1.b16(), v2.b16(), v3.b16());
|
||||
cc.str(v1, arm::ptr(dst));
|
||||
|
||||
cc.endFunc();
|
||||
}
|
||||
|
||||
virtual bool run(void* _func, String& result, String& expect) {
|
||||
typedef void (*Func)(void*, const void*, const void*);
|
||||
|
||||
uint32_t dst[4];
|
||||
uint32_t aSrc[4] = { 0 , 1 , 2 , 255 };
|
||||
uint32_t bSrc[4] = { 99, 17, 33, 1 };
|
||||
|
||||
// NOTE: It's a byte-add, so uint8_t(255+1) == 0.
|
||||
uint32_t ref[4] = { 99, 18, 35, 0 };
|
||||
|
||||
ptr_as_func<Func>(_func)(dst, aSrc, bSrc);
|
||||
|
||||
unsigned int resultRet = 0;
|
||||
unsigned int expectRet = 0;
|
||||
|
||||
result.assignFormat("ret={%u, %u, %u, %u}", dst[0], dst[1], dst[2], dst[3]);
|
||||
expect.assignFormat("ret={%u, %u, %u, %u}", ref[0], ref[1], ref[2], ref[3]);
|
||||
|
||||
return resultRet == expectRet;
|
||||
}
|
||||
};
|
||||
|
||||
// a64::Compiler - A64Test_ManyRegs
|
||||
// ================================
|
||||
|
||||
class A64Test_ManyRegs : public A64TestCase {
|
||||
public:
|
||||
uint32_t _regCount;
|
||||
|
||||
A64Test_ManyRegs(uint32_t n)
|
||||
: A64TestCase(),
|
||||
_regCount(n) {
|
||||
_name.assignFormat("GpRegs {NumRegs=%u}", n);
|
||||
}
|
||||
|
||||
static void add(TestApp& app) {
|
||||
for (uint32_t i = 2; i < 64; i++)
|
||||
app.add(new A64Test_ManyRegs(i));
|
||||
}
|
||||
|
||||
virtual void compile(a64::Compiler& cc) {
|
||||
cc.addFunc(FuncSignatureT<int>());
|
||||
|
||||
arm::Gp* regs = static_cast<arm::Gp*>(malloc(_regCount * sizeof(arm::Gp)));
|
||||
|
||||
for (uint32_t i = 0; i < _regCount; i++) {
|
||||
regs[i] = cc.newUInt32("reg%u", i);
|
||||
cc.mov(regs[i], i + 1);
|
||||
}
|
||||
|
||||
arm::Gp sum = cc.newUInt32("sum");
|
||||
cc.mov(sum, 0);
|
||||
|
||||
for (uint32_t i = 0; i < _regCount; i++) {
|
||||
cc.add(sum, sum, regs[i]);
|
||||
}
|
||||
|
||||
cc.ret(sum);
|
||||
cc.endFunc();
|
||||
|
||||
free(regs);
|
||||
}
|
||||
|
||||
virtual bool run(void* _func, String& result, String& expect) {
|
||||
typedef int (*Func)(void);
|
||||
Func func = ptr_as_func<Func>(_func);
|
||||
|
||||
result.assignFormat("ret={%d}", func());
|
||||
expect.assignFormat("ret={%d}", calcSum());
|
||||
|
||||
return result == expect;
|
||||
}
|
||||
|
||||
uint32_t calcSum() const {
|
||||
return (_regCount | 1) * ((_regCount + 1) / 2);
|
||||
}
|
||||
};
|
||||
|
||||
// a64::Compiler - A64Test_Adr
|
||||
// ===========================
|
||||
|
||||
class A64Test_Adr : public A64TestCase {
|
||||
public:
|
||||
A64Test_Adr()
|
||||
: A64TestCase("Adr") {}
|
||||
|
||||
static void add(TestApp& app) {
|
||||
app.add(new A64Test_Adr());
|
||||
}
|
||||
|
||||
virtual void compile(a64::Compiler& cc) {
|
||||
cc.addFunc(FuncSignatureT<int>());
|
||||
|
||||
arm::Gp addr = cc.newIntPtr("addr");
|
||||
arm::Gp val = cc.newIntPtr("val");
|
||||
|
||||
Label L_Table = cc.newLabel();
|
||||
|
||||
cc.adr(addr, L_Table);
|
||||
cc.ldrsw(val, arm::ptr(addr, 8));
|
||||
cc.ret(val);
|
||||
cc.endFunc();
|
||||
|
||||
cc.bind(L_Table);
|
||||
cc.embedInt32(1);
|
||||
cc.embedInt32(2);
|
||||
cc.embedInt32(3);
|
||||
cc.embedInt32(4);
|
||||
cc.embedInt32(5);
|
||||
}
|
||||
|
||||
virtual bool run(void* _func, String& result, String& expect) {
|
||||
typedef int (*Func)(void);
|
||||
Func func = ptr_as_func<Func>(_func);
|
||||
|
||||
result.assignFormat("ret={%d}", func());
|
||||
expect.assignFormat("ret={%d}", 3);
|
||||
|
||||
return result == expect;
|
||||
}
|
||||
};
|
||||
|
||||
// a64::Compiler - A64Test_Branch1
|
||||
// ===============================
|
||||
|
||||
class A64Test_Branch1 : public A64TestCase {
|
||||
public:
|
||||
A64Test_Branch1()
|
||||
: A64TestCase("Branch1") {}
|
||||
|
||||
static void add(TestApp& app) {
|
||||
app.add(new A64Test_Branch1());
|
||||
}
|
||||
|
||||
virtual void compile(a64::Compiler& cc) {
|
||||
FuncNode* funcNode = cc.addFunc(FuncSignatureT<void, void*, size_t>());
|
||||
|
||||
arm::Gp p = cc.newIntPtr("p");
|
||||
arm::Gp count = cc.newIntPtr("count");
|
||||
arm::Gp i = cc.newIntPtr("i");
|
||||
Label L = cc.newLabel();
|
||||
|
||||
funcNode->setArg(0, p);
|
||||
funcNode->setArg(1, count);
|
||||
|
||||
cc.mov(i, 0);
|
||||
|
||||
cc.bind(L);
|
||||
cc.strb(i.w(), a64::ptr(p, i));
|
||||
cc.add(i, i, 1);
|
||||
cc.cmp(i, count);
|
||||
cc.b_ne(L);
|
||||
|
||||
cc.endFunc();
|
||||
}
|
||||
|
||||
virtual bool run(void* _func, String& result, String& expect) {
|
||||
typedef void (*Func)(void* p, size_t n);
|
||||
Func func = ptr_as_func<Func>(_func);
|
||||
|
||||
uint8_t array[16];
|
||||
func(array, 16);
|
||||
|
||||
expect.assign("ret={0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}");
|
||||
|
||||
result.assign("ret={");
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
if (i)
|
||||
result.append(", ");
|
||||
result.appendFormat("%d", int(array[i]));
|
||||
}
|
||||
result.append("}");
|
||||
|
||||
return result == expect;
|
||||
}
|
||||
};
|
||||
|
||||
// a64::Compiler - A64Test_Invoke1
|
||||
// ===============================
|
||||
|
||||
class A64Test_Invoke1 : public A64TestCase {
|
||||
public:
|
||||
A64Test_Invoke1()
|
||||
: A64TestCase("Invoke1") {}
|
||||
|
||||
static void add(TestApp& app) {
|
||||
app.add(new A64Test_Invoke1());
|
||||
}
|
||||
|
||||
virtual void compile(a64::Compiler& cc) {
|
||||
FuncNode* funcNode = cc.addFunc(FuncSignatureT<uint32_t, uint32_t, uint32_t>());
|
||||
|
||||
arm::Gp x = cc.newUInt32("x");
|
||||
arm::Gp y = cc.newUInt32("y");
|
||||
arm::Gp r = cc.newUInt32("r");
|
||||
arm::Gp fn = cc.newUIntPtr("fn");
|
||||
|
||||
funcNode->setArg(0, x);
|
||||
funcNode->setArg(1, y);
|
||||
|
||||
cc.mov(fn, (uint64_t)calledFunc);
|
||||
|
||||
InvokeNode* invokeNode;
|
||||
cc.invoke(&invokeNode, fn, FuncSignatureT<uint32_t, uint32_t, uint32_t>(CallConvId::kHost));
|
||||
invokeNode->setArg(0, x);
|
||||
invokeNode->setArg(1, y);
|
||||
invokeNode->setRet(0, r);
|
||||
|
||||
cc.ret(r);
|
||||
cc.endFunc();
|
||||
}
|
||||
|
||||
virtual bool run(void* _func, String& result, String& expect) {
|
||||
typedef uint32_t (*Func)(uint32_t, uint32_t);
|
||||
Func func = ptr_as_func<Func>(_func);
|
||||
|
||||
uint32_t x = 49;
|
||||
uint32_t y = 7;
|
||||
|
||||
result.assignFormat("ret={%u}", func(x, y));
|
||||
expect.assignFormat("ret={%u}", x - y);
|
||||
|
||||
return result == expect;
|
||||
}
|
||||
|
||||
static uint32_t calledFunc(uint32_t x, uint32_t y) {
|
||||
return x - y;
|
||||
}
|
||||
};
|
||||
|
||||
// a64::Compiler - A64Test_Invoke2
|
||||
// ===============================
|
||||
|
||||
class A64Test_Invoke2 : public A64TestCase {
|
||||
public:
|
||||
A64Test_Invoke2()
|
||||
: A64TestCase("Invoke2") {}
|
||||
|
||||
static void add(TestApp& app) {
|
||||
app.add(new A64Test_Invoke2());
|
||||
}
|
||||
|
||||
virtual void compile(a64::Compiler& cc) {
|
||||
FuncNode* funcNode = cc.addFunc(FuncSignatureT<double, double, double>());
|
||||
|
||||
arm::Vec x = cc.newVecD("x");
|
||||
arm::Vec y = cc.newVecD("y");
|
||||
arm::Vec r = cc.newVecD("r");
|
||||
arm::Gp fn = cc.newUIntPtr("fn");
|
||||
|
||||
funcNode->setArg(0, x);
|
||||
funcNode->setArg(1, y);
|
||||
cc.mov(fn, (uint64_t)calledFunc);
|
||||
|
||||
InvokeNode* invokeNode;
|
||||
cc.invoke(&invokeNode, fn, FuncSignatureT<double, double, double>(CallConvId::kHost));
|
||||
invokeNode->setArg(0, x);
|
||||
invokeNode->setArg(1, y);
|
||||
invokeNode->setRet(0, r);
|
||||
|
||||
cc.ret(r);
|
||||
cc.endFunc();
|
||||
}
|
||||
|
||||
virtual bool run(void* _func, String& result, String& expect) {
|
||||
typedef double (*Func)(double, double);
|
||||
Func func = ptr_as_func<Func>(_func);
|
||||
|
||||
double x = 49;
|
||||
double y = 7;
|
||||
|
||||
result.assignFormat("ret={%f}", func(x, y));
|
||||
expect.assignFormat("ret={%f}", calledFunc(x, y));
|
||||
|
||||
return result == expect;
|
||||
}
|
||||
|
||||
static double calledFunc(double x, double y) {
|
||||
return x - y;
|
||||
}
|
||||
};
|
||||
|
||||
// a64::Compiler - A64Test_Invoke3
|
||||
// ===============================
|
||||
|
||||
// Tests an indirect invoke (callee address held in a GP register) where the
// two double arguments are deliberately passed to the callee in swapped
// order; run() verifies against calledFunc(y, x).
class A64Test_Invoke3 : public A64TestCase {
public:
  A64Test_Invoke3()
    : A64TestCase("Invoke3") {}

  static void add(TestApp& app) {
    app.add(new A64Test_Invoke3());
  }

  virtual void compile(a64::Compiler& cc) {
    FuncNode* funcNode = cc.addFunc(FuncSignatureT<double, double, double>());

    arm::Vec x = cc.newVecD("x");
    arm::Vec y = cc.newVecD("y");
    arm::Vec r = cc.newVecD("r");
    arm::Gp fn = cc.newUIntPtr("fn");

    funcNode->setArg(0, x);
    funcNode->setArg(1, y);
    // Materialize the callee's address in a register to force an indirect call.
    cc.mov(fn, (uint64_t)calledFunc);

    InvokeNode* invokeNode;
    cc.invoke(&invokeNode, fn, FuncSignatureT<double, double, double>(CallConvId::kHost));
    // Arguments intentionally swapped (y first, x second) — this is what
    // distinguishes Invoke3 from Invoke2.
    invokeNode->setArg(0, y);
    invokeNode->setArg(1, x);
    invokeNode->setRet(0, r);

    cc.ret(r);
    cc.endFunc();
  }

  virtual bool run(void* _func, String& result, String& expect) {
    typedef double (*Func)(double, double);
    Func func = ptr_as_func<Func>(_func);

    double x = 49;
    double y = 7;

    // The JIT function forwards (x, y) swapped, so the reference value is
    // calledFunc(y, x).
    result.assignFormat("ret={%f}", func(x, y));
    expect.assignFormat("ret={%f}", calledFunc(y, x));

    return result == expect;
  }

  // Native callee invoked by the JIT code. Subtraction is non-commutative on
  // purpose so that argument order is observable in the result.
  static double calledFunc(double x, double y) {
    return x - y;
  }
};
|
||||
|
||||
// a64::Compiler - A64Test_JumpTable
|
||||
// =================================
|
||||
|
||||
// Tests an indirect branch through a jump table of label deltas, both with
// and without a JumpAnnotation describing the possible targets.
class A64Test_JumpTable : public A64TestCase {
public:
  // Whether compile() attaches a JumpAnnotation to the `br` instruction.
  bool _annotated;

  A64Test_JumpTable(bool annotated)
    : A64TestCase("A64Test_JumpTable"),
      _annotated(annotated) {
    _name.assignFormat("JumpTable {%s}", annotated ? "Annotated" : "Unknown Target");
  }

  // Selector passed as the third function argument; indexes the jump table.
  enum Operator {
    kOperatorAdd = 0,
    kOperatorSub = 1,
    kOperatorMul = 2,
    kOperatorDiv = 3
  };

  static void add(TestApp& app) {
    app.add(new A64Test_JumpTable(false));
    app.add(new A64Test_JumpTable(true));
  }

  virtual void compile(a64::Compiler& cc) {
    FuncNode* funcNode = cc.addFunc(FuncSignatureT<float, float, float, uint32_t>());

    arm::Vec a = cc.newVecS("a");
    arm::Vec b = cc.newVecS("b");
    arm::Gp op = cc.newUInt32("op");

    arm::Gp target = cc.newIntPtr("target");
    arm::Gp offset = cc.newIntPtr("offset");

    Label L_End = cc.newLabel();

    Label L_Table = cc.newLabel();
    Label L_Add = cc.newLabel();
    Label L_Sub = cc.newLabel();
    Label L_Mul = cc.newLabel();
    Label L_Div = cc.newLabel();

    funcNode->setArg(0, a);
    funcNode->setArg(1, b);
    funcNode->setArg(2, op);

    // target = &table; offset = table[op] (32-bit delta, op scaled by 4);
    // target += offset — computes the handler address.
    cc.adr(target, L_Table);
    cc.ldrsw(offset, arm::ptr(target, op, arm::sxtw(2)));
    cc.add(target, target, offset);

    // JumpAnnotation allows to annotate all possible jump targets of
    // instructions where it cannot be deduced from operands.
    if (_annotated) {
      JumpAnnotation* annotation = cc.newJumpAnnotation();
      annotation->addLabel(L_Add);
      annotation->addLabel(L_Sub);
      annotation->addLabel(L_Mul);
      annotation->addLabel(L_Div);
      cc.br(target, annotation);
    }
    else {
      cc.br(target);
    }

    cc.bind(L_Add);
    cc.fadd(a, a, b);
    cc.b(L_End);

    cc.bind(L_Sub);
    cc.fsub(a, a, b);
    cc.b(L_End);

    cc.bind(L_Mul);
    cc.fmul(a, a, b);
    cc.b(L_End);

    cc.bind(L_Div);
    cc.fdiv(a, a, b);
    // Falls through into L_End — no branch needed for the last handler.

    cc.bind(L_End);
    cc.ret(a);
    cc.endFunc();

    // Jump table of 32-bit label deltas relative to L_Table, emitted after
    // the function body.
    cc.bind(L_Table);
    cc.embedLabelDelta(L_Add, L_Table, 4);
    cc.embedLabelDelta(L_Sub, L_Table, 4);
    cc.embedLabelDelta(L_Mul, L_Table, 4);
    cc.embedLabelDelta(L_Div, L_Table, 4);
  }

  virtual bool run(void* _func, String& result, String& expect) {
    typedef float (*Func)(float, float, uint32_t);
    Func func = ptr_as_func<Func>(_func);

    float dst[4];
    float ref[4];

    // Exercise each table entry once.
    dst[0] = func(33.0f, 14.0f, kOperatorAdd);
    dst[1] = func(33.0f, 14.0f, kOperatorSub);
    dst[2] = func(10.0f, 6.0f, kOperatorMul);
    dst[3] = func(80.0f, 8.0f, kOperatorDiv);

    ref[0] = 47.0f;
    ref[1] = 19.0f;
    ref[2] = 60.0f;
    ref[3] = 10.0f;

    result.assignFormat("ret={%f, %f, %f, %f}", dst[0], dst[1], dst[2], dst[3]);
    expect.assignFormat("ret={%f, %f, %f, %f}", ref[0], ref[1], ref[2], ref[3]);

    return result == expect;
  }
};
|
||||
|
||||
// a64::Compiler - Export
|
||||
// ======================
|
||||
|
||||
// Registers all a64::Compiler test cases with the test application.
// Registration order determines execution order, so it is kept as-is.
void compiler_add_a64_tests(TestApp& app) {
  app.addT<A64Test_GpArgs>();
  app.addT<A64Test_ManyRegs>();
  app.addT<A64Test_Simd1>();
  app.addT<A64Test_Adr>();
  app.addT<A64Test_Branch1>();
  app.addT<A64Test_Invoke1>();
  app.addT<A64Test_Invoke2>();
  app.addT<A64Test_Invoke3>();
  app.addT<A64Test_JumpTable>();
}
|
||||
|
||||
#endif // !ASMJIT_NO_AARCH64 && ASMJIT_ARCH_ARM == 64
|
||||
@@ -16,6 +16,10 @@ using namespace asmjit;
|
||||
void benchmarkX86Emitters(uint32_t numIterations, bool testX86, bool testX64) noexcept;
|
||||
#endif
|
||||
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
void benchmarkA64Emitters(uint32_t numIterations);
|
||||
#endif
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
CmdLine cmdLine(argc, argv);
|
||||
uint32_t numIterations = 20000;
|
||||
@@ -47,5 +51,12 @@ int main(int argc, char* argv[]) {
|
||||
benchmarkX86Emitters(numIterations, testX86, testX64);
|
||||
#endif
|
||||
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
bool testAArch64 = strcmp(arch, "all") == 0 || strcmp(arch, "aarch64") == 0;
|
||||
|
||||
if (testAArch64)
|
||||
benchmarkA64Emitters(numIterations);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
699
test/asmjit_test_perf_a64.cpp
Normal file
699
test/asmjit_test_perf_a64.cpp
Normal file
@@ -0,0 +1,699 @@
|
||||
// This file is part of AsmJit project <https://asmjit.com>
|
||||
//
|
||||
// See asmjit.h or LICENSE.md for license and copyright information
|
||||
// SPDX-License-Identifier: Zlib
|
||||
|
||||
#include <asmjit/core.h>
|
||||
|
||||
#if !defined(ASMJIT_NO_AARCH64)
|
||||
#include <asmjit/a64.h>
|
||||
|
||||
#include <limits>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "asmjit_test_perf.h"
|
||||
|
||||
using namespace asmjit;
|
||||
|
||||
// Generates a long sequence of GP instructions.
|
||||
// Emits a long, fixed sequence of AArch64 general-purpose instructions into
// `cc`. Used as a benchmark workload, so the exact instruction mix and order
// is intentional and must not change. Works with any emitter type
// (Assembler / Builder / Compiler) via the template parameter.
//
// `a`..`d` are the GP registers to use; all memory operands reference [xD].
template<typename Emitter>
static void generateGpSequenceInternal(
  Emitter& cc,
  const a64::Gp& a, const a64::Gp& b, const a64::Gp& c, const a64::Gp& d) {

  using namespace asmjit::a64;

  // 32-bit views of the four registers.
  Gp wA = a.w();
  Gp wB = b.w();
  Gp wC = c.w();
  Gp wD = d.w();

  // 64-bit views of the four registers.
  Gp xA = a.x();
  Gp xB = b.x();
  Gp xC = c.x();
  Gp xD = d.x();

  // Shared memory operand for all load/store forms below.
  Mem m = ptr(xD);

  cc.mov(wA, 0);
  cc.mov(wB, 1);
  cc.mov(wC, 2);
  cc.mov(wD, 3);

  // Arithmetic, logical, and bit-field instructions (reg/reg, reg/imm,
  // shifted/extended, and zero-register forms).
  cc.adc(wA, wB, wC);
  cc.adc(xA, xB, xC);
  cc.adc(wA, wzr, wC);
  cc.adc(xA, xzr, xC);
  cc.adc(wzr, wB, wC);
  cc.adc(xzr, xB, xC);
  cc.adcs(wA, wB, wC);
  cc.adcs(xA, xB, xC);
  cc.add(wA, wB, wC);
  cc.add(xA, xB, xC);
  cc.add(wA, wB, wC, lsl(3));
  cc.add(xA, xB, xC, lsl(3));
  cc.add(wA, wzr, wC);
  cc.add(xA, xzr, xC);
  cc.add(wzr, wB, wC);
  cc.add(xzr, xB, xC);
  cc.add(wC, wD, 0, lsl(12));
  cc.add(xC, xD, 0, lsl(12));
  cc.add(wC, wD, 1024, lsl(12));
  cc.add(xC, xD, 1024, lsl(12));
  cc.add(wC, wD, 1024, lsl(12));
  cc.add(xC, xD, 1024, lsl(12));
  cc.adds(wA, wB, wC);
  cc.adds(xA, xB, xC);
  cc.adr(xA, 0);
  cc.adr(xA, 256);
  cc.adrp(xA, 4096);
  cc.and_(wA, wB, wC);
  cc.and_(xA, xB, xC);
  cc.and_(wA, wB, 1);
  cc.and_(xA, xB, 1);
  cc.and_(wA, wB, 15);
  cc.and_(xA, xB, 15);
  cc.and_(wA, wzr, wC);
  cc.and_(xA, xzr, xC);
  cc.and_(wzr, wB, wC);
  cc.and_(xzr, xB, xC);
  cc.and_(wA, wB, 0x1);
  cc.and_(xA, xB, 0x1);
  cc.and_(wA, wB, 0xf);
  cc.and_(xA, xB, 0xf);
  cc.ands(wA, wB, wC);
  cc.ands(xA, xB, xC);
  cc.ands(wA, wzr, wC);
  cc.ands(xA, xzr, xC);
  cc.ands(wzr, wB, wC);
  cc.ands(xzr, xB, xC);
  cc.ands(wA, wB, 0x1);
  cc.ands(xA, xB, 0x1);
  cc.ands(wA, wB, 0xf);
  cc.ands(xA, xB, 0xf);
  cc.asr(wA, wB, 15);
  cc.asr(xA, xB, 15);
  cc.asrv(wA, wB, wC);
  cc.asrv(xA, xB, xC);
  cc.bfc(wA, 8, 16);
  cc.bfc(xA, 8, 16);
  cc.bfi(wA, wB, 8, 16);
  cc.bfi(xA, xB, 8, 16);
  cc.bfm(wA, wB, 8, 16);
  cc.bfm(xA, xB, 8, 16);
  cc.bfxil(wA, wB, 8, 16);
  cc.bfxil(xA, xB, 8, 16);
  cc.bic(wA, wB, wC, lsl(4));
  cc.bic(xA, xB, xC, lsl(4));
  cc.bic(wA, wzr, wC);
  cc.bic(xA, xzr, xC);
  cc.bics(wA, wB, wC, lsl(4));
  cc.bics(xA, xB, xC, lsl(4));
  cc.bics(wA, wzr, wC);
  cc.bics(xA, xzr, xC);
  // Compare-and-swap family (all size/acquire/release variants).
  cc.cas(wA, wB, m);
  cc.cas(xA, xB, m);
  cc.casa(wA, wB, m);
  cc.casa(xA, xB, m);
  cc.casab(wA, wB, m);
  cc.casah(wA, wB, m);
  cc.casal(wA, wB, m);
  cc.casal(xA, xB, m);
  cc.casalb(wA, wB, m);
  cc.casalh(wA, wB, m);
  cc.casb(wA, wB, m);
  cc.cash(wA, wB, m);
  cc.casl(wA, wB, m);
  cc.casl(xA, xB, m);
  cc.caslb(wA, wB, m);
  cc.caslh(wA, wB, m);
  cc.casp(wA, wB, wC, wD, m);
  cc.casp(xA, xB, xC, xD, m);
  cc.caspa(wA, wB, wC, wD, m);
  cc.caspa(xA, xB, xC, xD, m);
  cc.caspal(wA, wB, wC, wD, m);
  cc.caspal(xA, xB, xC, xD, m);
  cc.caspl(wA, wB, wC, wD, m);
  cc.caspl(xA, xB, xC, xD, m);
  // Conditional compare / select / set instructions.
  cc.ccmn(wA, wB, 3, CondCode::kEQ);
  cc.ccmn(xA, xB, 3, CondCode::kEQ);
  cc.ccmn(wA, 2, 3, CondCode::kEQ);
  cc.ccmn(xA, 2, 3, CondCode::kEQ);
  cc.ccmn(wA, wzr, 3, CondCode::kEQ);
  cc.ccmn(xA, xzr, 3, CondCode::kEQ);
  cc.ccmp(wA, wB, 3, CondCode::kEQ);
  cc.ccmp(xA, xB, 3, CondCode::kEQ);
  cc.ccmp(wA, 2, 3, CondCode::kEQ);
  cc.ccmp(xA, 2, 3, CondCode::kEQ);
  cc.ccmp(wA, wzr, 3, CondCode::kEQ);
  cc.ccmp(xA, xzr, 3, CondCode::kEQ);
  cc.cinc(wA, wB, CondCode::kEQ);
  cc.cinc(xA, xB, CondCode::kEQ);
  cc.cinc(wzr, wB, CondCode::kEQ);
  cc.cinc(wA, wzr, CondCode::kEQ);
  cc.cinc(xzr, xB, CondCode::kEQ);
  cc.cinc(xA, xzr, CondCode::kEQ);
  cc.cinv(wA, wB, CondCode::kEQ);
  cc.cinv(xA, xB, CondCode::kEQ);
  cc.cinv(wzr, wB, CondCode::kEQ);
  cc.cinv(wA, wzr, CondCode::kEQ);
  cc.cinv(xzr, xB, CondCode::kEQ);
  cc.cinv(xA, xzr, CondCode::kEQ);
  cc.cls(wA, wB);
  cc.cls(xA, xB);
  cc.cls(wA, wzr);
  cc.cls(xA, xzr);
  cc.cls(wzr, wB);
  cc.cls(xzr, xB);
  cc.clz(wA, wB);
  cc.clz(xA, xB);
  cc.clz(wA, wzr);
  cc.clz(xA, xzr);
  cc.clz(wzr, wB);
  cc.clz(xzr, xB);
  cc.cmn(wA, 33);
  cc.cmn(xA, 33);
  cc.cmn(wA, wB);
  cc.cmn(xA, xB);
  cc.cmn(wA, wB, uxtb(2));
  cc.cmn(xA, xB, uxtb(2));
  cc.cmp(wA, 33);
  cc.cmp(xA, 33);
  cc.cmp(wA, wB);
  cc.cmp(xA, xB);
  cc.cmp(wA, wB, uxtb(2));
  cc.cmp(xA, xB, uxtb(2));
  // CRC32 instructions (plain and Castagnoli, all widths).
  cc.crc32b(wA, wB, wC);
  cc.crc32b(wzr, wB, wC);
  cc.crc32b(wA, wzr, wC);
  cc.crc32b(wA, wB, wzr);
  cc.crc32cb(wA, wB, wC);
  cc.crc32cb(wzr, wB, wC);
  cc.crc32cb(wA, wzr, wC);
  cc.crc32cb(wA, wB, wzr);
  cc.crc32ch(wA, wB, wC);
  cc.crc32ch(wzr, wB, wC);
  cc.crc32ch(wA, wzr, wC);
  cc.crc32ch(wA, wB, wzr);
  cc.crc32cw(wA, wB, wC);
  cc.crc32cw(wzr, wB, wC);
  cc.crc32cw(wA, wzr, wC);
  cc.crc32cw(wA, wB, wzr);
  cc.crc32cx(wA, wB, xC);
  cc.crc32cx(wzr, wB, xC);
  cc.crc32cx(wA, wzr, xC);
  cc.crc32cx(wA, wB, xzr);
  cc.crc32h(wA, wB, wC);
  cc.crc32h(wzr, wB, wC);
  cc.crc32h(wA, wzr, wC);
  cc.crc32h(wA, wB, wzr);
  cc.crc32w(wA, wB, wC);
  cc.crc32w(wzr, wB, wC);
  cc.crc32w(wA, wzr, wC);
  cc.crc32w(wA, wB, wzr);
  cc.crc32x(wA, wB, xC);
  cc.crc32x(wzr, wB, xC);
  cc.crc32x(wA, wzr, xC);
  cc.crc32x(wA, wB, xzr);
  cc.csel(wA, wB, wC, CondCode::kEQ);
  cc.csel(xA, xB, xC, CondCode::kEQ);
  cc.cset(wA, CondCode::kEQ);
  cc.cset(xA, CondCode::kEQ);
  cc.cset(wA, CondCode::kEQ);
  cc.cset(xA, CondCode::kEQ);
  cc.csetm(wA, CondCode::kEQ);
  cc.csetm(xA, CondCode::kEQ);
  cc.csinc(wA, wB, wC, CondCode::kEQ);
  cc.csinc(xA, xB, xC, CondCode::kEQ);
  cc.csinv(wA, wB, wC, CondCode::kEQ);
  cc.csinv(xA, xB, xC, CondCode::kEQ);
  cc.csneg(wA, wB, wC, CondCode::kEQ);
  cc.csneg(xA, xB, xC, CondCode::kEQ);
  cc.eon(wA, wB, wC);
  cc.eon(wzr, wB, wC);
  cc.eon(wA, wzr, wC);
  cc.eon(wA, wB, wzr);
  cc.eon(wA, wB, wC, lsl(4));
  cc.eon(xA, xB, xC);
  cc.eon(xzr, xB, xC);
  cc.eon(xA, xzr, xC);
  cc.eon(xA, xB, xzr);
  cc.eon(xA, xB, xC, lsl(4));
  cc.eor(wA, wB, wC);
  cc.eor(wzr, wB, wC);
  cc.eor(wA, wzr, wC);
  cc.eor(wA, wB, wzr);
  cc.eor(xA, xB, xC);
  cc.eor(xzr, xB, xC);
  cc.eor(xA, xzr, xC);
  cc.eor(xA, xB, xzr);
  cc.eor(wA, wB, wC, lsl(4));
  cc.eor(xA, xB, xC, lsl(4));
  cc.eor(wA, wB, 0x4000);
  cc.eor(xA, xB, 0x8000);
  cc.extr(wA, wB, wC, 15);
  cc.extr(wzr, wB, wC, 15);
  cc.extr(wA, wzr, wC, 15);
  cc.extr(wA, wB, wzr, 15);
  cc.extr(xA, xB, xC, 15);
  cc.extr(xzr, xB, xC, 15);
  cc.extr(xA, xzr, xC, 15);
  cc.extr(xA, xB, xzr, 15);
  // Atomic load-op family (LDADD/LDCLR/LDEOR/LDSET/LDSMAX/LDSMIN/
  // LDUMAX/LDUMIN with all size/acquire/release variants), plus other loads.
  cc.ldadd(wA, wB, m);
  cc.ldadd(xA, xB, m);
  cc.ldadda(wA, wB, m);
  cc.ldadda(xA, xB, m);
  cc.ldaddab(wA, wB, m);
  cc.ldaddah(wA, wB, m);
  cc.ldaddal(wA, wB, m);
  cc.ldaddal(xA, xB, m);
  cc.ldaddalb(wA, wB, m);
  cc.ldaddalh(wA, wB, m);
  cc.ldaddb(wA, wB, m);
  cc.ldaddh(wA, wB, m);
  cc.ldaddl(wA, wB, m);
  cc.ldaddl(xA, xB, m);
  cc.ldaddlb(wA, wB, m);
  cc.ldaddlh(wA, wB, m);
  cc.ldclr(wA, wB, m);
  cc.ldclr(xA, xB, m);
  cc.ldclra(wA, wB, m);
  cc.ldclra(xA, xB, m);
  cc.ldclrab(wA, wB, m);
  cc.ldclrah(wA, wB, m);
  cc.ldclral(wA, wB, m);
  cc.ldclral(xA, xB, m);
  cc.ldclralb(wA, wB, m);
  cc.ldclralh(wA, wB, m);
  cc.ldclrb(wA, wB, m);
  cc.ldclrh(wA, wB, m);
  cc.ldclrl(wA, wB, m);
  cc.ldclrl(xA, xB, m);
  cc.ldclrlb(wA, wB, m);
  cc.ldclrlh(wA, wB, m);
  cc.ldeor(wA, wB, m);
  cc.ldeor(xA, xB, m);
  cc.ldeora(wA, wB, m);
  cc.ldeora(xA, xB, m);
  cc.ldeorab(wA, wB, m);
  cc.ldeorah(wA, wB, m);
  cc.ldeoral(wA, wB, m);
  cc.ldeoral(xA, xB, m);
  cc.ldeoralb(wA, wB, m);
  cc.ldeoralh(wA, wB, m);
  cc.ldeorb(wA, wB, m);
  cc.ldeorh(wA, wB, m);
  cc.ldeorl(wA, wB, m);
  cc.ldeorl(xA, xB, m);
  cc.ldeorlb(wA, wB, m);
  cc.ldeorlh(wA, wB, m);
  cc.ldlar(wA, m);
  cc.ldlar(xA, m);
  cc.ldlarb(wA, m);
  cc.ldlarh(wA, m);
  cc.ldnp(wA, wB, m);
  cc.ldnp(xA, xB, m);
  cc.ldp(wA, wB, m);
  cc.ldp(xA, xB, m);
  cc.ldpsw(xA, xB, m);
  cc.ldr(wA, m);
  cc.ldr(xA, m);
  cc.ldrb(wA, m);
  cc.ldrh(wA, m);
  cc.ldrsw(xA, m);
  cc.ldraa(xA, m);
  cc.ldrab(xA, m);
  cc.ldset(wA, wB, m);
  cc.ldset(xA, xB, m);
  cc.ldseta(wA, wB, m);
  cc.ldseta(xA, xB, m);
  cc.ldsetab(wA, wB, m);
  cc.ldsetah(wA, wB, m);
  cc.ldsetal(wA, wB, m);
  cc.ldsetal(xA, xB, m);
  cc.ldsetalh(wA, wB, m);
  cc.ldsetalb(wA, wB, m);
  cc.ldsetb(wA, wB, m);
  cc.ldseth(wA, wB, m);
  cc.ldsetl(wA, wB, m);
  cc.ldsetl(xA, xB, m);
  cc.ldsetlb(wA, wB, m);
  cc.ldsetlh(wA, wB, m);
  cc.ldsmax(wA, wB, m);
  cc.ldsmax(xA, xB, m);
  cc.ldsmaxa(wA, wB, m);
  cc.ldsmaxa(xA, xB, m);
  cc.ldsmaxab(wA, wB, m);
  cc.ldsmaxah(wA, wB, m);
  cc.ldsmaxal(wA, wB, m);
  cc.ldsmaxal(xA, xB, m);
  cc.ldsmaxalb(wA, wB, m);
  cc.ldsmaxalh(wA, wB, m);
  cc.ldsmaxb(wA, wB, m);
  cc.ldsmaxh(wA, wB, m);
  cc.ldsmaxl(wA, wB, m);
  cc.ldsmaxl(xA, xB, m);
  cc.ldsmaxlb(wA, wB, m);
  cc.ldsmaxlh(wA, wB, m);
  cc.ldsmin(wA, wB, m);
  cc.ldsmin(xA, xB, m);
  cc.ldsmina(wA, wB, m);
  cc.ldsmina(xA, xB, m);
  cc.ldsminab(wA, wB, m);
  cc.ldsminah(wA, wB, m);
  cc.ldsminal(wA, wB, m);
  cc.ldsminal(xA, xB, m);
  cc.ldsminalb(wA, wB, m);
  cc.ldsminalh(wA, wB, m);
  cc.ldsminb(wA, wB, m);
  cc.ldsminh(wA, wB, m);
  cc.ldsminl(wA, wB, m);
  cc.ldsminl(xA, xB, m);
  cc.ldsminlb(wA, wB, m);
  cc.ldsminlh(wA, wB, m);
  cc.ldtr(wA, m);
  cc.ldtr(xA, m);
  cc.ldtrb(wA, m);
  cc.ldtrh(wA, m);
  cc.ldtrsb(wA, m);
  cc.ldtrsh(wA, m);
  cc.ldtrsw(xA, m);
  cc.ldumax(wA, wB, m);
  cc.ldumax(xA, xB, m);
  cc.ldumaxa(wA, wB, m);
  cc.ldumaxa(xA, xB, m);
  cc.ldumaxab(wA, wB, m);
  cc.ldumaxah(wA, wB, m);
  cc.ldumaxal(wA, wB, m);
  cc.ldumaxal(xA, xB, m);
  cc.ldumaxalb(wA, wB, m);
  cc.ldumaxalh(wA, wB, m);
  cc.ldumaxb(wA, wB, m);
  cc.ldumaxh(wA, wB, m);
  cc.ldumaxl(wA, wB, m);
  cc.ldumaxl(xA, xB, m);
  cc.ldumaxlb(wA, wB, m);
  cc.ldumaxlh(wA, wB, m);
  cc.ldumin(wA, wB, m);
  cc.ldumin(xA, xB, m);
  cc.ldumina(wA, wB, m);
  cc.ldumina(xA, xB, m);
  cc.lduminab(wA, wB, m);
  cc.lduminah(wA, wB, m);
  cc.lduminal(wA, wB, m);
  cc.lduminal(xA, xB, m);
  cc.lduminalb(wA, wB, m);
  cc.lduminalh(wA, wB, m);
  cc.lduminb(wA, wB, m);
  cc.lduminh(wA, wB, m);
  cc.lduminl(wA, wB, m);
  cc.lduminl(xA, xB, m);
  cc.lduminlb(wA, wB, m);
  cc.lduminlh(wA, wB, m);
  cc.ldur(wA, m);
  cc.ldur(xA, m);
  cc.ldurb(wA, m);
  cc.ldurh(wA, m);
  cc.ldursb(wA, m);
  cc.ldursh(wA, m);
  cc.ldursw(xA, m);
  cc.ldxp(wA, wB, m);
  cc.ldxp(xA, xB, m);
  cc.ldxr(wA, m);
  cc.ldxr(xA, m);
  cc.ldxrb(wA, m);
  cc.ldxrh(wA, m);
  // Shifts, multiplies, moves, and remaining ALU instructions.
  cc.lsl(wA, wB, wC);
  cc.lsl(xA, xB, xC);
  cc.lsl(wA, wB, 15);
  cc.lsl(xA, xB, 15);
  cc.lslv(wA, wB, wC);
  cc.lslv(xA, xB, xC);
  cc.lsr(wA, wB, wC);
  cc.lsr(xA, xB, xC);
  cc.lsr(wA, wB, 15);
  cc.lsr(xA, xB, 15);
  cc.lsrv(wA, wB, wC);
  cc.lsrv(xA, xB, xC);
  cc.madd(wA, wB, wC, wD);
  cc.madd(xA, xB, xC, xD);
  cc.mneg(wA, wB, wC);
  cc.mneg(xA, xB, xC);
  cc.mov(wA, wB);
  cc.mov(xA, xB);
  cc.mov(wA, 0);
  cc.mov(wA, 1);
  cc.mov(wA, 2);
  cc.mov(wA, 3);
  cc.mov(wA, 4);
  cc.mov(wA, 5);
  cc.mov(wA, 6);
  cc.mov(wA, 7);
  cc.mov(wA, 8);
  cc.mov(wA, 9);
  cc.mov(wA, 10);
  cc.mov(wA, 0xA234);
  cc.mov(xA, 0xA23400000000);
  cc.msub(wA, wB, wC, wD);
  cc.msub(xA, xB, xC, xD);
  cc.mul(wA, wB, wC);
  cc.mul(xA, xB, xC);
  cc.mvn(wA, wB);
  cc.mvn(xA, xB);
  cc.mvn(wA, wB, lsl(4));
  cc.mvn(xA, xB, lsl(4));
  cc.neg(wA, wB);
  cc.neg(xA, xB);
  cc.neg(wA, wB, lsl(4));
  cc.neg(xA, xB, lsl(4));
  cc.negs(wA, wB);
  cc.negs(xA, xB);
  cc.negs(wA, wB, lsl(4));
  cc.negs(xA, xB, lsl(4));
  cc.ngc(wA, wB);
  cc.ngc(xA, xB);
  cc.ngcs(wA, wB);
  cc.ngcs(xA, xB);
  cc.orn(wA, wB, wC);
  cc.orn(xA, xB, xC);
  cc.orn(wA, wB, wC, lsl(4));
  cc.orn(xA, xB, xC, lsl(4));
  cc.orr(wA, wB, wC);
  cc.orr(xA, xB, xC);
  cc.orr(wA, wB, wC, lsl(4));
  cc.orr(xA, xB, xC, lsl(4));
  cc.orr(wA, wB, 0x4000);
  cc.orr(xA, xB, 0x8000);
  cc.rbit(wA, wB);
  cc.rbit(xA, xB);
  cc.rev(wA, wB);
  cc.rev(xA, xB);
  cc.rev16(wA, wB);
  cc.rev16(xA, xB);
  cc.rev32(xA, xB);
  cc.rev64(xA, xB);
  cc.ror(wA, wB, wC);
  cc.ror(xA, xB, xC);
  cc.ror(wA, wB, 15);
  cc.ror(xA, xB, 15);
  cc.rorv(wA, wB, wC);
  cc.rorv(xA, xB, xC);
  cc.sbc(wA, wB, wC);
  cc.sbc(xA, xB, xC);
  cc.sbcs(wA, wB, wC);
  cc.sbcs(xA, xB, xC);
  cc.sbfiz(wA, wB, 5, 10);
  cc.sbfiz(xA, xB, 5, 10);
  cc.sbfm(wA, wB, 5, 10);
  cc.sbfm(xA, xB, 5, 10);
  cc.sbfx(wA, wB, 5, 10);
  cc.sbfx(xA, xB, 5, 10);
  cc.sdiv(wA, wB, wC);
  cc.sdiv(xA, xB, xC);
  cc.smaddl(xA, wB, wC, xD);
  cc.smnegl(xA, wB, wC);
  cc.smsubl(xA, wB, wC, xD);
  cc.smulh(xA, xB, xC);
  cc.smull(xA, wB, wC);
  // Stores (pair, unprivileged, unscaled, exclusive).
  cc.stp(wA, wB, m);
  cc.stp(xA, xB, m);
  cc.sttr(wA, m);
  cc.sttr(xA, m);
  cc.sttrb(wA, m);
  cc.sttrh(wA, m);
  cc.stur(wA, m);
  cc.stur(xA, m);
  cc.sturb(wA, m);
  cc.sturh(wA, m);
  cc.stxp(wA, wB, wC, m);
  cc.stxp(wA, xB, xC, m);
  cc.stxr(wA, wB, m);
  cc.stxr(wA, xB, m);
  cc.stxrb(wA, wB, m);
  cc.stxrh(wA, wB, m);
  cc.sub(wA, wB, wC);
  cc.sub(xA, xB, xC);
  cc.sub(wA, wB, wC, lsl(3));
  cc.sub(xA, xB, xC, lsl(3));
  cc.subg(xA, xB, 32, 11);
  cc.subp(xA, xB, xC);
  cc.subps(xA, xB, xC);
  cc.subs(wA, wB, wC);
  cc.subs(xA, xB, xC);
  cc.subs(wA, wB, wC, lsl(3));
  cc.subs(xA, xB, xC, lsl(3));
  cc.sxtb(wA, wB);
  cc.sxtb(xA, wB);
  cc.sxth(wA, wB);
  cc.sxth(xA, wB);
  cc.sxtw(xA, wB);
  cc.tst(wA, 1);
  cc.tst(xA, 1);
  cc.tst(wA, wB);
  cc.tst(xA, xB);
  cc.tst(wA, wB, lsl(4));
  cc.tst(xA, xB, lsl(4));
  cc.udiv(wA, wB, wC);
  cc.udiv(xA, xB, xC);
  cc.ubfiz(wA, wB, 5, 10);
  cc.ubfiz(xA, xB, 5, 10);
  cc.ubfm(wA, wB, 5, 10);
  cc.ubfm(xA, xB, 5, 10);
  cc.ubfx(wA, wB, 5, 10);
  cc.ubfx(xA, xB, 5, 10);
  cc.umaddl(xA, wB, wC, xD);
  cc.umnegl(xA, wB, wC);
  cc.umsubl(xA, wB, wC, xD);
  cc.umulh(xA, xB, xC);
  cc.umull(xA, wB, wC);
  cc.uxtb(wA, wB);
  cc.uxth(wA, wB);
}
|
||||
|
||||
// Dispatches the GP workload to the concrete emitter type behind `emitter`
// (Assembler, Builder, or Compiler). For Assembler/Builder the registers are
// physical (x0..x3) and `emitPrologEpilog` optionally wraps the sequence in a
// function frame; for Compiler virtual registers are used and
// `emitPrologEpilog` is irrelevant (the Compiler manages the frame itself).
static void generateGpSequence(BaseEmitter& emitter, bool emitPrologEpilog) {
  if (emitter.isAssembler()) {
    a64::Assembler& cc = *emitter.as<a64::Assembler>();

    a64::Gp a = a64::x0;
    a64::Gp b = a64::x1;
    a64::Gp c = a64::x2;
    a64::Gp d = a64::x3;

    if (emitPrologEpilog) {
      // Build a function frame so the benchmark also measures
      // prolog/epilog emission.
      FuncDetail func;
      func.init(FuncSignatureT<void, void*, const void*, size_t>(CallConvId::kHost), cc.environment());

      FuncFrame frame;
      frame.init(func);
      frame.addDirtyRegs(a, b, c, d);
      frame.finalize();

      cc.emitProlog(frame);
      generateGpSequenceInternal(cc, a, b, c, d);
      cc.emitEpilog(frame);
    }
    else {
      generateGpSequenceInternal(cc, a, b, c, d);
    }
  }
#ifndef ASMJIT_NO_BUILDER
  else if (emitter.isBuilder()) {
    a64::Builder& cc = *emitter.as<a64::Builder>();

    a64::Gp a = a64::x0;
    a64::Gp b = a64::x1;
    a64::Gp c = a64::x2;
    a64::Gp d = a64::x3;

    if (emitPrologEpilog) {
      // Same frame setup as the Assembler path, but recorded as nodes.
      FuncDetail func;
      func.init(FuncSignatureT<void, void*, const void*, size_t>(CallConvId::kHost), cc.environment());

      FuncFrame frame;
      frame.init(func);
      frame.addDirtyRegs(a, b, c, d);
      frame.finalize();

      cc.emitProlog(frame);
      generateGpSequenceInternal(cc, a, b, c, d);
      cc.emitEpilog(frame);
    }
    else {
      generateGpSequenceInternal(cc, a, b, c, d);
    }
  }
#endif
#ifndef ASMJIT_NO_COMPILER
  else if (emitter.isCompiler()) {
    a64::Compiler& cc = *emitter.as<a64::Compiler>();

    // Compiler path uses virtual registers; register allocation assigns
    // physical ones. `emitPrologEpilog` is not consulted here.
    a64::Gp a = cc.newIntPtr("a");
    a64::Gp b = cc.newIntPtr("b");
    a64::Gp c = cc.newIntPtr("c");
    a64::Gp d = cc.newIntPtr("d");

    cc.addFunc(FuncSignatureT<void>(CallConvId::kHost));
    generateGpSequenceInternal(cc, a, b, c, d);
    cc.endFunc();
  }
#endif
}
|
||||
|
||||
// Runs `emitterFn` through every available emitter configuration and prints
// per-configuration timings (the `bench<>` helper does the timing/printing):
//   Assembler: raw, validated, validated + prolog/epilog
//   Builder:   emit only, emit + finalize, prolog/epilog + finalize
//   Compiler:  emit only, emit + finalize
// `emitterFn(cc, bool)` receives the emitter and whether to wrap the code in
// a prolog/epilog.
template<typename EmitterFn>
static void benchmarkA64Function(Arch arch, uint32_t numIterations, const char* description, const EmitterFn& emitterFn) noexcept {
  CodeHolder code;
  printf("%s:\n", description);

  bench<a64::Assembler>(code, arch, numIterations, "[raw]", [&](a64::Assembler& cc) {
    emitterFn(cc, false);
  });

  bench<a64::Assembler>(code, arch, numIterations, "[validated]", [&](a64::Assembler& cc) {
    cc.addDiagnosticOptions(DiagnosticOptions::kValidateAssembler);
    emitterFn(cc, false);
  });

  bench<a64::Assembler>(code, arch, numIterations, "[prolog/epilog]", [&](a64::Assembler& cc) {
    cc.addDiagnosticOptions(DiagnosticOptions::kValidateAssembler);
    emitterFn(cc, true);
  });

#ifndef ASMJIT_NO_BUILDER
  bench<a64::Builder>(code, arch, numIterations, "[no-asm]", [&](a64::Builder& cc) {
    emitterFn(cc, false);
  });

  bench<a64::Builder>(code, arch, numIterations, "[finalized]", [&](a64::Builder& cc) {
    emitterFn(cc, false);
    cc.finalize();
  });

  bench<a64::Builder>(code, arch, numIterations, "[prolog/epilog]", [&](a64::Builder& cc) {
    emitterFn(cc, true);
    cc.finalize();
  });
#endif

#ifndef ASMJIT_NO_COMPILER
  // NOTE(review): Compiler runs pass `true` unlike the Builder "[no-asm]"
  // case; generateGpSequence's Compiler path ignores the flag, so this is
  // harmless — confirm if other workloads are added.
  bench<a64::Compiler>(code, arch, numIterations, "[no-asm]", [&](a64::Compiler& cc) {
    emitterFn(cc, true);
  });

  bench<a64::Compiler>(code, arch, numIterations, "[finalized]", [&](a64::Compiler& cc) {
    emitterFn(cc, true);
    cc.finalize();
  });
#endif

  printf("\n");
}
|
||||
|
||||
// Entry point for the AArch64 emitter benchmarks. Currently benchmarks a
// single workload: a long sequence of general-purpose instructions.
void benchmarkA64Emitters(uint32_t numIterations) {
  auto emitWorkload = [](BaseEmitter& emitter, bool emitPrologEpilog) {
    generateGpSequence(emitter, emitPrologEpilog);
  };

  benchmarkA64Function(
    Arch::kAArch64, numIterations,
    "GpSequence (Sequence of GP instructions - reg/mem)",
    emitWorkload);
}
|
||||
|
||||
#endif // !ASMJIT_NO_AARCH64
|
||||
@@ -136,12 +136,17 @@ int main() {
|
||||
// Relocate to the base-address of the allocated memory.
|
||||
code.relocateToBase(uint64_t(uintptr_t(rxPtr)));
|
||||
|
||||
VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadWrite);
|
||||
|
||||
// Copy the flattened code into `mem.rw`. There are two ways. You can either copy
|
||||
// everything manually by iterating over all sections or use `copyFlattenedData`.
|
||||
// This code is similar to what `copyFlattenedData(p, codeSize, 0)` would do:
|
||||
for (Section* section : code.sectionsByOrder())
|
||||
memcpy(static_cast<uint8_t*>(rwPtr) + size_t(section->offset()), section->data(), section->bufferSize());
|
||||
|
||||
VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadExecute);
|
||||
VirtMem::flushInstructionCache(rwPtr, code.codeSize());
|
||||
|
||||
// Execute the function and test whether it works.
|
||||
typedef size_t (*Func)(size_t idx);
|
||||
Func fn = (Func)rxPtr;
|
||||
|
||||
365
tools/tablegen-arm.js
Normal file
365
tools/tablegen-arm.js
Normal file
@@ -0,0 +1,365 @@
|
||||
// [AsmJit]
|
||||
// Machine Code Generation for C++.
|
||||
//
|
||||
// [License]
|
||||
// ZLIB - See LICENSE.md file in the package.
|
||||
|
||||
// ============================================================================
|
||||
// tablegen-arm.js
|
||||
// ============================================================================
|
||||
|
||||
"use strict";
|
||||
|
||||
const { executionAsyncResource } = require("async_hooks");
|
||||
const core = require("./tablegen.js");
|
||||
const hasOwn = Object.prototype.hasOwnProperty;
|
||||
|
||||
const asmdb = core.asmdb;
|
||||
const kIndent = core.kIndent;
|
||||
const IndexedArray = core.IndexedArray;
|
||||
const StringUtils = core.StringUtils;
|
||||
|
||||
const FAIL = core.FAIL;
|
||||
|
||||
// ============================================================================
|
||||
// [ArmDB]
|
||||
// ============================================================================
|
||||
|
||||
// Create ARM ISA.
|
||||
const isa = new asmdb.arm.ISA();
|
||||
|
||||
// ============================================================================
|
||||
// [tablegen.arm.GenUtils]
|
||||
// ============================================================================
|
||||
|
||||
class GenUtils {
  // Queries the ISA database by instruction `name`; when `mode` is given,
  // keeps only records whose architecture matches it.
  static query(name, mode) {
    const insts = isa.query(name);
    if (!mode)
      return insts;
    return insts.filter((inst) => inst.arch === mode);
  }

  // Returns a fixed-width architecture tag such as "[Txx A32 A64]" that
  // summarizes which encodings (T16/T32/A32/A64) appear in `records`.
  static archOf(records) {
    let hasT16 = false;
    let hasT32 = false;
    let hasA32 = false;
    let hasA64 = false;

    for (const record of records) {
      hasT16 = hasT16 || record.encoding === "T16";
      hasT32 = hasT32 || record.encoding === "T32";
      hasA32 = hasA32 || record.encoding === "A32";
      hasA64 = hasA64 || record.encoding === "A64";
    }

    const thumb = (hasT16 && hasT32) ? "Txx" :
                  hasT16             ? "T16" :
                  hasT32             ? "T32" : "---";
    const a32 = hasA32 ? "A32" : "---";
    const a64 = hasA64 ? "A64" : "---";

    return `[${thumb} ${a32} ${a64}]`;
  }

  // Returns a sorted array of all extension names found across `records`.
  static featuresOf(records) {
    const exts = Object.create(null);
    for (const record of records) {
      for (const k in record.extensions)
        exts[k] = true;
    }
    return Object.keys(exts).sort();
  }
}
|
||||
|
||||
// ============================================================================
|
||||
// [tablegen.arm.ArmTableGen]
|
||||
// ============================================================================
|
||||
|
||||
// Table generator for the AArch64 instruction database. Parses INST(...)
// records from a64instdb.cpp, lets generator passes compute indexes, and
// injects the regenerated table back into the source file.
class ArmTableGen extends core.TableGen {
  constructor() {
    super("A64");
  }

  // --------------------------------------------------------------------------
  // [Parse / Merge]
  // --------------------------------------------------------------------------

  // Parses all INST(...) records between the ${InstInfo:Begin} and
  // ${InstInfo:End} markers of a64instdb.cpp and adds each one via
  // `this.addInst()`. Fails hard if nothing could be parsed.
  parse() {
    const rawData = this.dataOfFile("src/asmjit/arm/a64instdb.cpp");
    const stringData = StringUtils.extract(rawData, "// ${InstInfo:Begin}", "// ${InstInfo:End");

    const re = new RegExp(
      "INST\\(\\s*" +
        // [01] Instruction.
        "(" +
          "[A-Za-z0-9_]+" +
        ")\\s*,\\s*" +

        // [02] Encoding.
        "(" +
          "[^,]+" +
        ")\\s*,\\s*" +

        // [03] OpcodeData.
        "(" +
          "\\([^\\)]+\\)" +
        ")\\s*,\\s*" +

        // [04] RWInfo.
        "(" +
          "[^,]+" +
        ")\\s*,\\s*" +

        // [05] InstructionFlags.
        "(\\s*" +
          "(?:" +
            "(?:" +
              "[\\d]+" +
            "|" +
              "F\\([^\\)]*\\)" +
            ")" +
            "\\s*" +
            "[|]?\\s*" +
          ")+" +
        ")\\s*,\\s*" +

        // --- autogenerated fields ---

        // [06] OpcodeDataIndex.
        "([^\\)]+)" +
        "\\s*,\\s*" +

        // [07] NameDataIndex.
        "([^\\)]+)" +
        "\\s*\\)"
      , "g");

    var m;
    while ((m = re.exec(stringData)) !== null) {
      var enum_ = m[1];
      var name = enum_ === "None" ? "" : enum_.toLowerCase();
      var encoding = m[2].trim();
      var opcodeData = m[3].trim();
      var rwInfo = m[4].trim();
      var instFlags = m[5].trim();
      // BUG FIX: the autogenerated-field captures were never bound; the
      // guard below previously read an undefined `encodingDataIndex`, which
      // threw a ReferenceError as soon as the `#define INST(...)` line was
      // matched. Bind groups [06]/[07] and compare against them instead.
      var opcodeDataIndex = m[6].trim();
      var nameDataIndex = m[7].trim();

      var displayName = name;
      if (name.endsWith("_v"))
        displayName = name.substring(0, name.length - 2);

      // We have just matched the `#define INST(...)` line itself — its
      // "arguments" are the macro parameter names, not a real record.
      if (name === "id" &&
          encoding === "encoding" &&
          opcodeDataIndex === "opcodeDataIndex")
        continue;

      this.addInst({
        id              : 0,           // Instruction id (numeric value).
        name            : name,        // Instruction name.
        displayName     : displayName, // Instruction name to display.
        enum            : enum_,       // Instruction enum without `kId` prefix.
        encoding        : encoding,    // Opcode encoding.
        opcodeData      : opcodeData,  // Opcode data.
        opcodeDataIndex : -1,          // Opcode data index (regenerated later).
        rwInfo          : rwInfo,      // RW info.
        flags           : instFlags,   // Instruction flags.

        nameIndex       : -1           // Index to InstDB::_nameData (regenerated later).
      });
    }

    if (this.insts.length === 0 || this.insts.length !== StringUtils.countOf(stringData, "INST("))
      FAIL("ARMTableGen.parse(): Invalid parsing regexp (no data parsed)");

    console.log("Number of Instructions: " + this.insts.length);
  }

  // Serializes `this.insts` back into aligned INST(...) rows and injects the
  // result into the "InstInfo" section of the source file.
  merge() {
    var s = StringUtils.format(this.insts, "", true, function(inst) {
      return "INST(" +
        String(inst.enum            ).padEnd(17) + ", " +
        String(inst.encoding        ).padEnd(19) + ", " +
        String(inst.opcodeData      ).padEnd(86) + ", " +
        String(inst.rwInfo          ).padEnd(10) + ", " +
        String(inst.flags           ).padEnd(26) + ", " +
        String(inst.opcodeDataIndex ).padEnd( 3) + ", " +
        String(inst.nameIndex       ).padEnd( 4) + ")";
    }) + "\n";
    return this.inject("InstInfo", s, this.insts.length * 4);
  }

  // --------------------------------------------------------------------------
  // [Hooks]
  // --------------------------------------------------------------------------

  // Loads all AArch64 source files that generators may read/patch, then parses
  // the instruction table.
  onBeforeRun() {
    this.load([
      "src/asmjit/arm/a64emitter.h",
      "src/asmjit/arm/a64globals.h",
      "src/asmjit/arm/a64instdb.cpp",
      "src/asmjit/arm/a64instdb.h",
      "src/asmjit/arm/a64instdb_p.h"
    ]);
    this.parse();
  }

  // Writes the regenerated tables back to disk and reports their sizes.
  onAfterRun() {
    this.merge();
    this.save();
    this.dumpTableSizes();
  }
}
|
||||
|
||||
// ============================================================================
|
||||
// [tablegen.arm.IdEnum]
|
||||
// ============================================================================
|
||||
|
||||
class IdEnum extends core.IdEnum {
|
||||
constructor() {
|
||||
super("IdEnum");
|
||||
}
|
||||
|
||||
comment(inst) {
|
||||
let name = inst.name;
|
||||
let ext = [];
|
||||
|
||||
if (name.endsWith("_v")) {
|
||||
name = name.substr(0, name.length - 2);
|
||||
ext.push("ASIMD");
|
||||
}
|
||||
|
||||
let exts = "";
|
||||
if (ext.length)
|
||||
exts = " {" + ext.join("&") + "}";
|
||||
|
||||
return `Instruction '${name}'${exts}.`;
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// [tablegen.arm.NameTable]
|
||||
// ============================================================================
|
||||
|
||||
class NameTable extends core.NameTable {
|
||||
constructor() {
|
||||
super("NameTable");
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// [tablegen.arm.EncodingTable]
|
||||
// ============================================================================
|
||||
|
||||
class EncodingTable extends core.Task {
|
||||
constructor() {
|
||||
super("EncodingTable");
|
||||
}
|
||||
|
||||
run() {
|
||||
const insts = this.ctx.insts;
|
||||
const map = {};
|
||||
|
||||
for (var i = 0; i < insts.length; i++) {
|
||||
const inst = insts[i];
|
||||
|
||||
const encoding = inst.encoding;
|
||||
const opcodeData = inst.opcodeData.replace(/\(/g, "{ ").replace(/\)/g, " }");
|
||||
|
||||
if (!hasOwn.call(map, encoding))
|
||||
map[encoding] = [];
|
||||
|
||||
if (inst.opcodeData === "(_)") {
|
||||
inst.opcodeDataIndex = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
const opcodeTable = map[encoding];
|
||||
const opcodeDataIndex = opcodeTable.length;
|
||||
|
||||
opcodeTable.push({ name: inst.name, data: opcodeData });
|
||||
inst.opcodeDataIndex = opcodeDataIndex;
|
||||
}
|
||||
|
||||
const keys = Object.keys(map);
|
||||
keys.sort();
|
||||
|
||||
var tableSource = "";
|
||||
var tableHeader = "";
|
||||
var encodingIds = "";
|
||||
|
||||
encodingIds += "enum EncodingId : uint32_t {\n"
|
||||
encodingIds += " kEncodingNone = 0";
|
||||
|
||||
keys.forEach((dataClass) => {
|
||||
const dataName = dataClass[0].toLowerCase() + dataClass.substr(1);
|
||||
const opcodeTable = map[dataClass];
|
||||
const count = opcodeTable.length;
|
||||
|
||||
if (dataClass !== "None") {
|
||||
encodingIds += ",\n"
|
||||
encodingIds += " kEncoding" + dataClass;
|
||||
}
|
||||
|
||||
if (count) {
|
||||
tableHeader += `extern const ${dataClass} ${dataName}[${count}];\n`;
|
||||
|
||||
if (tableSource)
|
||||
tableSource += "\n";
|
||||
|
||||
tableSource += `const ${dataClass} ${dataName}[${count}] = {\n`;
|
||||
for (var i = 0; i < count; i++) {
|
||||
tableSource += ` ${opcodeTable[i].data}` + (i == count - 1 ? " " : ",") + " // " + opcodeTable[i].name + "\n";
|
||||
}
|
||||
tableSource += `};\n`;
|
||||
}
|
||||
});
|
||||
|
||||
encodingIds += "\n};\n";
|
||||
|
||||
return this.ctx.inject("EncodingId" , StringUtils.disclaimer(encodingIds), 0) +
|
||||
this.ctx.inject("EncodingDataForward", StringUtils.disclaimer(tableHeader), 0) +
|
||||
this.ctx.inject("EncodingData" , StringUtils.disclaimer(tableSource), 0);
|
||||
}
|
||||
}
|
||||
// ============================================================================
|
||||
// [tablegen.arm.CommonTable]
|
||||
// ============================================================================
|
||||
|
||||
class CommonTable extends core.Task {
|
||||
constructor() {
|
||||
super("CommonTable", [
|
||||
"IdEnum",
|
||||
"NameTable"
|
||||
]);
|
||||
}
|
||||
|
||||
run() {
|
||||
//const table = new IndexedArray();
|
||||
|
||||
//for (var i = 0; i < insts.length; i++) {
|
||||
// const inst = insts[i];
|
||||
// const item = "{ " + "0" + "}";
|
||||
// inst.commonIndex = table.addIndexed(item);
|
||||
//}
|
||||
|
||||
// return this.ctx.inject("InstInfo", StringUtils.disclaimer(s), 0);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// [Main]
|
||||
// ============================================================================
|
||||
|
||||
new ArmTableGen()
|
||||
.addTask(new IdEnum())
|
||||
.addTask(new NameTable())
|
||||
.addTask(new EncodingTable())
|
||||
.addTask(new CommonTable())
|
||||
.run();
|
||||
3
tools/tablegen-arm.sh
Executable file
3
tools/tablegen-arm.sh
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
|
||||
node ./tablegen-arm.js
|
||||
@@ -524,6 +524,7 @@ class X86TableGen extends core.TableGen {
|
||||
this.addInst({
|
||||
id : 0, // Instruction id (numeric value).
|
||||
name : name, // Instruction name.
|
||||
displayName : name, // Instruction name to display.
|
||||
enum : enum_, // Instruction enum without `kId` prefix.
|
||||
dbInsts : dbInsts, // All dbInsts returned from asmdb query.
|
||||
encoding : encoding, // Instruction encoding.
|
||||
|
||||
3
tools/tablegen-x86.sh
Executable file
3
tools/tablegen-x86.sh
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/usr/bin/env sh
|
||||
set -e
|
||||
node ./tablegen-x86.js $@
|
||||
@@ -160,6 +160,21 @@ exports.Lang = Lang;
|
||||
class StringUtils {
|
||||
static asString(x) { return String(x); }
|
||||
|
||||
static countOf(s, pattern) {
|
||||
if (!pattern)
|
||||
FAIL(`Pattern cannot be empty`);
|
||||
|
||||
var n = 0;
|
||||
var pos = 0;
|
||||
|
||||
while ((pos = s.indexOf(pattern, pos)) >= 0) {
|
||||
n++;
|
||||
pos += pattern.length;
|
||||
}
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static capitalize(s) {
|
||||
s = String(s);
|
||||
return !s ? s : s[0].toUpperCase() + s.substr(1);
|
||||
@@ -258,15 +273,28 @@ class StringUtils {
|
||||
return lines.join("\n");
|
||||
}
|
||||
|
||||
static extract(s, start, end) {
|
||||
var iStart = s.indexOf(start);
|
||||
var iEnd = s.indexOf(end);
|
||||
|
||||
if (iStart === -1)
|
||||
FAIL(`StringUtils.extract(): Couldn't locate start mark '${start}'`);
|
||||
|
||||
if (iEnd === -1)
|
||||
FAIL(`StringUtils.extract(): Couldn't locate end mark '${end}'`);
|
||||
|
||||
return s.substring(iStart + start.length, iEnd).trim();
|
||||
}
|
||||
|
||||
static inject(s, start, end, code) {
|
||||
var iStart = s.indexOf(start);
|
||||
var iEnd = s.indexOf(end);
|
||||
|
||||
if (iStart === -1)
|
||||
FAIL(`Utils.inject(): Couldn't locate start mark '${start}'`);
|
||||
FAIL(`StringUtils.inject(): Couldn't locate start mark '${start}'`);
|
||||
|
||||
if (iEnd === -1)
|
||||
FAIL(`Utils.inject(): Couldn't locate end mark '${end}'`);
|
||||
FAIL(`StringUtils.inject(): Couldn't locate end mark '${end}'`);
|
||||
|
||||
var nIndent = 0;
|
||||
while (iStart > 0 && s[iStart-1] === " ") {
|
||||
@@ -875,14 +903,14 @@ class NameTable extends Task {
|
||||
var maxLength = 0;
|
||||
for (var i = 0; i < insts.length; i++) {
|
||||
const inst = insts[i];
|
||||
instNames.add(inst.name);
|
||||
maxLength = Math.max(maxLength, inst.name.length);
|
||||
instNames.add(inst.displayName);
|
||||
maxLength = Math.max(maxLength, inst.displayName.length);
|
||||
}
|
||||
instNames.index();
|
||||
|
||||
for (var i = 0; i < insts.length; i++) {
|
||||
const inst = insts[i];
|
||||
const name = inst.name;
|
||||
const name = inst.displayName;
|
||||
const nameIndex = instNames.getIndex(name);
|
||||
|
||||
const index = name.charCodeAt(0) - 'a'.charCodeAt(0);
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
#!/usr/bin/env sh
|
||||
set -e
|
||||
node ./tablegen-arm.js $@
|
||||
node ./tablegen-x86.js $@
|
||||
|
||||
Reference in New Issue
Block a user