From 9b48ec0f1e66ec85634b8ef801804e56619f01cb Mon Sep 17 00:00:00 2001
From: kobalicek
Date: Sun, 1 Mar 2015 17:08:38 +0100
Subject: [PATCH] Fixed tracing. Added possibility to unfollow jump (#7).
 Added possibility to return from multiple locations in code (related to
 unfollow). Added more tests. Moved some functions from X86Context to Context.
---
 src/app/test/asmjit_test_x86.cpp | 165 ++++++++++++++++++++++++++++++-
 src/asmjit/base/assembler.h      |   7 +-
 src/asmjit/base/compiler.cpp     |  28 +++---
 src/asmjit/base/context.cpp      |  36 ++++---
 src/asmjit/base/context_p.h      |  45 ++++++++-
 src/asmjit/build.h               |   2 +-
 src/asmjit/host.h                |   6 +-
 src/asmjit/x86/x86compiler.cpp   |  18 +++-
 src/asmjit/x86/x86compiler.h     |   6 ++
 src/asmjit/x86/x86context.cpp    |  94 ++++++++++--------
 src/asmjit/x86/x86inst.h         |  14 +--
 src/asmjit/x86/x86operand.h      |   2 +-
 12 files changed, 334 insertions(+), 89 deletions(-)

diff --git a/src/app/test/asmjit_test_x86.cpp b/src/app/test/asmjit_test_x86.cpp
index 5a65263..04caa45 100644
--- a/src/app/test/asmjit_test_x86.cpp
+++ b/src/app/test/asmjit_test_x86.cpp
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <setjmp.h>

 using namespace asmjit;

@@ -2601,14 +2602,14 @@ struct X86Test_CallMisc3 : public X86Test {
 };

 // ============================================================================
-// [X86Test_ConstPoolBase]
+// [X86Test_MiscConstPool]
 // ============================================================================

-struct X86Test_ConstPoolBase : public X86Test {
-  X86Test_ConstPoolBase() : X86Test("[ConstPool] Base") {}
+struct X86Test_MiscConstPool : public X86Test {
+  X86Test_MiscConstPool() : X86Test("[Misc] ConstPool") {}

   static void add(PodVector<X86Test*>& tests) {
-    tests.append(new X86Test_ConstPoolBase());
+    tests.append(new X86Test_MiscConstPool());
   }

   virtual void compile(X86Compiler& c) {
@@ -2642,6 +2643,156 @@ struct X86Test_ConstPoolBase : public X86Test {
   }
 };

+// ============================================================================
+// [X86Test_MiscMultiRet]
+// ============================================================================
+
+struct X86Test_MiscMultiRet : public X86Test {
+  X86Test_MiscMultiRet() : X86Test("[Misc] MultiRet") {}
+
+  static void add(PodVector<X86Test*>& tests) {
+    tests.append(new X86Test_MiscMultiRet());
+  }
+
+  virtual void compile(X86Compiler& c) {
+    c.addFunc(kFuncConvHost, FuncBuilder3<int, int, int, int>());
+
+    X86GpVar op(c, kVarTypeInt32, "op");
+    X86GpVar a(c, kVarTypeInt32, "a");
+    X86GpVar b(c, kVarTypeInt32, "b");
+
+    Label L_Zero(c);
+    Label L_Add(c);
+    Label L_Sub(c);
+    Label L_Mul(c);
+    Label L_Div(c);
+
+    c.setArg(0, op);
+    c.setArg(1, a);
+    c.setArg(2, b);
+
+    c.cmp(op, 0);
+    c.jz(L_Add);
+
+    c.cmp(op, 1);
+    c.jz(L_Sub);
+
+    c.cmp(op, 2);
+    c.jz(L_Mul);
+
+    c.cmp(op, 3);
+    c.jz(L_Div);
+
+    c.bind(L_Zero);
+    c.xor_(a, a);
+    c.ret(a);
+
+    c.bind(L_Add);
+    c.add(a, b);
+    c.ret(a);
+
+    c.bind(L_Sub);
+    c.sub(a, b);
+    c.ret(a);
+
+    c.bind(L_Mul);
+    c.imul(a, b);
+    c.ret(a);
+
+    c.bind(L_Div);
+    c.cmp(b, 0);
+    c.jz(L_Zero);
+
+    X86GpVar zero(c, kVarTypeInt32, "zero");
+    c.xor_(zero, zero);
+    c.idiv(zero, a, b);
+    c.ret(a);
+
+    c.endFunc();
+  }
+
+  virtual bool run(void* _func, StringBuilder& result, StringBuilder& expect) {
+    typedef int (*Func)(int, int, int);
+
+    Func func = asmjit_cast<Func>(_func);
+
+    int a = 44;
+    int b = 3;
+
+    int r0 = func(0, a, b);
+    int r1 = func(1, a, b);
+    int r2 = func(2, a, b);
+    int r3 = func(3, a, b);
+    int e0 = a + b;
+    int e1 = a - b;
+    int e2 = a * b;
+    int e3 = a / b;
+
+    result.setFormat("ret={%d %d %d %d}", r0, r1, r2, r3);
+    expect.setFormat("ret={%d %d %d %d}", e0, e1, e2, e3);
+
+    return result.eq(expect);
+  }
+};
+
+// ============================================================================
+// [X86Test_MiscUnfollow]
+// ============================================================================
+
+// Global (I didn't find better way to really test this).
+static jmp_buf globalJmpBuf;
+
+struct X86Test_MiscUnfollow : public X86Test {
+  X86Test_MiscUnfollow() : X86Test("[Misc] Unfollow") {}
+
+  static void add(PodVector<X86Test*>& tests) {
+    tests.append(new X86Test_MiscUnfollow());
+  }
+
+  virtual void compile(X86Compiler& c) {
+    c.addFunc(kFuncConvHost, FuncBuilder2<int, int, void*>());
+
+    X86GpVar a(c, kVarTypeInt32);
+    X86GpVar b(c, kVarTypeIntPtr);
+
+    Label tramp(c);
+
+    c.setArg(0, a);
+    c.setArg(1, b);
+
+    c.cmp(a, 0);
+    c.jz(tramp);
+
+    c.ret(a);
+
+    c.bind(tramp);
+    c.unfollow().jmp(b);
+
+    c.endFunc();
+  }
+
+  virtual bool run(void* _func, StringBuilder& result, StringBuilder& expect) {
+    typedef int (*Func)(int, void*);
+
+    Func func = asmjit_cast<Func>(_func);
+
+    int resultRet = 0;
+    int expectRet = 1;
+
+    if (!setjmp(globalJmpBuf))
+      resultRet = func(0, (void*)handler);
+    else
+      resultRet = 1;
+
+    result.setFormat("ret={%d}", resultRet);
+    expect.setFormat("ret={%d}", expectRet);
+
+    return resultRet == expectRet;
+  }
+
+  static void handler() { longjmp(globalJmpBuf, 1); }
+};
+
 // ============================================================================
 // [X86TestSuite]
 // ============================================================================
@@ -2730,7 +2881,11 @@ X86TestSuite::X86TestSuite() :
   ADD_TEST(X86Test_CallMisc1);
   ADD_TEST(X86Test_CallMisc2);
   ADD_TEST(X86Test_CallMisc3);
-  ADD_TEST(X86Test_ConstPoolBase);
+
+  // Misc.
+  ADD_TEST(X86Test_MiscConstPool);
+  ADD_TEST(X86Test_MiscUnfollow);
+  ADD_TEST(X86Test_MiscMultiRet);
 }

 X86TestSuite::~X86TestSuite() {
diff --git a/src/asmjit/base/assembler.h b/src/asmjit/base/assembler.h
index 02c4cc3..92874f0 100644
--- a/src/asmjit/base/assembler.h
+++ b/src/asmjit/base/assembler.h
@@ -68,7 +68,12 @@ ASMJIT_ENUM(InstOptions) {
   kInstOptionTaken = 0x00000004,

   //! Condition is unlikely to be taken.
-  kInstOptionNotTaken = 0x00000008
+  kInstOptionNotTaken = 0x00000008,
+
+  //! Don't follow the jump (Compiler-only).
+  //!
+  //! Prevents following the jump during compilation.
+  kInstOptionUnfollow = 0x00000010
 };

 // ============================================================================
diff --git a/src/asmjit/base/compiler.cpp b/src/asmjit/base/compiler.cpp
index 0ff905c..c19c112 100644
--- a/src/asmjit/base/compiler.cpp
+++ b/src/asmjit/base/compiler.cpp
@@ -192,24 +192,26 @@ static ASMJIT_INLINE void BaseCompiler_nodeRemoved(Compiler* self, Node* node_)
     JumpNode* node = static_cast<JumpNode*>(node_);
     TargetNode* target = node->getTarget();

-    // Disconnect.
-    JumpNode** pPrev = &target->_from;
-    for (;;) {
-      ASMJIT_ASSERT(*pPrev != NULL);
-      JumpNode* current = *pPrev;
+    if (target != NULL) {
+      // Disconnect.
+      JumpNode** pPrev = &target->_from;
+      for (;;) {
+        ASMJIT_ASSERT(*pPrev != NULL);
+        JumpNode* current = *pPrev;

-      if (current == NULL)
-        break;
+        if (current == NULL)
+          break;

-      if (current == node) {
-        *pPrev = node->_jumpNext;
-        break;
+        if (current == node) {
+          *pPrev = node->_jumpNext;
+          break;
+        }
+
+        pPrev = &current->_jumpNext;
       }

-      pPrev = &current->_jumpNext;
+      target->subNumRefs();
     }
-
-    target->subNumRefs();
   }
 }
diff --git a/src/asmjit/base/context.cpp b/src/asmjit/base/context.cpp
index 702b69b..89bad1d 100644
--- a/src/asmjit/base/context.cpp
+++ b/src/asmjit/base/context.cpp
@@ -48,6 +48,7 @@ void Context::reset(bool releaseMemory) {
   _stop = NULL;

   _unreachableList.reset();
+  _returningList.reset();
   _jccList.reset();
   _contextVd.reset(releaseMemory);

@@ -277,7 +278,7 @@ Error Context::removeUnreachableCode() {
   while (link != NULL) {
     Node* node = link->getValue();

-    if (node != NULL && node->getPrev() != NULL) {
+    if (node != NULL && node->getPrev() != NULL && node != stop) {
       // Locate all unreachable nodes.
       Node* first = node;
       do {
@@ -292,8 +293,10 @@ Error Context::removeUnreachableCode() {
       node = first;
       do {
         Node* next = node->getNext();
-        if (!node->isInformative() && node->getType() != kNodeTypeAlign)
+        if (!node->isInformative() && node->getType() != kNodeTypeAlign) {
+          ASMJIT_TLOG("[%05d] Unreachable\n", node->getFlowId());
           compiler->removeNode(node);
+        }
         node = next;
       } while (node != end);
     }
@@ -321,23 +324,27 @@ struct LivenessTarget {
 };

 Error Context::livenessAnalysis() {
-  FuncNode* func = getFunc();
-  JumpNode* from = NULL;
-
-  Node* node = func->getEnd();
   uint32_t bLen = static_cast<uint32_t>(
     ((_contextVd.getLength() + VarBits::kEntityBits - 1) / VarBits::kEntityBits));

-  LivenessTarget* ltCur = NULL;
-  LivenessTarget* ltUnused = NULL;
-
-  size_t varMapToVaListOffset = _varMapToVaListOffset;
-
   // No variables.
   if (bLen == 0)
     return kErrorOk;

+  FuncNode* func = getFunc();
+  JumpNode* from = NULL;
+
+  LivenessTarget* ltCur = NULL;
+  LivenessTarget* ltUnused = NULL;
+
+  PodList<Node*>::Link* retPtr = _returningList.getFirst();
+  ASMJIT_ASSERT(retPtr != NULL);
+
+  Node* node = retPtr->getValue();
+
+  size_t varMapToVaListOffset = _varMapToVaListOffset;
   VarBits* bCur = newBits(bLen);
+
   if (bCur == NULL)
     goto _NoMemory;
@@ -491,6 +498,13 @@ _OnDone:
     goto _OnJumpNext;
   }
+
+  retPtr = retPtr->getNext();
+  if (retPtr != NULL) {
+    node = retPtr->getValue();
+    goto _OnVisit;
+  }
+
   return kErrorOk;

 _NoMemory:
diff --git a/src/asmjit/base/context_p.h b/src/asmjit/base/context_p.h
index bd23501..5c0ed45 100644
--- a/src/asmjit/base/context_p.h
+++ b/src/asmjit/base/context_p.h
@@ -160,12 +160,53 @@ struct Context {
   virtual Error fetch() = 0;

   // --------------------------------------------------------------------------
-  // [RemoveUnreachableCode]
+  // [Unreachable Code]
   // --------------------------------------------------------------------------

+  //! Add unreachable-flow data to the unreachable flow list.
+  ASMJIT_INLINE Error addUnreachableNode(Node* node) {
+    PodList<Node*>::Link* link = _baseZone.allocT<PodList<Node*>::Link>();
+    if (link == NULL)
+      return setError(kErrorNoHeapMemory);
+
+    link->setValue(node);
+    _unreachableList.append(link);
+
+    return kErrorOk;
+  }
+
   //! Remove unreachable code.
   virtual Error removeUnreachableCode();

+  // --------------------------------------------------------------------------
+  // [Code-Flow]
+  // --------------------------------------------------------------------------
+
+  //! Add returning node (i.e. node that returns and where liveness analysis
+  //! should start).
+  ASMJIT_INLINE Error addReturningNode(Node* node) {
+    PodList<Node*>::Link* link = _baseZone.allocT<PodList<Node*>::Link>();
+    if (link == NULL)
+      return setError(kErrorNoHeapMemory);
+
+    link->setValue(node);
+    _returningList.append(link);
+
+    return kErrorOk;
+  }
+
+  //! Add jump-flow data to the jcc flow list.
+  ASMJIT_INLINE Error addJccNode(Node* node) {
+    PodList<Node*>::Link* link = _baseZone.allocT<PodList<Node*>::Link>();
+    if (link == NULL)
+      return setError(kErrorNoHeapMemory);
+
+    link->setValue(node);
+    _jccList.append(link);
+
+    return kErrorOk;
+  }
+
   // --------------------------------------------------------------------------
   // [Analyze]
   // --------------------------------------------------------------------------
@@ -251,6 +292,8 @@ struct Context {

   //! Unreachable nodes.
   PodList<Node*> _unreachableList;
+  //! Returning nodes.
+  PodList<Node*> _returningList;
   //! Jump nodes.
   PodList<Node*> _jccList;

diff --git a/src/asmjit/build.h b/src/asmjit/build.h
index 5fa8074..e4a1bf6 100644
--- a/src/asmjit/build.h
+++ b/src/asmjit/build.h
@@ -277,7 +277,7 @@ namespace asmjit { static inline int disabledTrace(...) { return 0; } }
 # if defined(ASMJIT_TRACE)
 #  define ASMJIT_TSEC(_Section_) _Section_
-#  define ASMJIT_TLOG ::printf(__VA_ARGS__)
+#  define ASMJIT_TLOG ::printf
 # else
 #  define ASMJIT_TSEC(_Section_) do {} while(0)
 #  define ASMJIT_TLOG 0 && ::asmjit::disabledTrace
diff --git a/src/asmjit/host.h b/src/asmjit/host.h
index 6031a5c..987f485 100644
--- a/src/asmjit/host.h
+++ b/src/asmjit/host.h
@@ -5,8 +5,8 @@
 // Zlib - See LICENSE.md file in the package.

 // [Guard]
-#ifndef _ASMJIT_ARCH_H
-#define _ASMJIT_ARCH_H
+#ifndef _ASMJIT_HOST_H
+#define _ASMJIT_HOST_H

 // [Dependencies - Core]
 #include "base.h"
@@ -56,4 +56,4 @@ typedef X86YmmVar YmmVar;
 #endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64

 // [Guard]
-#endif // _ASMJIT_ARCH_H
+#endif // _ASMJIT_HOST_H
diff --git a/src/asmjit/x86/x86compiler.cpp b/src/asmjit/x86/x86compiler.cpp
index 424188a..8071e09 100644
--- a/src/asmjit/x86/x86compiler.cpp
+++ b/src/asmjit/x86/x86compiler.cpp
@@ -793,14 +793,24 @@ static ASMJIT_INLINE size_t X86Compiler_getInstSize(uint32_t code) {
 static InstNode* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) {
   if (IntUtil::inInterval(code, _kX86InstIdJbegin, _kX86InstIdJend)) {
     JumpNode* node = new(p) JumpNode(self, code, options, opList, opCount);
-    TargetNode* jTarget = self->getTargetById(opList[0].getId());
+    TargetNode* jTarget = NULL;
+
+    if ((options & kInstOptionUnfollow) == 0) {
+      if (opList[0].isLabel())
+        jTarget = self->getTargetById(opList[0].getId());
+      else
+        options |= kInstOptionUnfollow;
+    }

     node->orFlags(code == kX86InstIdJmp ? kNodeFlagIsJmp | kNodeFlagIsTaken : kNodeFlagIsJcc);
     node->_target = jTarget;
-    node->_jumpNext = static_cast<JumpNode*>(jTarget->_from);
+    node->_jumpNext = NULL;

-    jTarget->_from = node;
-    jTarget->addNumRefs();
+    if (jTarget) {
+      node->_jumpNext = static_cast<JumpNode*>(jTarget->_from);
+      jTarget->_from = node;
+      jTarget->addNumRefs();
+    }

     // The 'jmp' is always taken, conditional jump can contain hint, we detect it.
     if (code == kX86InstIdJmp)
diff --git a/src/asmjit/x86/x86compiler.h b/src/asmjit/x86/x86compiler.h
index 76b4193..aa2d0ab 100644
--- a/src/asmjit/x86/x86compiler.h
+++ b/src/asmjit/x86/x86compiler.h
@@ -2448,6 +2448,12 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler {

   ASMJIT_X86_EMIT_OPTIONS(X86Compiler)

+  //! Force the compiler to not follow the conditional or unconditional jump.
+  ASMJIT_INLINE X86Compiler& unfollow() {
+    _instOptions |= kInstOptionUnfollow;
+    return *this;
+  }
+
   // --------------------------------------------------------------------------
   // [Members]
   // --------------------------------------------------------------------------
diff --git a/src/asmjit/x86/x86context.cpp b/src/asmjit/x86/x86context.cpp
index cb134ee..dced6ed 100644
--- a/src/asmjit/x86/x86context.cpp
+++ b/src/asmjit/x86/x86context.cpp
@@ -1895,35 +1895,6 @@ static void X86Context_prepareSingleVarInst(uint32_t instId, VarAttr* va) {
 // [asmjit::X86Context - Helpers]
 // ============================================================================

-//! \internal
-//!
-//! Add unreachable-flow data to the unreachable flow list.
-static ASMJIT_INLINE Error X86Context_addUnreachableNode(X86Context* self, Node* node) {
-  PodList<Node*>::Link* link = self->_baseZone.allocT<PodList<Node*>::Link>();
-  if (link == NULL)
-    return self->setError(kErrorNoHeapMemory);
-
-  link->setValue(node);
-  self->_unreachableList.append(link);
-
-  return kErrorOk;
-}
-
-//! \internal
-//!
-//! Add jump-flow data to the jcc flow list.
-static ASMJIT_INLINE Error X86Context_addJccNode(X86Context* self, Node* node) {
-  PodList<Node*>::Link* link = self->_baseZone.allocT<PodList<Node*>::Link>();
-
-  if (link == NULL)
-    ASMJIT_PROPAGATE_ERROR(self->setError(kErrorNoHeapMemory));
-
-  link->setValue(node);
-  self->_jccList.append(link);
-
-  return kErrorOk;
-}
-
 //! \internal
 //!
 //! Get mask of all registers actually used to pass function arguments.
@@ -2181,7 +2152,7 @@ Error X86Context::fetch() {
   // Function flags.
   func->clearFuncFlags(
-    kFuncFlagIsNaked |
+    kFuncFlagIsNaked |
     kX86FuncFlagPushPop |
     kX86FuncFlagEmms |
     kX86FuncFlagSFence |
@@ -2660,8 +2631,6 @@ _NextGroup:
       // Handle conditional/unconditional jump.
       if (node->isJmpOrJcc()) {
         JumpNode* jNode = static_cast<JumpNode*>(node);
-
-        Node* jNext = jNode->getNext();
         TargetNode* jTarget = jNode->getTarget();

         // If this jump is unconditional we put next node to unreachable node
@@ -2671,13 +2640,23 @@ _NextGroup:
         // We also advance our node pointer to the target node to simulate
         // natural flow of the function.
         if (jNode->isJmp()) {
-          if (!jNext->isFetched())
-            ASMJIT_PROPAGATE_ERROR(X86Context_addUnreachableNode(this, jNext));
+          if (!next->isFetched())
+            ASMJIT_PROPAGATE_ERROR(addUnreachableNode(next));
+
+          // Jump not followed.
+          if (jTarget == NULL) {
+            ASMJIT_PROPAGATE_ERROR(addReturningNode(jNode));
+            goto _NextGroup;
+          }

           node_ = jTarget;
           goto _Do;
         }
         else {
+          // Jump not followed.
+          if (jTarget == NULL)
+            break;
+
           if (jTarget->isFetched()) {
             uint32_t jTargetFlowId = jTarget->getFlowId();
@@ -2688,13 +2667,12 @@ _NextGroup:
             jNode->orFlags(kNodeFlagIsTaken);
           }
         }
-        else if (jNext->isFetched()) {
+        else if (next->isFetched()) {
           node_ = jTarget;
           goto _Do;
         }
         else {
-          ASMJIT_PROPAGATE_ERROR(X86Context_addJccNode(this, jNode));
-
+          ASMJIT_PROPAGATE_ERROR(addJccNode(jNode));
           node_ = X86Context_getJccFlow(jNode);
           goto _Do;
         }
@@ -2759,6 +2737,7 @@ _NextGroup:
       // ----------------------------------------------------------------------

       case kNodeTypeEnd: {
+        ASMJIT_PROPAGATE_ERROR(addReturningNode(node_));
         goto _NextGroup;
       }

@@ -2768,8 +2747,9 @@ _NextGroup:
       case kNodeTypeRet: {
         RetNode* node = static_cast<RetNode*>(node_);
-        X86FuncDecl* decl = func->getDecl();
+        ASMJIT_PROPAGATE_ERROR(addReturningNode(node));

+        X86FuncDecl* decl = func->getDecl();
         if (decl->hasRet()) {
           const FuncInOut& ret = decl->getRet(0);
           uint32_t retClass = x86VarTypeToClass(ret.getVarType());
@@ -2802,7 +2782,10 @@ _NextGroup:
           }
           VI_END(node_);
         }
-        break;
+
+        if (!next->isFetched())
+          ASMJIT_PROPAGATE_ERROR(addUnreachableNode(next));
+        goto _NextGroup;
       }

       // ----------------------------------------------------------------------
@@ -2946,6 +2929,14 @@ _NextGroup:
   } while (node_ != stop);

 _Done:
+  // Mark exit label and end node as fetched, otherwise they can be removed by
+  // `removeUnreachableCode()`, which would lead to crash in some later step.
+  node_ = func->getEnd();
+  if (!node_->isFetched()) {
+    func->getExitNode()->setFlowId(++flowId);
+    node_->setFlowId(++flowId);
+  }
+
   ASMJIT_TLOG("[Fetch] === Done ===\n\n");
   return kErrorOk;

@@ -3771,8 +3762,12 @@ ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRe
       break;

     // Advance on non-conditional jump.
-    if (node->hasFlag(kNodeFlagIsJmp))
+    if (node->hasFlag(kNodeFlagIsJmp)) {
       node = static_cast<JumpNode*>(node)->getTarget();
+      // Stop on jump that is not followed.
+      if (node == NULL)
+        break;
+    }

     node = node->getNext();
     ASMJIT_ASSERT(node != NULL);
@@ -4378,8 +4373,12 @@ ASMJIT_INLINE uint32_t X86CallAlloc::guessAlloc(VarData* vd, uint32_t allocableR
       break;

     // Advance on non-conditional jump.
-    if (node->hasFlag(kNodeFlagIsJmp))
+    if (node->hasFlag(kNodeFlagIsJmp)) {
       node = static_cast<JumpNode*>(node)->getTarget();
+      // Stop on jump that is not followed.
+      if (node == NULL)
+        break;
+    }

     node = node->getNext();
     ASMJIT_ASSERT(node != NULL);
@@ -5317,6 +5316,14 @@ _NextGroup:
       JumpNode* node = static_cast<JumpNode*>(node_);
       TargetNode* jTarget = node->getTarget();

+      // Target not followed.
+      if (jTarget == NULL) {
+        if (node->isJmp())
+          goto _NextGroup;
+        else
+          break;
+      }
+
       if (node->isJmp()) {
         if (jTarget->hasState()) {
           compiler->_setCursor(node->getPrev());
@@ -5526,7 +5533,10 @@ _NextGroup:
       // If node is `jmp` we follow it as well.
       if (node_->isJmp()) {
         node_ = static_cast<JumpNode*>(node_)->getTarget();
-        goto _Advance;
+        if (node_ == NULL)
+          goto _NextGroup;
+        else
+          goto _Advance;
       }

       // Handle stop nodes.
diff --git a/src/asmjit/x86/x86inst.h b/src/asmjit/x86/x86inst.h
index fe625ec..0136f2a 100644
--- a/src/asmjit/x86/x86inst.h
+++ b/src/asmjit/x86/x86inst.h
@@ -1164,12 +1164,6 @@ ASMJIT_ENUM(X86InstId) {
 //! X86/X64 instruction emit options, mainly for internal purposes.
 ASMJIT_ENUM(X86InstOptions) {
-  //! Emit instruction with LOCK prefix.
-  //!
-  //! If this option is used and instruction doesn't support LOCK prefix an
-  //! invalid instruction error is generated.
-  kX86InstOptionLock = 0x00000010,
-
   //! Force REX prefix (X64).
   //!
   //! This option should be used carefully as there are combinations of
@@ -1183,11 +1177,17 @@ ASMJIT_ENUM(X86InstOptions) {
   //! Reserved by `X86Assembler`, do not use!
   _kX86InstOptionNoRex = 0x00000080,

+  //! Emit instruction with LOCK prefix.
+  //!
+  //! If this option is used and instruction doesn't support LOCK prefix an
+  //! invalid instruction error is generated.
+  kX86InstOptionLock = 0x00000100,
+
   //! Force 3-byte VEX prefix even if the instruction is encodable by 2-byte
   //! VEX prefix (AVX).
   //!
   //! Ignored if the instruction is not AVX or `kX86InstOptionEVEX` is used.
-  kX86InstOptionVex3 = 0x00000100,
+  kX86InstOptionVex3 = 0x00001000,

   //! Force 4-byte EVEX prefix even if the instruction is encodable by using
   //! VEX prefix. Please note that all higher bits from `kX86InstOptionEvex`
diff --git a/src/asmjit/x86/x86operand.h b/src/asmjit/x86/x86operand.h
index 7d573fd..d0cbcee 100644
--- a/src/asmjit/x86/x86operand.h
+++ b/src/asmjit/x86/x86operand.h
@@ -282,7 +282,7 @@ struct X86ZmmReg { Operand::VRegOp data; };
 //! 16 Xmm/Ymm/Zmm registers. AVX512 instruction set doubles the number of SIMD
 //! registers (Xmm/Ymm/Zmm) to 32, this mode has to be explicitly enabled to
 //! take effect as it changes some assumptions.
-//! 
+//!
 //! `X86RegCount` is also used extensively by `X86Compiler`'s register allocator
 //! and data structures. Fp registers were omitted as they are never mapped to
 //! variables, thus, not needed to be managed.
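
A minimal usage sketch of the unfollow option introduced by this patch, mirroring the X86Test_MiscUnfollow test above; the function name and its arguments are illustrative only and not part of the patch:

  // Sketch: jump to a runtime-computed address that the Compiler must not
  // follow. unfollow() sets kInstOptionUnfollow for the next instruction, so
  // fetch() records the jmp as a returning node (no TargetNode is followed)
  // and liveness analysis starts from it as it does from a ret.
  using namespace asmjit;

  static void emitTailJump(X86Compiler& c, const X86GpVar& target) {
    c.unfollow().jmp(target);
  }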