Mirror of https://github.com/asmjit/asmjit.git
[Bug] Fixed invalid code generation related to jump tables (Compiler)
@@ -46,6 +46,10 @@ class BaseBuilder;
 class BaseNode;
 #endif
 
+#ifndef ASMJIT_NO_COMPILER
+class BaseCompiler;
+#endif
+
 // ============================================================================
 // [asmjit::FormatOptions]
 // ============================================================================
@@ -232,14 +232,17 @@ public:
   if (ASMJIT_UNLIKELY(!targetBlock))
     return DebugUtils::errored(kErrorOutOfMemory);
 
+  targetBlock->makeTargetable();
   ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
 }
 else {
   // Not a label - could be jump with reg/mem operand, which
   // means that it can go anywhere. Such jumps must either be
   // annotated so the CFG can be properly constructed, otherwise
-  // we assume the worst case - can jump to every basic block.
+  // we assume the worst case - can jump to any basic block.
   JumpAnnotation* jumpAnnotation = nullptr;
+  _curBlock->addFlags(RABlock::kFlagHasJumpTable);
+
   if (inst->type() == BaseNode::kNodeJump)
     jumpAnnotation = inst->as<JumpNode>()->annotation();
 
@@ -256,6 +259,7 @@ public:
   // Prevents adding basic-block successors multiple times.
   if (!targetBlock->hasTimestamp(timestamp)) {
     targetBlock->setTimestamp(timestamp);
+    targetBlock->makeTargetable();
     ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
   }
 }
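The annotation mentioned in the comment above comes from the Compiler's jump-table support. Below is a minimal sketch of how an indirect (reg/mem) jump might be annotated so the CFG builder knows its possible targets. The labels, helper name, and surrounding function setup are illustrative, and the jmp()/newJumpAnnotation() usage is assumed from asmjit's documented jump-table examples, not taken from this commit:

    #include <asmjit/x86.h>
    using namespace asmjit;

    // Sketch only: emits an annotated indirect jump inside an already
    // constructed function; `cc` and `index` are assumed to be set up by
    // the caller (JitRuntime/CodeHolder/addFunc omitted).
    static void emitAnnotatedJump(x86::Compiler& cc, const x86::Gp& index) {
      Label L_Case0 = cc.newLabel();
      Label L_Case1 = cc.newLabel();
      Label L_Table = cc.newLabel();

      // List every label this reg/mem jump can reach. Without such an
      // annotation the CFG builder must assume the jump can go to any
      // targetable block.
      JumpAnnotation* annotation = cc.newJumpAnnotation();
      annotation->addLabel(L_Case0);
      annotation->addLabel(L_Case1);

      x86::Gp target = cc.newIntPtr("target");
      cc.lea(target, x86::ptr(L_Table));              // address of the jump table
      cc.jmp(x86::ptr(target, index, 3), annotation); // 8-byte entries, annotated

      cc.bind(L_Case0);
      // ... case 0 body ...
      cc.bind(L_Case1);
      // ... case 1 body ...

      cc.bind(L_Table);        // jump table storing absolute label addresses
      cc.embedLabel(L_Case0);
      cc.embedLabel(L_Case1);
    }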
@@ -331,7 +335,7 @@ public:
   if (!_curBlock) {
     // If the current code is unreachable the label makes it reachable
     // again. We may remove the whole block in the future if it's not
-    // referenced.
+    // referenced though.
     _curBlock = node->passData<RABlock>();
 
     if (_curBlock) {
@@ -341,13 +345,14 @@ public:
     break;
   }
   else {
-    // No block assigned, to create a new one, and assign it.
+    // No block assigned - create a new one and assign it.
     _curBlock = _pass->newBlock(node);
     if (ASMJIT_UNLIKELY(!_curBlock))
       return DebugUtils::errored(kErrorOutOfMemory);
     node->setPassData<RABlock>(_curBlock);
   }
 
+  _curBlock->makeTargetable();
   _hasCode = false;
   _blockRegStats.reset();
   ASMJIT_PROPAGATE(_pass->addBlock(_curBlock));
@@ -355,10 +360,13 @@ public:
 else {
   if (node->hasPassData()) {
     RABlock* consecutive = node->passData<RABlock>();
+    consecutive->makeTargetable();
+
     if (_curBlock == consecutive) {
       // The label currently processed is part of the current block. This
       // is only possible for multiple labels that are right next to each
-      // other, or are separated by non-code nodes like directives and comments.
+      // other or labels that are separated by non-code nodes like directives
+      // and comments.
       if (ASMJIT_UNLIKELY(_hasCode))
         return DebugUtils::errored(kErrorInvalidState);
     }
@@ -392,6 +400,7 @@ public:
 RABlock* consecutive = _pass->newBlock(node);
 if (ASMJIT_UNLIKELY(!consecutive))
   return DebugUtils::errored(kErrorOutOfMemory);
+consecutive->makeTargetable();
 
 ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
 ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
@@ -479,6 +488,8 @@ public:
 
 if (ASMJIT_UNLIKELY(!_retBlock))
   return DebugUtils::errored(kErrorOutOfMemory);
+
+_retBlock->makeTargetable();
 ASMJIT_PROPAGATE(_pass->addExitBlock(_retBlock));
 
 if (node != func) {
@@ -521,13 +532,13 @@ public:
   size_t blockCount = blocks.size();
 
   // NOTE: Iterate from `1` as the first block is the entry block, we don't
-  // allow the entry to be a successor of block that ends with unknown jump.
+  // allow the entry to be a successor of any block.
   RABlock* consecutive = block->consecutive();
   for (size_t i = 1; i < blockCount; i++) {
-    RABlock* successor = blocks[i];
-    if (successor == consecutive)
+    RABlock* candidate = blocks[i];
+    if (candidate == consecutive || !candidate->isTargetable())
      continue;
-    block->appendSuccessor(successor);
+    block->appendSuccessor(candidate);
   }
 
   return shareAssignmentAcrossSuccessors(block);
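The isTargetable() filter added here is the core of the fix: a block that never received a label cannot be reached by an indirect jump, so it should not become a successor of a block that ends with an unannotated jump. A self-contained toy model of that rule follows; it is not asmjit code, only the shape of the check:

    #include <cstddef>
    #include <vector>

    struct ToyBlock {
      bool targetable = false;           // set when the block gets a label / jump target
      std::vector<ToyBlock*> successors;
    };

    // Conservative CFG completion for a block that ends with an un-annotated
    // indirect jump: it may reach any *targetable* block, excluding the entry
    // block at index 0 and the block's own consecutive block.
    static void addWorstCaseSuccessors(ToyBlock* block, ToyBlock* consecutive,
                                       std::vector<ToyBlock*>& blocks) {
      for (std::size_t i = 1; i < blocks.size(); i++) {
        ToyBlock* candidate = blocks[i];
        if (candidate == consecutive || !candidate->targetable)
          continue;
        block->successors.push_back(candidate);
      }
    }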
@@ -370,7 +370,7 @@ Cleared:
   return kErrorOk;
 }
 
-Error RALocalAllocator::spillGpScratchRegsBeforeEntry(uint32_t scratchRegs) noexcept {
+Error RALocalAllocator::spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept {
   uint32_t group = BaseReg::kGroupGp;
   Support::BitWordIterator<uint32_t> it(scratchRegs);
 
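Support::BitWordIterator, used above to walk the scratch-register mask, yields the index of each set bit. A small standalone sketch of that iteration; the mask value and the printed message are illustrative:

    #include <asmjit/core.h>
    #include <cstdint>
    #include <cstdio>
    using namespace asmjit;

    int main() {
      uint32_t scratchRegs = 0x00000015u;  // bits 0, 2 and 4 set
      Support::BitWordIterator<uint32_t> it(scratchRegs);
      while (it.hasNext()) {
        uint32_t physId = it.next();       // index of the next set bit == physical reg id
        std::printf("scratch gp #%u would be handled before block entry\n", physId);
      }
      return 0;
    }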
@@ -931,9 +931,6 @@ Error RALocalAllocator::allocJumpTable(InstNode* node, const RABlocks& targets,
   if (targets.empty())
     return DebugUtils::errored(kErrorInvalidState);
 
-  if (targets.size() == 1)
-    return allocBranch(node, targets[0], cont);
-
   // The cursor must point to the previous instruction for a possible instruction insertion.
   _cc->_setCursor(node->prev());
 
@@ -155,10 +155,10 @@ public:
                   bool tryMode) noexcept;
 
   inline Error spillRegsBeforeEntry(RABlock* block) noexcept {
-    return spillGpScratchRegsBeforeEntry(block->entryScratchGpRegs());
+    return spillScratchGpRegsBeforeEntry(block->entryScratchGpRegs());
   }
 
-  Error spillGpScratchRegsBeforeEntry(uint32_t scratchRegs) noexcept;
+  Error spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept;
 
   //! \}
@@ -382,9 +382,20 @@ Error RAPass::initSharedAssignments(const ZoneVector<uint32_t>& sharedAssignment
   // the assignment itself. It will then be used instead of RABlock's own scratch
   // regs mask, as shared assignments have precedence.
   for (RABlock* block : _blocks) {
+    if (block->hasJumpTable()) {
+      const RABlocks& successors = block->successors();
+      if (!successors.empty()) {
+        RABlock* firstSuccessor = successors[0];
+        // NOTE: Shared assignments connect all possible successors so we only
+        // need the first to propagate exit scratch gp registers.
+        ASMJIT_ASSERT(firstSuccessor->hasSharedAssignmentId());
+        RASharedAssignment& sa = _sharedAssignments[firstSuccessor->sharedAssignmentId()];
+        sa.addEntryScratchGpRegs(block->exitScratchGpRegs());
+      }
+    }
     if (block->hasSharedAssignmentId()) {
       RASharedAssignment& sa = _sharedAssignments[block->sharedAssignmentId()];
-      sa.addScratchGpRegs(block->_entryScratchGpRegs);
+      sa.addEntryScratchGpRegs(block->_entryScratchGpRegs);
     }
   }
 
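The addEntryScratchGpRegs() call added above only accumulates a bit-mask, so the exit scratch registers of every jump-table block feeding a shared assignment end up OR-ed together. A tiny standalone illustration of that accumulation; the struct and mask values are made up:

    #include <cstdint>
    #include <cstdio>

    struct SharedAssignmentSketch {
      uint32_t entryScratchGpRegs = 0;
      // OR-accumulates; repeated calls never lose previously recorded registers.
      void addEntryScratchGpRegs(uint32_t mask) { entryScratchGpRegs |= mask; }
    };

    int main() {
      SharedAssignmentSketch sa;
      sa.addEntryScratchGpRegs(0x00000004u);  // e.g. exit scratch regs of jump-table block A
      sa.addEntryScratchGpRegs(0x00000011u);  // e.g. exit scratch regs of jump-table block B
      std::printf("entry scratch mask: 0x%08X\n", sa.entryScratchGpRegs); // prints 0x00000015
      return 0;
    }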
@@ -60,19 +60,23 @@ public:
     kFlagIsConstructed = 0x00000001u,
     //! Block is reachable (set by `buildViews()`).
     kFlagIsReachable = 0x00000002u,
+    //! Block is a target (has an associated label or multiple labels).
+    kFlagIsTargetable = 0x00000004u,
     //! Block has been allocated.
-    kFlagIsAllocated = 0x00000004u,
+    kFlagIsAllocated = 0x00000008u,
     //! Block is a function-exit.
-    kFlagIsFuncExit = 0x00000008u,
+    kFlagIsFuncExit = 0x00000010u,
 
     //! Block has a terminator (jump, conditional jump, ret).
-    kFlagHasTerminator = 0x00000010u,
+    kFlagHasTerminator = 0x00000100u,
     //! Block naturally flows to the next block.
-    kFlagHasConsecutive = 0x00000020u,
+    kFlagHasConsecutive = 0x00000200u,
+    //! Block has a jump to a jump-table at the end.
+    kFlagHasJumpTable = 0x00000400u,
     //! Block contains fixed registers (precolored).
-    kFlagHasFixedRegs = 0x00000040u,
+    kFlagHasFixedRegs = 0x00000800u,
     //! Block contains function calls.
-    kFlagHasFuncCalls = 0x00000080u
+    kFlagHasFuncCalls = 0x00001000u
   };
 
   //! Register allocator pass.
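The renumbering keeps every flag a distinct single bit, which is what lets the new kFlagIsTargetable and kFlagHasJumpTable be OR-ed into the same _flags word as the existing flags. A standalone sketch of that property; the values mirror the new enum, while the names and checks are illustrative:

    #include <cstdint>

    enum BlockFlagsSketch : uint32_t {
      kIsTargetable = 0x00000004u,
      kIsAllocated  = 0x00000008u,
      kHasJumpTable = 0x00000400u
    };

    // Each flag occupies its own bit, so combining and testing them is lossless.
    static_assert((kIsTargetable & kIsAllocated) == 0, "flags must not overlap");
    static_assert((kIsAllocated & kHasJumpTable) == 0, "flags must not overlap");

    constexpr uint32_t kExampleFlags = kIsTargetable | kHasJumpTable;
    static_assert((kExampleFlags & kIsTargetable) != 0, "hasFlag() is a bitwise AND");
    static_assert((kExampleFlags & kIsAllocated) == 0, "unset flags test false");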
@@ -114,11 +118,11 @@ public:
   RABlocks _successors;
 
   enum LiveType : uint32_t {
-    kLiveIn = 0,
-    kLiveOut = 1,
-    kLiveGen = 2,
-    kLiveKill = 3,
-    kLiveCount = 4
+    kLiveIn = 0,
+    kLiveOut = 1,
+    kLiveGen = 2,
+    kLiveKill = 3,
+    kLiveCount = 4
   };
 
   //! Liveness in/out/use/kill.
@@ -180,6 +184,7 @@ public:
 
   inline bool isConstructed() const noexcept { return hasFlag(kFlagIsConstructed); }
   inline bool isReachable() const noexcept { return hasFlag(kFlagIsReachable); }
+  inline bool isTargetable() const noexcept { return hasFlag(kFlagIsTargetable); }
   inline bool isAllocated() const noexcept { return hasFlag(kFlagIsAllocated); }
   inline bool isFuncExit() const noexcept { return hasFlag(kFlagIsFuncExit); }
 
@@ -189,12 +194,14 @@ public:
   }
 
   inline void makeReachable() noexcept { _flags |= kFlagIsReachable; }
+  inline void makeTargetable() noexcept { _flags |= kFlagIsTargetable; }
   inline void makeAllocated() noexcept { _flags |= kFlagIsAllocated; }
 
   inline const RARegsStats& regsStats() const noexcept { return _regsStats; }
 
   inline bool hasTerminator() const noexcept { return hasFlag(kFlagHasTerminator); }
   inline bool hasConsecutive() const noexcept { return hasFlag(kFlagHasConsecutive); }
+  inline bool hasJumpTable() const noexcept { return hasFlag(kFlagHasJumpTable); }
 
   inline bool hasPredecessors() const noexcept { return !_predecessors.empty(); }
   inline bool hasSuccessors() const noexcept { return !_successors.empty(); }
@@ -219,6 +226,7 @@ public:
   inline uint32_t entryScratchGpRegs() const noexcept;
   inline uint32_t exitScratchGpRegs() const noexcept { return _exitScratchGpRegs; }
 
+  inline void addEntryScratchGpRegs(uint32_t regMask) noexcept { _entryScratchGpRegs |= regMask; }
   inline void addExitScratchGpRegs(uint32_t regMask) noexcept { _exitScratchGpRegs |= regMask; }
 
   inline bool hasSharedAssignmentId() const noexcept { return _sharedAssignmentId != Globals::kInvalidId; }
@@ -629,7 +637,7 @@ public:
     _workToPhysMap(nullptr) {}
 
   inline uint32_t entryScratchGpRegs() const noexcept { return _entryScratchGpRegs; }
-  inline void addScratchGpRegs(uint32_t mask) noexcept { _entryScratchGpRegs |= mask; }
+  inline void addEntryScratchGpRegs(uint32_t mask) noexcept { _entryScratchGpRegs |= mask; }
 
   inline const ZoneBitVector& liveIn() const noexcept { return _liveIn; }