Diffstat (limited to 'erts/emulator/asmjit/core/rapass.cpp')
-rw-r--r--  erts/emulator/asmjit/core/rapass.cpp | 99
1 file changed, 43 insertions(+), 56 deletions(-)
diff --git a/erts/emulator/asmjit/core/rapass.cpp b/erts/emulator/asmjit/core/rapass.cpp
index 79709f69d6..29b7dea9f8 100644
--- a/erts/emulator/asmjit/core/rapass.cpp
+++ b/erts/emulator/asmjit/core/rapass.cpp
@@ -114,11 +114,14 @@ Error BaseRAPass::runOnFunction(Zone* zone, Logger* logger, FuncNode* func) {
#ifndef ASMJIT_NO_LOGGING
_logger = logger;
_formatOptions.reset();
- _diagnosticOptions = DiagnosticOptions::kNone;
+ _diagnosticOptions = _cb->diagnosticOptions();
if (logger) {
_formatOptions = logger->options();
- _diagnosticOptions = _cb->diagnosticOptions();
+ }
+ else {
+ _diagnosticOptions &= ~(DiagnosticOptions::kRADebugCFG |
+ DiagnosticOptions::kRADebugUnreachable);
}
#else
DebugUtils::unused(logger);
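The hunk above changes where the RA pass gets its diagnostic options: they are now always seeded from the emitter via _cb->diagnosticOptions(), and only the two RA debug flags are masked out when no logger is attached, presumably because those flags produce output solely through the logger. A minimal sketch of that masking pattern, using hypothetical flag values rather than asmjit's real enum:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for asmjit's DiagnosticOptions flag enum.
enum class DiagFlags : uint32_t {
  kNone                 = 0,
  kValidateIntermediate = 1u << 0, // hypothetical: survives even without a logger
  kRADebugCFG           = 1u << 1,
  kRADebugUnreachable   = 1u << 2,
};

constexpr DiagFlags operator|(DiagFlags a, DiagFlags b) { return DiagFlags(uint32_t(a) | uint32_t(b)); }
constexpr DiagFlags operator&(DiagFlags a, DiagFlags b) { return DiagFlags(uint32_t(a) & uint32_t(b)); }
constexpr DiagFlags operator~(DiagFlags a) { return DiagFlags(~uint32_t(a)); }
inline DiagFlags& operator&=(DiagFlags& a, DiagFlags b) { a = a & b; return a; }

DiagFlags effectiveDiagnostics(DiagFlags fromEmitter, bool hasLogger) {
  DiagFlags d = fromEmitter; // always start from the emitter's options
  if (!hasLogger) {
    // Logging-only RA debug flags are useless without a logger.
    d &= ~(DiagFlags::kRADebugCFG | DiagFlags::kRADebugUnreachable);
  }
  return d;
}

int main() {
  DiagFlags d = effectiveDiagnostics(DiagFlags::kValidateIntermediate | DiagFlags::kRADebugCFG, false);
  std::printf("0x%x\n", unsigned(d)); // 0x1: validation survives, CFG debug is stripped
}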
@@ -328,9 +331,14 @@ Error BaseRAPass::initSharedAssignments(const ZoneVector<uint32_t>& sharedAssign
RABlock* firstSuccessor = successors[0];
// NOTE: Shared assignments connect all possible successors so we only need the first to propagate exit scratch
// GP registers.
- ASMJIT_ASSERT(firstSuccessor->hasSharedAssignmentId());
- RASharedAssignment& sa = _sharedAssignments[firstSuccessor->sharedAssignmentId()];
- sa.addEntryScratchGpRegs(block->exitScratchGpRegs());
+ if (firstSuccessor->hasSharedAssignmentId()) {
+ RASharedAssignment& sa = _sharedAssignments[firstSuccessor->sharedAssignmentId()];
+ sa.addEntryScratchGpRegs(block->exitScratchGpRegs());
+ }
+ else {
+ // This is only allowed if there is a single successor - in that case shared assignment is not necessary.
+ ASMJIT_ASSERT(successors.size() == 1u);
+ }
}
}
if (block->hasSharedAssignmentId()) {
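This hunk replaces a hard assertion with a tolerated case: the first successor may legitimately lack a shared assignment id when the block has exactly one successor, because a single edge needs no shared entry state. A condensed sketch of the updated control flow, with simplified stand-ins for RABlock and RASharedAssignment:

#include <cassert>
#include <cstdint>
#include <vector>

struct SharedAssignment {
  uint32_t entryScratchGpRegs = 0;
  void addEntryScratchGpRegs(uint32_t regs) { entryScratchGpRegs |= regs; }
};

struct Block {
  static constexpr uint32_t kNoId = 0xFFFFFFFFu;
  uint32_t sharedAssignmentId = kNoId;
  uint32_t exitScratchGpRegs = 0;
  std::vector<Block*> successors;
  bool hasSharedAssignmentId() const { return sharedAssignmentId != kNoId; }
};

// Precondition: block has at least one successor.
void propagateExitScratch(Block* block, std::vector<SharedAssignment>& shared) {
  Block* first = block->successors[0];
  if (first->hasSharedAssignmentId()) {
    // Shared assignments connect all successors, so propagating through
    // the first one is enough.
    shared[first->sharedAssignmentId].addEntryScratchGpRegs(block->exitScratchGpRegs);
  }
  else {
    // Only legal with a single successor: no shared assignment is necessary.
    assert(block->successors.size() == 1u);
  }
}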
@@ -1483,18 +1491,12 @@ Error BaseRAPass::runLocalAllocator() noexcept {
cc()->_setCursor(unconditionalJump ? prev->prev() : prev);
if (consecutive->hasEntryAssignment()) {
- ASMJIT_PROPAGATE(
- lra.switchToAssignment(
- consecutive->entryPhysToWorkMap(),
- consecutive->entryWorkToPhysMap(),
- consecutive->liveIn(),
- consecutive->isAllocated(),
- false));
+ ASMJIT_PROPAGATE(lra.switchToAssignment(consecutive->entryPhysToWorkMap(), consecutive->liveIn(), consecutive->isAllocated(), false));
}
else {
ASMJIT_PROPAGATE(lra.spillRegsBeforeEntry(consecutive));
ASMJIT_PROPAGATE(setBlockEntryAssignment(consecutive, block, lra._curAssignment));
- lra._curAssignment.copyFrom(consecutive->entryPhysToWorkMap(), consecutive->entryWorkToPhysMap());
+ lra._curAssignment.copyFrom(consecutive->entryPhysToWorkMap());
}
}
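Every remaining hunk follows from one structural change: the redundant WorkToPhysMap is removed, so switchToAssignment() and copyFrom() lose a parameter, and the inverse (work-to-phys) lookup is reconstructed on demand by scanning a group's assigned registers in the PhysToWorkMap. A sketch of that reverse lookup over a simplified map layout (the real asmjit structures differ in detail):

#include <bit>
#include <cstdint>
#include <vector>

constexpr uint32_t kPhysNone = 0xFFFFFFFFu;

// Simplified PhysToWorkMap: per group, a bit mask of assigned physical
// registers plus a flat workIds[] array indexed by physBaseIndex + physId.
struct PhysToWorkMap {
  std::vector<uint32_t> assigned; // one RegMask per register group
  std::vector<uint32_t> workIds;  // one slot per physical register
};

// Reverse lookup: which physical register (if any) currently holds workId?
uint32_t workToPhysId(const PhysToWorkMap& map, uint32_t group,
                      uint32_t physBaseIndex, uint32_t workId) {
  uint32_t mask = map.assigned[group];
  while (mask) {
    uint32_t physId = uint32_t(std::countr_zero(mask));
    if (map.workIds[physBaseIndex + physId] == workId)
      return physId;
    mask &= mask - 1; // clear the bit just tested
  }
  return kPhysNone;
}

The trade-off is a short linear scan (bounded by the width of one RegMask) instead of an O(1) table lookup, in exchange for one less map to allocate, clone, and keep synchronized.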
@@ -1526,7 +1528,7 @@ Error BaseRAPass::runLocalAllocator() noexcept {
}
// If we switched to some block we have to update the local allocator.
- lra.replaceAssignment(block->entryPhysToWorkMap(), block->entryWorkToPhysMap());
+ lra.replaceAssignment(block->entryPhysToWorkMap());
}
_clobberedRegs.op<Support::Or>(lra._clobberedRegs);
@@ -1546,12 +1548,10 @@ Error BaseRAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlo
}
PhysToWorkMap* physToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
- WorkToPhysMap* workToPhysMap = cloneWorkToPhysMap(fromAssignment.workToPhysMap());
-
- if (ASMJIT_UNLIKELY(!physToWorkMap || !workToPhysMap))
+ if (ASMJIT_UNLIKELY(!physToWorkMap))
return DebugUtils::errored(kErrorOutOfMemory);
- block->setEntryAssignment(physToWorkMap, workToPhysMap);
+ block->setEntryAssignment(physToWorkMap);
// True if this is the first (entry) block, nothing to do in this case.
if (block == fromBlock) {
@@ -1562,10 +1562,6 @@ Error BaseRAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlo
return kErrorOk;
}
- RAAssignment as;
- as.initLayout(_physRegCount, workRegs());
- as.initMaps(physToWorkMap, workToPhysMap);
-
const ZoneBitVector& liveOut = fromBlock->liveOut();
const ZoneBitVector& liveIn = block->liveIn();
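With the WorkToPhysMap gone, the temporary RAAssignment that was layered over the cloned maps just to call unassign() is unnecessary; liveness-based pruning now mutates the cloned PhysToWorkMap directly. A sketch of such a direct unassign, over a hypothetical layout that mirrors the unassign() calls in the following hunk:

#include <cassert>
#include <cstdint>
#include <vector>

struct PhysToWorkMap {
  std::vector<uint32_t> assigned; // RegMask per register group
  std::vector<uint32_t> workIds;  // indexed by physBaseIndex + physId

  // Clear the group's assigned bit and poison the corresponding workIds slot.
  void unassign(uint32_t group, uint32_t physId, uint32_t indexInWorkIds) {
    assert((assigned[group] & (1u << physId)) != 0);
    assigned[group] &= ~(1u << physId);
    workIds[indexInWorkIds] = 0xFFFFFFFFu; // sentinel: no work register
  }
};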
@@ -1578,94 +1574,85 @@ Error BaseRAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlo
RAWorkReg* workReg = workRegById(workId);
RegGroup group = workReg->group();
- uint32_t physId = as.workToPhysId(group, workId);
+ uint32_t physId = fromAssignment.workToPhysId(group, workId);
if (physId != RAAssignment::kPhysNone)
- as.unassign(group, workId, physId);
+ physToWorkMap->unassign(group, physId, _physRegIndex.get(group) + physId);
}
}
- return blockEntryAssigned(as);
+ return blockEntryAssigned(physToWorkMap);
}
Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssignment& fromAssignment) noexcept {
ASMJIT_ASSERT(_sharedAssignments[sharedAssignmentId].empty());
PhysToWorkMap* physToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
- WorkToPhysMap* workToPhysMap = cloneWorkToPhysMap(fromAssignment.workToPhysMap());
-
- if (ASMJIT_UNLIKELY(!physToWorkMap || !workToPhysMap))
+ if (ASMJIT_UNLIKELY(!physToWorkMap))
return DebugUtils::errored(kErrorOutOfMemory);
- _sharedAssignments[sharedAssignmentId].assignMaps(physToWorkMap, workToPhysMap);
+ _sharedAssignments[sharedAssignmentId].assignPhysToWorkMap(physToWorkMap);
+
ZoneBitVector& sharedLiveIn = _sharedAssignments[sharedAssignmentId]._liveIn;
ASMJIT_PROPAGATE(sharedLiveIn.resize(allocator(), workRegCount()));
- RAAssignment as;
- as.initLayout(_physRegCount, workRegs());
-
Support::Array<uint32_t, Globals::kNumVirtGroups> sharedAssigned {};
-
for (RABlock* block : blocks()) {
if (block->sharedAssignmentId() == sharedAssignmentId) {
ASMJIT_ASSERT(!block->hasEntryAssignment());
PhysToWorkMap* entryPhysToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
- WorkToPhysMap* entryWorkToPhysMap = cloneWorkToPhysMap(fromAssignment.workToPhysMap());
-
- if (ASMJIT_UNLIKELY(!entryPhysToWorkMap || !entryWorkToPhysMap))
+ if (ASMJIT_UNLIKELY(!entryPhysToWorkMap))
return DebugUtils::errored(kErrorOutOfMemory);
- block->setEntryAssignment(entryPhysToWorkMap, entryWorkToPhysMap);
- as.initMaps(entryPhysToWorkMap, entryWorkToPhysMap);
+ block->setEntryAssignment(entryPhysToWorkMap);
const ZoneBitVector& liveIn = block->liveIn();
sharedLiveIn.or_(liveIn);
for (RegGroup group : RegGroupVirtValues{}) {
sharedAssigned[group] |= entryPhysToWorkMap->assigned[group];
+
+ uint32_t physBaseIndex = _physRegIndex.get(group);
Support::BitWordIterator<RegMask> it(entryPhysToWorkMap->assigned[group]);
while (it.hasNext()) {
uint32_t physId = it.next();
- uint32_t workId = as.physToWorkId(group, physId);
+ uint32_t workId = entryPhysToWorkMap->workIds[physBaseIndex + physId];
if (!liveIn.bitAt(workId))
- as.unassign(group, workId, physId);
+ entryPhysToWorkMap->unassign(group, physId, physBaseIndex + physId);
}
}
}
}
- {
- as.initMaps(physToWorkMap, workToPhysMap);
-
- for (RegGroup group : RegGroupVirtValues{}) {
- Support::BitWordIterator<RegMask> it(_availableRegs[group] & ~sharedAssigned[group]);
+ for (RegGroup group : RegGroupVirtValues{}) {
+ uint32_t physBaseIndex = _physRegIndex.get(group);
+ Support::BitWordIterator<RegMask> it(_availableRegs[group] & ~sharedAssigned[group]);
- while (it.hasNext()) {
- uint32_t physId = it.next();
- if (as.isPhysAssigned(group, physId)) {
- uint32_t workId = as.physToWorkId(group, physId);
- as.unassign(group, workId, physId);
- }
- }
+ while (it.hasNext()) {
+ uint32_t physId = it.next();
+ if (Support::bitTest(physToWorkMap->assigned[group], physId))
+ physToWorkMap->unassign(group, physId, physBaseIndex + physId);
}
}
- return blockEntryAssigned(as);
+ return blockEntryAssigned(physToWorkMap);
}
-Error BaseRAPass::blockEntryAssigned(const RAAssignment& as) noexcept {
+Error BaseRAPass::blockEntryAssigned(const PhysToWorkMap* physToWorkMap) noexcept {
// Complex allocation strategy requires to record register assignments upon block entry (or per shared state).
for (RegGroup group : RegGroupVirtValues{}) {
if (!_strategy[group].isComplex())
continue;
- Support::BitWordIterator<RegMask> it(as.assigned(group));
+ uint32_t physBaseIndex = _physRegIndex[group];
+ Support::BitWordIterator<RegMask> it(physToWorkMap->assigned[group]);
+
while (it.hasNext()) {
uint32_t physId = it.next();
- uint32_t workId = as.physToWorkId(group, physId);
+ uint32_t workId = physToWorkMap->workIds[physBaseIndex + physId];
RAWorkReg* workReg = workRegById(workId);
workReg->addAllocatedMask(Support::bitMask(physId));
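Finally, blockEntryAssigned() now receives the raw PhysToWorkMap and walks each group's assigned mask directly to record which physical registers each work register occupies at block entry. The Support::BitWordIterator loop used above amounts to a standard lowest-set-bit walk, sketched here:

#include <bit>
#include <cstdint>
#include <cstdio>

// Visit the set bits of an assigned-register mask, lowest physId first.
template<typename Visitor>
void forEachAssigned(uint32_t assignedMask, Visitor&& visit) {
  while (assignedMask) {
    uint32_t physId = uint32_t(std::countr_zero(assignedMask));
    assignedMask &= assignedMask - 1; // drop the bit just visited
    visit(physId);
  }
}

int main() {
  // Example: physical registers 1, 4 and 7 are assigned in this group.
  forEachAssigned(0b10010010u, [](uint32_t physId) {
    std::printf("record allocated mask bit for physId %u\n", physId);
  });
}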